/** @file pio_server.c
 */

#ifdef HAVE_CONFIG_H
#  include "config.h"
#endif

#include "pio_server.h"

#include <limits.h>
#include <stdlib.h>
#include <stdio.h>

#ifdef HAVE_PARALLEL_NC4
#include <core/ppm_combinatorics.h>
#include <core/ppm_rectilinear.h>
#include <ppm/ppm_uniform_partition.h>
#endif
#include <yaxt.h>

#include "cdi.h"
#include "cdipio.h"
#include "dmemory.h"
#include "namespace.h"
#include "taxis.h"
#include "pio.h"
#include "pio_comm.h"
#include "pio_interface.h"
#include "pio_rpc.h"
#include "pio_util.h"
#include "cdi_int.h"

#ifndef HAVE_NETCDF_PAR_H
#define MPI_INCLUDED
#endif
#include "pio_cdf_int.h"
#include "resource_handle.h"
#include "resource_unpack.h"
#include "stream_cdf.h"
#include "vlist_var.h"

static struct
{
  size_t size;
  unsigned char *buffer;
  int dictSize;
} *rxWin = NULL;

static MPI_Win getWin = MPI_WIN_NULL;
static MPI_Group groupModel = MPI_GROUP_NULL;

#ifdef HAVE_PARALLEL_NC4
/* prime factorization of number of pio collectors */
static uint32_t *pioPrimes;
static int numPioPrimes;
#endif

/************************************************************************/

static
void serverWinCleanup ()
{
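  /* release the RMA window and the collector-side receive buffers */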
  if (getWin != MPI_WIN_NULL)
    xmpi(MPI_Win_free(&getWin));
  if (rxWin)
    {
      free(rxWin[0].buffer);
      free(rxWin);
    }

  xdebug("%s", "cleaned up mpi_win");
}

 /************************************************************************/

static size_t
collDefBufferSizes()
{
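  /* compute the size and number of dictionary entries of the RMA
     buffer region each model rank will write to, and return the sum
     over all model ranks */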
  int nstreams, * streamIndexList, streamNo, vlistID, nvars, varID, iorank;
  int modelID;
  size_t sumGetBufferSizes = 0;
  int rankGlob = commInqRankGlob ();
  int nProcsModel = commInqNProcsModel ();
  int root = commInqRootGlob ();

  xassert(rxWin != NULL);

  nstreams = reshCountType ( &streamOps );
  streamIndexList = xmalloc((size_t)nstreams * sizeof (streamIndexList[0]));
  reshGetResHListOfType ( nstreams, streamIndexList, &streamOps );
  for ( streamNo = 0; streamNo < nstreams; streamNo++ )
    {
      // space required for data
      vlistID = streamInqVlist ( streamIndexList[streamNo] );
      nvars = vlistNvars ( vlistID );
      for ( varID = 0; varID < nvars; varID++ )
        {
          iorank = vlistInqVarIOrank ( vlistID, varID );
          xassert ( iorank != CDI_UNDEFID );
          if ( iorank == rankGlob )
            {
              for ( modelID = 0; modelID < nProcsModel; modelID++ )
                {
                  int decoChunk;
                  {
                    int varSize = vlistInqVarSize(vlistID, varID);
                    int nProcsModel = commInqNProcsModel();
                    decoChunk =
                      (int)ceilf(cdiPIOpartInflate_
                                 * (float)(varSize + nProcsModel - 1)
                                 / (float)nProcsModel);
                  }
                  xassert ( decoChunk > 0 );
                  rxWin[modelID].size += (size_t)decoChunk * sizeof (double)
                    /* re-align chunks to multiple of double size */
                    + sizeof (double) - 1
                    /* one header for data record, one for
                     * corresponding part descriptor*/
                    + 2 * sizeof (struct winHeaderEntry)
                    /* FIXME: heuristic for size of packed Xt_idxlist */
                    + sizeof (Xt_int) * (size_t)decoChunk * 3;
                  rxWin[modelID].dictSize += 2;
                }
            }
        }
      // space required for the 3 function calls streamOpen, streamDefVlist,
      // streamClose, needed once per stream and timestep for all collector
      // processes, but only on the model root process
      rxWin[root].size += numRPCFuncs * sizeof (struct winHeaderEntry)
        /* serialized filename */
        + MAXDATAFILENAME
        /* data part of streamDefTimestep */
        + (2 * CDI_MAX_NAME + sizeof (taxis_t));
      rxWin[root].dictSize += numRPCFuncs;
    }
  free ( streamIndexList );

  for ( modelID = 0; modelID < nProcsModel; modelID++ )
    {
      /* account for size header */
      rxWin[modelID].dictSize += 1;
      rxWin[modelID].size += sizeof (struct winHeaderEntry);
      rxWin[modelID].size = roundUpToMultiple(rxWin[modelID].size,
                                              PIO_WIN_ALIGN);
      sumGetBufferSizes += (size_t)rxWin[modelID].size;
    }
  xassert ( sumGetBufferSizes <= MAXWINBUFFERSIZE );
  return sumGetBufferSizes;
}

 /************************************************************************/

static void
serverWinCreate(void)
{
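  /* create the one-sided communication window used to fetch model
     data, allocate this collector's receive buffers (one region per
     model rank) and derive the group of model ranks that serves as
     target group of the RMA access epoch */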
  int ranks[1], modelID;
  MPI_Comm commCalc = commInqCommCalc ();
  MPI_Group groupCalc;
  int nProcsModel = commInqNProcsModel ();
  MPI_Info no_locks_info;
  xmpi(MPI_Info_create(&no_locks_info));
  xmpi(MPI_Info_set(no_locks_info, "no_locks", "true"));

  xmpi(MPI_Win_create(MPI_BOTTOM, 0, 1, no_locks_info, commCalc, &getWin));

  /* target group */
  ranks[0] = nProcsModel;
  xmpi ( MPI_Comm_group ( commCalc, &groupCalc ));
  xmpi ( MPI_Group_excl ( groupCalc, 1, ranks, &groupModel ));

  rxWin = xcalloc((size_t)nProcsModel, sizeof (rxWin[0]));
  size_t totalBufferSize = collDefBufferSizes();
  rxWin[0].buffer = (unsigned char*) xmalloc(totalBufferSize);
  size_t ofs = 0;
  for ( modelID = 1; modelID < nProcsModel; modelID++ )
    {
      ofs += rxWin[modelID - 1].size;
      rxWin[modelID].buffer = rxWin[0].buffer + ofs;
    }

  xmpi(MPI_Info_free(&no_locks_info));

  xdebug("%s", "created mpi_win, allocated getBuffer");
}

/************************************************************************/

static void
readFuncCall(struct winHeaderEntry *header)
{
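  /* decode and execute one RPC-style function call (streamOpen,
     streamDefVlist, streamClose or streamDefTimestep) that a model
     process deposited in the window buffer */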
  int root = commInqRootGlob ();
  int funcID = header->id;
  union funcArgs *funcArgs = &(header->specific.funcArgs);

  xassert(funcID >= MINFUNCID && funcID <= MAXFUNCID);
  switch ( funcID )
    {
    case STREAMCLOSE:
      {
        int streamID
          = namespaceAdaptKey2(funcArgs->streamChange.streamID);
        streamClose(streamID);
        xdebug("READ FUNCTION CALL FROM WIN:  %s, streamID=%d,"
               " closed stream",
               funcMap[(-1 - funcID)], streamID);
      }
      break;
    case STREAMOPEN:
      {
        size_t filenamesz = (size_t)funcArgs->newFile.fnamelen;
        xassert ( filenamesz > 0 && filenamesz < MAXDATAFILENAME );
        const char *filename
          = (const char *)(rxWin[root].buffer + header->offset);
        xassert(filename[filenamesz] == '\0');
        int filetype = funcArgs->newFile.filetype;
        int streamID = streamOpenWrite(filename, filetype);
        xassert(streamID != CDI_ELIBNAVAIL);
        xdebug("READ FUNCTION CALL FROM WIN:  %s, filenamesz=%zu,"
               " filename=%s, filetype=%d, OPENED STREAM %d",
               funcMap[(-1 - funcID)], filenamesz, filename,
               filetype, streamID);
      }
      break;
    case STREAMDEFVLIST:
      {
        int streamID
          = namespaceAdaptKey2(funcArgs->streamChange.streamID);
        int vlistID = namespaceAdaptKey2(funcArgs->streamChange.vlistID);
        streamDefVlist(streamID, vlistID);
        xdebug("READ FUNCTION CALL FROM WIN:  %s, streamID=%d,"
               " vlistID=%d, called streamDefVlist ().",
               funcMap[(-1 - funcID)], streamID, vlistID);
      }
      break;
    case STREAMDEFTIMESTEP:
      {
        MPI_Comm commCalc = commInqCommCalc ();
        int streamID = funcArgs->streamNewTimestep.streamID;
        int originNamespace = namespaceResHDecode(streamID).nsp;
        streamID = namespaceAdaptKey2(streamID);
        int oldTaxisID
          = vlistInqTaxis(streamInqVlist(streamID));
        int position = header->offset;
        int changedTaxisID
          = taxisUnpack((char *)rxWin[root].buffer, (int)rxWin[root].size,
                        &position, originNamespace, &commCalc, 0);
        taxis_t *oldTaxisPtr = taxisPtr(oldTaxisID);
        taxis_t *changedTaxisPtr = taxisPtr(changedTaxisID);
        ptaxisCopy(oldTaxisPtr, changedTaxisPtr);
        taxisDestroy(changedTaxisID);
        streamDefTimestep(streamID, funcArgs->streamNewTimestep.tsID);
      }
      break;
    default:
      xabort ( "REMOTE FUNCTIONCALL NOT IMPLEMENTED!" );
    }
}

/************************************************************************/

static void
resizeVarGatherBuf(int vlistID, int varID, double **buf, int *bufSize)
{
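  /* grow *buf so it can hold one full field of the given variable */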
  int size = vlistInqVarSize(vlistID, varID);
  if (size > *bufSize)
    *buf = xrealloc(*buf, (size_t)(*bufSize = size) * sizeof (buf[0][0]));
}

static void
gatherArray(int root, int nProcsModel, int headerIdx,
            int vlistID,
            double *gatherBuf, int *nmiss)
{
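  /* collect the distributed pieces of one record from all model ranks'
     window buffers into gatherBuf via a yaxt redistribution, and sum
     up the per-piece missing-value counts */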
  struct winHeaderEntry *winDict
    = (struct winHeaderEntry *)rxWin[root].buffer;
  int streamID = winDict[headerIdx].id;
  int varID = winDict[headerIdx].specific.dataRecord.varID;
  int varShape[3] = { 0, 0, 0 };
  cdiPioQueryVarDims(varShape, vlistID, varID);
  Xt_int varShapeXt[3];
  static const Xt_int origin[3] = { 0, 0, 0 };
  for (unsigned i = 0; i < 3; ++i)
    varShapeXt[i] = varShape[i];
  int varSize = varShape[0] * varShape[1] * varShape[2];
  struct Xt_offset_ext *partExts
    = xmalloc((size_t)nProcsModel * sizeof (partExts[0]));
  Xt_idxlist *part = xmalloc((size_t)nProcsModel * sizeof (part[0]));
  MPI_Comm commCalc = commInqCommCalc();
  {
    int nmiss_ = 0;
    for (int modelID = 0; modelID < nProcsModel; modelID++)
      {
        struct dataRecord *dataHeader
          = &((struct winHeaderEntry *)
              rxWin[modelID].buffer)[headerIdx].specific.dataRecord;
        int position =
          ((struct winHeaderEntry *)rxWin[modelID].buffer)[headerIdx + 1].offset;
        xassert(namespaceAdaptKey2(((struct winHeaderEntry *)
                                    rxWin[modelID].buffer)[headerIdx].id)
                == streamID
                && dataHeader->varID == varID
                && ((struct winHeaderEntry *)
                    rxWin[modelID].buffer)[headerIdx + 1].id == PARTDESCMARKER
                && position > 0
                && ((size_t)position
                    >= sizeof (struct winHeaderEntry) * (size_t)rxWin[modelID].dictSize)
                && ((size_t)position < rxWin[modelID].size));
        part[modelID] = xt_idxlist_unpack(rxWin[modelID].buffer,
                                          (int)rxWin[modelID].size,
                                          &position, commCalc);
        unsigned partSize = (unsigned)xt_idxlist_get_num_indices(part[modelID]);
        size_t charOfs = (size_t)((rxWin[modelID].buffer
                                   + ((struct winHeaderEntry *)
                                      rxWin[modelID].buffer)[headerIdx].offset)
                                  - rxWin[0].buffer);
        xassert(charOfs % sizeof (double) == 0
                && charOfs / sizeof (double) + partSize <= INT_MAX);
        int elemOfs = (int)(charOfs / sizeof (double));
        partExts[modelID].start = elemOfs;
        partExts[modelID].size = (int)partSize;
        partExts[modelID].stride = 1;
        nmiss_ += dataHeader->nmiss;
      }
    *nmiss = nmiss_;
  }
  Xt_idxlist srcList = xt_idxlist_collection_new(part, nProcsModel);
  for (int modelID = 0; modelID < nProcsModel; modelID++)
    xt_idxlist_delete(part[modelID]);
  free(part);
  Xt_xmap gatherXmap;
  {
    Xt_idxlist dstList
      = xt_idxsection_new(0, 3, varShapeXt, varShapeXt, origin);
    struct Xt_com_list full = { .list = dstList, .rank = 0 };
    gatherXmap = xt_xmap_intersection_new(1, &full, 1, &full, srcList, dstList,
                                          MPI_COMM_SELF);
    xt_idxlist_delete(dstList);
  }
  xt_idxlist_delete(srcList);

  struct Xt_offset_ext gatherExt = { .start = 0, .size = varSize, .stride = 1 };
  Xt_redist gatherRedist
    = xt_redist_p2p_ext_new(gatherXmap, nProcsModel, partExts, 1, &gatherExt,
                            MPI_DOUBLE);
  xt_xmap_delete(gatherXmap);
  xt_redist_s_exchange1(gatherRedist, rxWin[0].buffer, gatherBuf);
  free(partExts);
  xt_redist_delete(gatherRedist);
}

struct xyzDims
{
  int sizes[3];
};

static inline int
xyzGridSize(struct xyzDims dims)
{
  return dims.sizes[0] * dims.sizes[1] * dims.sizes[2];
}

#ifdef HAVE_PARALLEL_NC4
static void
queryVarBounds(struct PPM_extent varShape[3], int vlistID, int varID)
{
  varShape[0].first = 0;
  varShape[1].first = 0;
  varShape[2].first = 0;
  int sizes[3];
  cdiPioQueryVarDims(sizes, vlistID, varID);
  for (unsigned i = 0; i < 3; ++i)
    varShape[i].size = sizes[i];
}

/* compute distribution of collectors such that number of collectors
 * <= number of variable grid cells in each dimension */
static struct xyzDims
varDimsCollGridMatch(const struct PPM_extent varDims[3])
{
  xassert(PPM_extents_size(3, varDims) >= commInqSizeColl());
  struct xyzDims collGrid = { { 1, 1, 1 } };
  /* because of storage order, dividing dimension 3 first is preferred */
  for (int i = 0; i < numPioPrimes; ++i)
    {
      for (int dim = 2; dim >=0; --dim)
        if (collGrid.sizes[dim] * pioPrimes[i] <= varDims[dim].size)
          {
            collGrid.sizes[dim] *= pioPrimes[i];
            goto nextPrime;
          }
      /* no position found, backtracking needed */
      xabort("Not yet implemented: back-tracking needed.");
      nextPrime:
      ;
    }
  return collGrid;
}

static void
myVarPart(struct PPM_extent varShape[3], struct xyzDims collGrid,
          struct PPM_extent myPart[3])
{
  int32_t myCollGridCoord[3];
  {
    struct PPM_extent collGridShape[3];
    for (int i = 0; i < 3; ++i)
      {
        collGridShape[i].first = 0;
        collGridShape[i].size = collGrid.sizes[i];
      }
    PPM_lidx2rlcoord_e(3, collGridShape, commInqRankColl(), myCollGridCoord);
    xdebug("my coord: (%d, %d, %d)", myCollGridCoord[0], myCollGridCoord[1],
           myCollGridCoord[2]);
  }
  PPM_uniform_partition_nd(3, varShape, collGrid.sizes,
                           myCollGridCoord, myPart);
}
#elif defined (HAVE_LIBNETCDF)
/* needed for writing when some files are only written to by a single process */
/* cdiOpenFileMap(fileID) gives the writer process */
int cdiPioSerialOpenFileMap(int streamID)
{
  return stream_to_pointer(streamID)->ownerRank;
}
/* for load-balancing purposes, count number of files per process */
/* cdiSerialOpenFileCount[rank] gives the number of files that rank currently has open */
static int *cdiSerialOpenFileCount = NULL;
int cdiPioNextOpenRank()
{
  xassert(cdiSerialOpenFileCount != NULL);
  int commCollSize = commInqSizeColl();
  int minRank = 0, minOpenCount = cdiSerialOpenFileCount[0];
  for (int i = 1; i < commCollSize; ++i)
    if (cdiSerialOpenFileCount[i] < minOpenCount)
      {
        minOpenCount = cdiSerialOpenFileCount[i];
        minRank = i;
      }
  return minRank;
}

static void
cdiPioOpenFileOnRank(int rank)
{
  xassert(cdiSerialOpenFileCount != NULL
          && (unsigned)rank < (unsigned)commInqSizeColl());
  ++(cdiSerialOpenFileCount[rank]);
}

static void
cdiPioCloseFileOnRank(int rank)
{
  xassert(cdiSerialOpenFileCount != NULL
          && rank >= 0 && rank < commInqSizeColl());
  xassert(cdiSerialOpenFileCount[rank] > 0);
  --(cdiSerialOpenFileCount[rank]);
}

static void
cdiPioServerCdfDefVars(stream_t *streamptr)
{
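  /* only the rank that has the file open (or every rank in PIO_NONE
     mode) forwards to cdfDefVars */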
  int rank, rankOpen;
  if (commInqIOMode() == PIO_NONE
      || ((rank = commInqRankColl())
          == (rankOpen = cdiPioSerialOpenFileMap(streamptr->self))))
    cdfDefVars(streamptr);
}

#endif

struct streamMapping {
  int streamID, filetype;
  int firstHeaderIdx, lastHeaderIdx;
  int numVars, *varMap;
};

struct streamMap
{
  struct streamMapping *entries;
  int numEntries;
};

static int
smCmpStreamID(const void *a_, const void *b_)
{
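  /* qsort comparator: order stream mappings by ascending streamID */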
  const struct streamMapping *a = a_, *b = b_;
  int streamIDa = a->streamID, streamIDb = b->streamID;
  return (streamIDa > streamIDb) - (streamIDa < streamIDb);
}

static inline int
inventorizeStream(struct streamMapping *streamMap, int numStreamIDs,
                  int *sizeStreamMap_, int streamID, int headerIdx)
{
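  /* append a new entry for streamID to streamMap, growing the array if
     needed; for netCDF streams also allocate the per-variable header
     index map (note: a grown streamMap pointer is only visible inside
     this function, not to the caller) */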
  int sizeStreamMap = *sizeStreamMap_;
  if (numStreamIDs >= sizeStreamMap)
    {
      streamMap = xrealloc(streamMap,
                           (size_t)(sizeStreamMap *= 2)
                           * sizeof (streamMap[0]));
      *sizeStreamMap_ = sizeStreamMap;
    }
  streamMap[numStreamIDs].streamID = streamID;
  streamMap[numStreamIDs].firstHeaderIdx = headerIdx;
  streamMap[numStreamIDs].lastHeaderIdx = headerIdx;
  streamMap[numStreamIDs].numVars = -1;
  int filetype = streamInqFiletype(streamID);
  streamMap[numStreamIDs].filetype = filetype;
  if (filetype == FILETYPE_NC || filetype == FILETYPE_NC2
      || filetype == FILETYPE_NC4)
    {
      int vlistID = streamInqVlist(streamID);
      int nvars = vlistNvars(vlistID);
      streamMap[numStreamIDs].numVars = nvars;
      streamMap[numStreamIDs].varMap
        = xmalloc(sizeof (streamMap[numStreamIDs].varMap[0]) * (size_t)nvars);
      for (int i = 0; i < nvars; ++i)
        streamMap[numStreamIDs].varMap[i] = -1;
    }
  return numStreamIDs + 1;
}

static inline int
streamIsInList(struct streamMapping *streamMap, int numStreamIDs,
               int streamIDQuery)
{
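  /* return nonzero iff streamIDQuery already occurs in streamMap */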
  int p = 0;
  for (int i = 0; i < numStreamIDs; ++i)
    p |= streamMap[i].streamID == streamIDQuery;
  return p;
}

static struct streamMap
buildStreamMap(struct winHeaderEntry *winDict)
{
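  /* scan the window dictionary for data records written by this
     collector, merge in streams written on other collectors, and
     return the combined list sorted by streamID */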
  int streamIDOld = CDI_UNDEFID;
  int oldStreamIdx = CDI_UNDEFID;
  int filetype = FILETYPE_UNDEF;
  int sizeStreamMap = 16;
  struct streamMapping *streamMap
    = xmalloc((size_t)sizeStreamMap * sizeof (streamMap[0]));
  int numDataEntries = winDict[0].specific.headerSize.numDataEntries;
  int numStreamIDs = 0;
  /* find streams written on this process */
  for (int headerIdx = 1; headerIdx < numDataEntries; headerIdx += 2)
    {
      int streamID
        = winDict[headerIdx].id
        = namespaceAdaptKey2(winDict[headerIdx].id);
      xassert(streamID > 0);
      if (streamID != streamIDOld)
        {
          for (int i = numStreamIDs - 1; i >= 0; --i)
            if ((streamIDOld = streamMap[i].streamID) == streamID)
              {
                oldStreamIdx = i;
                goto streamIDInventorized;
              }
          oldStreamIdx = numStreamIDs;
          streamIDOld = streamID;
          numStreamIDs = inventorizeStream(streamMap, numStreamIDs,
                                           &sizeStreamMap, streamID, headerIdx);
        }
      streamIDInventorized:
      filetype = streamMap[oldStreamIdx].filetype;
      streamMap[oldStreamIdx].lastHeaderIdx = headerIdx;
      if (filetype == FILETYPE_NC || filetype == FILETYPE_NC2
          || filetype == FILETYPE_NC4)
        {
          int varID = winDict[headerIdx].specific.dataRecord.varID;
          streamMap[oldStreamIdx].varMap[varID] = headerIdx;
        }
    }
  /* join with list of streams written to in total */
  {
    int *streamIDs, *streamIsWritten;
    int numTotalStreamIDs = streamSize();
    streamIDs = (int*) xmalloc(2 * sizeof (streamIDs[0]) * (size_t)numTotalStreamIDs);
    streamGetIndexList(numTotalStreamIDs, streamIDs);
    streamIsWritten = streamIDs + numTotalStreamIDs;
    for (int i = 0; i < numTotalStreamIDs; ++i)
      streamIsWritten[i] = streamIsInList(streamMap, numStreamIDs,
                                          streamIDs[i]);
    /* Find what streams are written to at all on any process */
    xmpi(MPI_Allreduce(MPI_IN_PLACE, streamIsWritten, numTotalStreamIDs,
                       MPI_INT, MPI_BOR, commInqCommColl()));
    /* append streams written to on other tasks to mapping */
    for (int i = 0; i < numTotalStreamIDs; ++i)
      if (streamIsWritten[i] && !streamIsInList(streamMap, numStreamIDs,
                                                streamIDs[i]))
        numStreamIDs = inventorizeStream(streamMap, numStreamIDs,
                                         &sizeStreamMap, streamIDs[i], -1);

    free(streamIDs);
  }
  /* sort written streams by streamID */
  streamMap = xrealloc(streamMap, sizeof (streamMap[0]) * (size_t)numStreamIDs);
  qsort(streamMap, (size_t)numStreamIDs, sizeof (streamMap[0]), smCmpStreamID);
  return (struct streamMap){ .entries = streamMap, .numEntries = numStreamIDs };
}

static void
writeGribStream(struct winHeaderEntry *winDict, struct streamMapping *mapping,
                double **data_, int *currentDataBufSize, int root,
                int nProcsModel)
{
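  /* gather each record of this GRIB stream and write it; if no record
     was transferred, issue a zero-size write so the file-level
     synchronization still takes place */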
  int streamID = mapping->streamID;
  int headerIdx, lastHeaderIdx = mapping->lastHeaderIdx;
  int vlistID = streamInqVlist(streamID);
  if (lastHeaderIdx < 0)
    {
      /* write zero bytes to trigger synchronization code in fileWrite */
      cdiPioFileWrite(streamInqFileID(streamID), NULL, 0,
                      streamInqCurTimestepID(streamID));
    }
  else
    for (headerIdx = mapping->firstHeaderIdx;
         headerIdx <= lastHeaderIdx;
         headerIdx += 2)
      if (streamID == winDict[headerIdx].id)
        {
          int varID = winDict[headerIdx].specific.dataRecord.varID;
          int size = vlistInqVarSize(vlistID, varID);
          int nmiss;
          resizeVarGatherBuf(vlistID, varID, data_, currentDataBufSize);
          double *data = *data_;
          gatherArray(root, nProcsModel, headerIdx,
                      vlistID, data, &nmiss);
          streamWriteVar(streamID, varID, data, nmiss);
          if ( ddebug > 2 )
            {
              char text[1024];
              sprintf(text, "streamID=%d, var[%d], size=%d",
                      streamID, varID, size);
              xprintArray(text, data, size, DATATYPE_FLT);
            }
        }
}

#ifdef HAVE_NETCDF4
static void
buildWrittenVars(struct streamMapping *mapping, int **varIsWritten_,
                 int myCollRank, MPI_Comm collComm)
{
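  /* determine for every variable of the stream which collector rank
     received its data (stored as rank+1; 0 means no rank has it) */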
  int nvars = mapping->numVars;
  int *varMap = mapping->varMap;
  int *varIsWritten = *varIsWritten_
    = xrealloc(*varIsWritten_, sizeof (*varIsWritten) * (size_t)nvars);
  for (int varID = 0; varID < nvars; ++varID)
    varIsWritten[varID] = ((varMap[varID] != -1)
                           ? myCollRank + 1 : 0);
  xmpi(MPI_Allreduce(MPI_IN_PLACE, varIsWritten, nvars,
                     MPI_INT, MPI_BOR, collComm));
}
#endif

static void readGetBuffers()
{
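  /* process everything the model ranks deposited for this timestep:
     first replay the RPC function calls, then gather and write the
     data records of every stream according to its file type */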
  int nProcsModel = commInqNProcsModel ();
  int root        = commInqRootGlob ();
#ifdef HAVE_NETCDF4
  int myCollRank = commInqRankColl();
  MPI_Comm collComm = commInqCommColl();
#endif
  xdebug("%s", "START");

  struct winHeaderEntry *winDict
    = (struct winHeaderEntry *)rxWin[root].buffer;
  xassert(winDict[0].id == HEADERSIZEMARKER);
  {
    int dictSize = rxWin[root].dictSize,
      firstNonRPCEntry = dictSize - winDict[0].specific.headerSize.numRPCEntries - 1,
      headerIdx,
      numFuncCalls = 0;
    for (headerIdx = dictSize - 1;
         headerIdx > firstNonRPCEntry;
         --headerIdx)
      {
        xassert(winDict[headerIdx].id >= MINFUNCID
                && winDict[headerIdx].id <= MAXFUNCID);
        ++numFuncCalls;
        readFuncCall(winDict + headerIdx);
      }
    xassert(numFuncCalls == winDict[0].specific.headerSize.numRPCEntries);
  }
  /* build list of streams, data was transferred for */
  {
    struct streamMap map = buildStreamMap(winDict);
    double *data = NULL;
#ifdef HAVE_NETCDF4
    int *varIsWritten = NULL;
#endif
#if defined (HAVE_PARALLEL_NC4)
    double *writeBuf = NULL;
#endif
    int currentDataBufSize = 0;
    for (int streamIdx = 0; streamIdx < map.numEntries; ++streamIdx)
      {
        int streamID = map.entries[streamIdx].streamID;
        int vlistID = streamInqVlist(streamID);
        int filetype = map.entries[streamIdx].filetype;

        switch (filetype)
          {
          case FILETYPE_GRB:
          case FILETYPE_GRB2:
            writeGribStream(winDict, map.entries + streamIdx,
                            &data, &currentDataBufSize,
                            root, nProcsModel);
            break;
#ifdef HAVE_NETCDF4
          case FILETYPE_NC:
          case FILETYPE_NC2:
          case FILETYPE_NC4:
#ifdef HAVE_PARALLEL_NC4
            /* HAVE_PARALLEL_NC4 implies having ScalES-PPM and yaxt */
            {
              int nvars = map.entries[streamIdx].numVars;
              int *varMap = map.entries[streamIdx].varMap;
              buildWrittenVars(map.entries + streamIdx, &varIsWritten,
                               myCollRank, collComm);
              for (int varID = 0; varID < nvars; ++varID)
                if (varIsWritten[varID])
                  {
                    struct PPM_extent varShape[3];
                    queryVarBounds(varShape, vlistID, varID);
                    struct xyzDims collGrid = varDimsCollGridMatch(varShape);
                    xdebug("writing varID %d with dimensions: "
                           "x=%d, y=%d, z=%d,\n"
                           "found distribution with dimensions:"
                           " x=%d, y=%d, z=%d.", varID,
                           varShape[0].size, varShape[1].size, varShape[2].size,
                           collGrid.sizes[0], collGrid.sizes[1],
                           collGrid.sizes[2]);
                    struct PPM_extent varChunk[3];
                    myVarPart(varShape, collGrid, varChunk);
                    int myChunk[3][2];
                    for (int i = 0; i < 3; ++i)
                      {
                        myChunk[i][0] = PPM_extent_start(varChunk[i]);
                        myChunk[i][1] = PPM_extent_end(varChunk[i]);
                      }
                    xdebug("Writing chunk { { %d, %d }, { %d, %d },"
                           " { %d, %d } }", myChunk[0][0], myChunk[0][1],
                           myChunk[1][0], myChunk[1][1], myChunk[2][0],
                           myChunk[2][1]);
                    Xt_int varSize[3];
                    for (int i = 0; i < 3; ++i)
                      varSize[2 - i] = varShape[i].size;
                    Xt_idxlist preRedistChunk, preWriteChunk;
                    /* prepare yaxt descriptor for current data
                       distribution after collect */
                    int nmiss;
                    if (varMap[varID] == -1)
                      {
                        preRedistChunk = xt_idxempty_new();
                        xdebug("%s", "I got none\n");
                      }
                    else
                      {
                        Xt_int preRedistStart[3] = { 0, 0, 0 };
                        preRedistChunk
                          = xt_idxsection_new(0, 3, varSize, varSize,
                                              preRedistStart);
                        resizeVarGatherBuf(vlistID, varID, &data,
                                           &currentDataBufSize);
                        int headerIdx = varMap[varID];
                        gatherArray(root, nProcsModel, headerIdx,
                                    vlistID, data, &nmiss);
                        xdebug("%s", "I got all\n");
                      }
                    MPI_Bcast(&nmiss, 1, MPI_INT, varIsWritten[varID] - 1,
                              collComm);
                    /* prepare yaxt descriptor for write chunk */
                    {
                      Xt_int preWriteChunkStart[3], preWriteChunkSize[3];
                      for (int i = 0; i < 3; ++i)
                        {
                          preWriteChunkStart[2 - i] = varChunk[i].first;
                          preWriteChunkSize[2 - i] = varChunk[i].size;
                        }
                      preWriteChunk = xt_idxsection_new(0, 3, varSize,
                                                        preWriteChunkSize,
                                                        preWriteChunkStart);
                    }
                    /* prepare redistribution */
                    {
                      Xt_xmap xmap = xt_xmap_all2all_new(preRedistChunk,
                                                         preWriteChunk,
                                                         collComm);
                      Xt_redist redist = xt_redist_p2p_new(xmap, MPI_DOUBLE);
                      xt_idxlist_delete(preRedistChunk);
                      xt_idxlist_delete(preWriteChunk);
                      xt_xmap_delete(xmap);
                      writeBuf = (double*) xrealloc(writeBuf,
                                                    sizeof (double)
                                                    * PPM_extents_size(3, varChunk));
                      xt_redist_s_exchange1(redist, data, writeBuf);
                      xt_redist_delete(redist);
                    }
                    /* write chunk */
                    streamWriteVarChunk(streamID, varID,
                                        (const int (*)[2])myChunk, writeBuf,
                                        nmiss);
                  }
            }
#else
            /* determine process which has stream open (writer) and
             * which has data for which variable (var owner)
             * three cases need to be distinguished */
            {
              int nvars = map.entries[streamIdx].numVars;
              int *varMap = map.entries[streamIdx].varMap;
              buildWrittenVars(map.entries + streamIdx, &varIsWritten,
                               myCollRank, collComm);
              int writerRank;
              if ((writerRank = cdiPioSerialOpenFileMap(streamID))
                  == myCollRank)
                {
                  for (int varID = 0; varID < nvars; ++varID)
                    if (varIsWritten[varID])
                      {
                        int nmiss;
                        int size = vlistInqVarSize(vlistID, varID);
                        resizeVarGatherBuf(vlistID, varID, &data,
                                           &currentDataBufSize);
                        int headerIdx = varMap[varID];
                        if (varIsWritten[varID] == myCollRank + 1)
                          {
                            /* this process has the full array and will
                             * write it */
                            xdebug("gathering varID=%d for direct writing",
                                   varID);
                            gatherArray(root, nProcsModel, headerIdx,
                                        vlistID, data, &nmiss);
                          }
                        else
                          {
                            /* another process has the array and will
                             * send it over */
                            MPI_Status stat;
                            xdebug("receiving varID=%d for writing from"
                                   " process %d",
                                   varID, varIsWritten[varID] - 1);
                            xmpiStat(MPI_Recv(&nmiss, 1, MPI_INT,
                                              varIsWritten[varID] - 1,
                                              COLLBUFNMISS,
                                              collComm, &stat), &stat);
                            xmpiStat(MPI_Recv(data, size, MPI_DOUBLE,
                                              varIsWritten[varID] - 1,
                                              COLLBUFTX,
                                              collComm, &stat), &stat);
                          }
                        streamWriteVar(streamID, varID, data, nmiss);
                      }
                }
              else
                for (int varID = 0; varID < nvars; ++varID)
                  if (varIsWritten[varID] == myCollRank + 1)
                    {
                      /* this process has the full array and another
                       * will write it */
                      int nmiss;
                      int size = vlistInqVarSize(vlistID, varID);
                      resizeVarGatherBuf(vlistID, varID, &data,
                                         &currentDataBufSize);
                      int headerIdx = varMap[varID];
                      gatherArray(root, nProcsModel, headerIdx,
                                  vlistID, data, &nmiss);
                      MPI_Request req;
                      MPI_Status stat;
                      xdebug("sending varID=%d for writing to"
                             " process %d",
                             varID, writerRank);
                      xmpi(MPI_Isend(&nmiss, 1, MPI_INT,
                                     writerRank, COLLBUFNMISS,
                                     collComm, &req));
                      xmpi(MPI_Send(data, size, MPI_DOUBLE,
                                    writerRank, COLLBUFTX,
                                    collComm));
                      xmpiStat(MPI_Wait(&req, &stat), &stat);
                    }
            }
#endif
            break;
#endif
          default:
            xabort("unhandled filetype in parallel I/O.");
          }
      }
#ifdef HAVE_NETCDF4
    free(varIsWritten);
#ifdef HAVE_PARALLEL_NC4
    free(writeBuf);
#endif
#endif
    free(map.entries);
    free(data);
  }
  xdebug("%s", "RETURN");
} 

/************************************************************************/

static
void clearModelWinBuffer(int modelID)
{
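  /* zero out the receive buffer region that mirrors the given model rank */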
  int nProcsModel = commInqNProcsModel ();

Thomas Jahns's avatar
Thomas Jahns committed
910
911
912
913
  xassert((unsigned)modelID < (unsigned)nProcsModel &&
          rxWin != NULL && rxWin[modelID].buffer != NULL &&
          rxWin[modelID].size > 0 &&
          rxWin[modelID].size <= MAXWINBUFFERSIZE );
914
  memset(rxWin[modelID].buffer, 0, rxWin[modelID].size);
Deike Kleberg's avatar
Deike Kleberg committed
915
916
917
918
919
920
}


/************************************************************************/


static
void getTimeStepData()
{
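  /* fetch the current timestep's window contents from all model ranks
     via one-sided MPI_Get and hand them to readGetBuffers() */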
  int modelID;
  char text[1024];
  int nProcsModel = commInqNProcsModel ();
  void *getWinBaseAddr;
  int attrFound;

  xdebug("%s", "START");

  for ( modelID = 0; modelID < nProcsModel; modelID++ )
    clearModelWinBuffer(modelID);
  // todo put in correct lbs and ubs
  xmpi(MPI_Win_start(groupModel, 0, getWin));
  xmpi(MPI_Win_get_attr(getWin, MPI_WIN_BASE, &getWinBaseAddr, &attrFound));
  xassert(attrFound);
  for ( modelID = 0; modelID < nProcsModel; modelID++ )
    {
      xdebug("modelID=%d, nProcsModel=%d, rxWin[%d].size=%zu,"
             " getWin=%p, sizeof(int)=%u",
             modelID, nProcsModel, modelID, rxWin[modelID].size,
             getWinBaseAddr, (unsigned)sizeof(int));
      /* FIXME: this needs to use MPI_PACK for portability */
      xmpi(MPI_Get(rxWin[modelID].buffer, (int)rxWin[modelID].size,
                   MPI_UNSIGNED_CHAR, modelID, 0,
                   (int)rxWin[modelID].size, MPI_UNSIGNED_CHAR, getWin));
    }
  xmpi ( MPI_Win_complete ( getWin ));

  if ( ddebug > 2 )
    for ( modelID = 0; modelID < nProcsModel; modelID++ )
      {
        sprintf(text, "rxWin[%d].size=%zu from PE%d rxWin[%d].buffer",
                modelID, rxWin[modelID].size, modelID, modelID);
        xprintArray(text, rxWin[modelID].buffer,
                    (int)(rxWin[modelID].size / sizeof (double)),
                    DATATYPE_FLT);
      }
  readGetBuffers();

  xdebug("%s", "RETURN");
}

/************************************************************************/

#if defined (HAVE_LIBNETCDF) && ! defined (HAVE_PARALLEL_NC4)
static int
cdiPioStreamCDFOpenWrap(const char *filename, const char *filemode,
                        int filetype, stream_t *streamptr,
                        int recordBufIsToBeCreated)
{
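  /* for netCDF-4 output without parallel netCDF support, only the
     collector rank chosen by cdiPioNextOpenRank() actually opens the
     file; the resulting fileID is broadcast and the owner rank is
     recorded for later writes */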
  switch (filetype)
    {
    case FILETYPE_NC4:
    case FILETYPE_NC4C:
      {
        /* Only needs initialization to shut up gcc */
        int rank = -1, fileID;
        int ioMode = commInqIOMode();
        if (ioMode == PIO_NONE
            || commInqRankColl() == (rank = cdiPioNextOpenRank()))
          fileID = cdiStreamOpenDefaultDelegate(filename, filemode, filetype,
                                                streamptr,
                                                recordBufIsToBeCreated);
        else
          streamptr->filetype = filetype;
        if (ioMode != PIO_NONE)
          xmpi(MPI_Bcast(&fileID, 1, MPI_INT, rank, commInqCommColl()));
        streamptr->ownerRank = rank;
        cdiPioOpenFileOnRank(rank);
        return fileID;
      }
    default:
      return cdiStreamOpenDefaultDelegate(filename, filemode, filetype,
                                          streamptr, recordBufIsToBeCreated);
    }
}

static void