pio_server.c 39.9 KB
Newer Older
Deike Kleberg's avatar
Deike Kleberg committed
1
2
/** @file pio_server.c
 */
3
4
5
6
7
8
#ifdef HAVE_CONFIG_H
#  include "config.h"
#endif

#ifdef USE_MPI

Deike Kleberg's avatar
Deike Kleberg committed
9
10
11
#include "pio_server.h"


12
#include <limits.h>
Deike Kleberg's avatar
Deike Kleberg committed
13
14
#include <stdlib.h>
#include <stdio.h>
15
16
17
18
19
20

#ifdef HAVE_PARALLEL_NC4
#include <core/ppm_combinatorics.h>
#include <core/ppm_rectilinear.h>
#include <ppm/ppm_uniform_partition.h>
#endif
21
#include <yaxt.h>
22

Deike Kleberg's avatar
Deike Kleberg committed
23
#include "cdi.h"
24
#include "namespace.h"
25
#include "taxis.h"
Deike Kleberg's avatar
Deike Kleberg committed
26
#include "pio.h"
Deike Kleberg's avatar
Deike Kleberg committed
27
#include "pio_comm.h"
28
#include "pio_interface.h"
Deike Kleberg's avatar
Deike Kleberg committed
29
#include "pio_rpc.h"
Deike Kleberg's avatar
Deike Kleberg committed
30
#include "pio_util.h"
31
#include "cdi_int.h"
32
33
34
#ifndef HAVE_NETCDF_PAR_H
#define MPI_INCLUDED
#endif
35
#include "pio_cdf_int.h"
36
#include "resource_handle.h"
37
#include "resource_unpack.h"
Thomas Jahns's avatar
Thomas Jahns committed
38
#include "stream_cdf.h"
Deike Kleberg's avatar
Deike Kleberg committed
39
#include "vlist_var.h"
40

41

42
extern resOps streamOps;
43
extern void arrayDestroy ( void );
Deike Kleberg's avatar
Deike Kleberg committed
44

45
46
47
/* Per-model-process descriptor of the receive ("get") buffer the
 * collector exposes via MPI RMA.  All entries share one contiguous
 * allocation owned by rxWin[0].buffer (see serverWinCreate). */
static struct
{
  size_t size;             /* buffer size in bytes, PIO_WIN_ALIGN-rounded */
  unsigned char *buffer;   /* pointer into the shared allocation */
  int dictSize;            /* number of struct winHeaderEntry slots at buffer start */
} *rxWin = NULL;

/* RMA window the model processes deposit their data into */
static MPI_Win getWin = MPI_WIN_NULL;
/* group built by excluding one rank from the commCalc group (see
 * serverWinCreate); presumably the model-process group — confirm
 * against pio_comm rank layout */
static MPI_Group groupModel = MPI_GROUP_NULL;

#ifdef HAVE_PARALLEL_NC4
/* prime factorization of number of pio collectors */
static uint32_t *pioPrimes;
static int numPioPrimes;
#endif
Deike Kleberg's avatar
Deike Kleberg committed
60

Deike Kleberg's avatar
Deike Kleberg committed
61
62
/************************************************************************/

63
static
Deike Kleberg's avatar
Deike Kleberg committed
64
65
void serverWinCleanup ()
{
66
67
  if (getWin != MPI_WIN_NULL)
    xmpi(MPI_Win_free(&getWin));
68
69
  if (rxWin)
    {
70
      free(rxWin[0].buffer);
71
      free(rxWin);
Deike Kleberg's avatar
Deike Kleberg committed
72
    }
73

74
  xdebug("%s", "cleaned up mpi_win");
Deike Kleberg's avatar
Deike Kleberg committed
75
}
76

Deike Kleberg's avatar
Deike Kleberg committed
77
 /************************************************************************/
78

79
80
/**
 * Compute, per model process, how large this collector's receive
 * buffer must be (bytes and header-dictionary slots) for all streams
 * and variables assigned to this collector, and record the results in
 * rxWin[].size / rxWin[].dictSize.
 *
 * @return sum of all per-model buffer sizes, each rounded up to
 *         PIO_WIN_ALIGN; asserted to stay below MAXWINBUFFERSIZE.
 */
static size_t
collDefBufferSizes()
{
  int nstreams, * streamIndexList, streamNo, vlistID, nvars, varID, iorank;
  int modelID;
  size_t sumGetBufferSizes = 0;
  int rankGlob = commInqRankGlob ();
  int nProcsModel = commInqNProcsModel ();
  int root = commInqRootGlob ();

  xassert(rxWin != NULL);

  nstreams = reshCountType ( &streamOps );
  streamIndexList = xmalloc ( nstreams * sizeof ( streamIndexList[0] ));
  reshGetResHListOfType ( nstreams, streamIndexList, &streamOps );
  for ( streamNo = 0; streamNo < nstreams; streamNo++ )
    {
      // space required for data
      vlistID = streamInqVlist ( streamIndexList[streamNo] );
      nvars = vlistNvars ( vlistID );
      for ( varID = 0; varID < nvars; varID++ )
        {
          iorank = vlistInqVarIOrank ( vlistID, varID );
          xassert ( iorank != CDI_UNDEFID );
          /* only variables collected by this rank contribute */
          if ( iorank == rankGlob )
            {
              for ( modelID = 0; modelID < nProcsModel; modelID++ )
                {
                  int decoChunk;
                  {
                    /* NOTE: this inner nProcsModel shadows the outer
                     * variable of the same name (same value, same call) */
                    int varSize = vlistInqVarSize(vlistID, varID);
                    int nProcsModel = commInqNProcsModel();
                    /* upper bound on the per-model share of the variable,
                     * inflated by cdiPIOpartInflate_ for uneven partitions */
                    decoChunk =
                      (int)ceilf(cdiPIOpartInflate_
                                 * (varSize + nProcsModel - 1)/nProcsModel);
                  }
                  xassert ( decoChunk > 0 );
                  rxWin[modelID].size += decoChunk * sizeof (double)
                    /* re-align chunks to multiple of double size */
                    + sizeof (double) - 1
                    /* one header for data record, one for
                     * corresponding part descriptor*/
                    + 2 * sizeof (struct winHeaderEntry)
                    /* FIXME: heuristic for size of packed Xt_idxlist */
                    + sizeof (Xt_int) * decoChunk * 3;
                  rxWin[modelID].dictSize += 2;
                }
            }
        }
      // space required for the 3 function calls streamOpen, streamDefVlist, streamClose 
      // once per stream and timestep for all collprocs only on the modelproc root
      rxWin[root].size += numRPCFuncs * sizeof (struct winHeaderEntry)
        /* serialized filename */
        + MAXDATAFILENAME
        /* data part of streamDefTimestep */
        + (2 * CDI_MAX_NAME + sizeof (taxis_t));
      rxWin[root].dictSize += numRPCFuncs;
    }
  free ( streamIndexList );

  for ( modelID = 0; modelID < nProcsModel; modelID++ )
    {
      /* account for size header */
      rxWin[modelID].dictSize += 1;
      rxWin[modelID].size += sizeof (struct winHeaderEntry);
      rxWin[modelID].size = roundUpToMultiple(rxWin[modelID].size,
                                              PIO_WIN_ALIGN);
      sumGetBufferSizes += (size_t)rxWin[modelID].size;
    }
  xassert ( sumGetBufferSizes <= MAXWINBUFFERSIZE );
  return sumGetBufferSizes;
}
151

Deike Kleberg's avatar
Deike Kleberg committed
152
 /************************************************************************/
153
154

/**
 * Create the RMA window used by model processes to deposit data, and
 * allocate the collector-side receive buffers.
 *
 * The window is created over MPI_BOTTOM with zero size: the collector
 * only targets remote memory, it does not expose any itself.  The
 * per-model buffers are carved out of one contiguous allocation owned
 * by rxWin[0].buffer, with offsets given by the sizes computed in
 * collDefBufferSizes().
 */
static 
 void serverWinCreate ()
{ 
  int ranks[1], modelID;
  MPI_Comm commCalc = commInqCommCalc ();
  MPI_Group groupCalc;
  int nProcsModel = commInqNProcsModel ();

  xmpi ( MPI_Win_create ( MPI_BOTTOM, 0, 1, MPI_INFO_NULL,
                          commCalc, &getWin ));

  /* target group: exclude rank nProcsModel from the commCalc group,
   * leaving groupModel — presumably the model ranks; confirm against
   * the pio_comm rank layout */
  ranks[0] = nProcsModel;
  xmpi ( MPI_Comm_group ( commCalc, &groupCalc ));
  xmpi ( MPI_Group_excl ( groupCalc, 1, ranks, &groupModel ));

  rxWin = xcalloc(nProcsModel, sizeof (rxWin[0]));
  /* collDefBufferSizes fills rxWin[].size / rxWin[].dictSize */
  size_t totalBufferSize = collDefBufferSizes();
  rxWin[0].buffer = xmalloc(totalBufferSize);
  size_t ofs = 0;
  /* slice the single allocation into per-model sub-buffers */
  for ( modelID = 1; modelID < nProcsModel; modelID++ )
    {
      ofs += rxWin[modelID - 1].size;
      rxWin[modelID].buffer = rxWin[0].buffer + ofs;
    }

  xdebug("%s", "created mpi_win, allocated getBuffer");
}

Deike Kleberg's avatar
Deike Kleberg committed
183
184
/************************************************************************/

185
/**
 * Execute one remote procedure call that a model process serialized
 * into the RMA window.
 *
 * @param header window dictionary entry whose id encodes the RPC
 *               (MINFUNCID..MAXFUNCID) and whose specific/offset
 *               fields carry the arguments resp. payload position in
 *               rxWin[root].buffer.
 */
static void
readFuncCall(struct winHeaderEntry *header)
{
  int root = commInqRootGlob ();
  int funcID = header->id;
  union funcArgs *funcArgs = &(header->specific.funcArgs);

  xassert(funcID >= MINFUNCID && funcID <= MAXFUNCID);
  switch ( funcID )
    {
    case STREAMCLOSE:
      {
        /* stream handle is translated into this namespace first */
        int streamID
          = namespaceAdaptKey2(funcArgs->streamChange.streamID);
        streamClose(streamID);
        xdebug("READ FUNCTION CALL FROM WIN:  %s, streamID=%d,"
               " closed stream",
               funcMap[(-1 - funcID)], streamID);
      }
      break;
    case STREAMOPEN:
      {
        size_t filenamesz = funcArgs->newFile.fnamelen;
        xassert ( filenamesz > 0 && filenamesz < MAXDATAFILENAME );
        /* filename payload lives in the root's window buffer and must
         * be NUL-terminated at the transmitted length */
        const char *filename
          = (const char *)(rxWin[root].buffer + header->offset);
        xassert(filename[filenamesz] == '\0');
        int filetype = funcArgs->newFile.filetype;
        int streamID = streamOpenWrite(filename, filetype);
        xassert(streamID != CDI_ELIBNAVAIL);
        xdebug("READ FUNCTION CALL FROM WIN:  %s, filenamesz=%zu,"
               " filename=%s, filetype=%d, OPENED STREAM %d",
               funcMap[(-1 - funcID)], filenamesz, filename,
               filetype, streamID);
      }
      break;
    case STREAMDEFVLIST:
      {
        int streamID
          = namespaceAdaptKey2(funcArgs->streamChange.streamID);
        int vlistID = namespaceAdaptKey2(funcArgs->streamChange.vlistID);
        streamDefVlist(streamID, vlistID);
        xdebug("READ FUNCTION CALL FROM WIN:  %s, streamID=%d,"
               " vlistID=%d, called streamDefVlist ().",
               funcMap[(-1 - funcID)], streamID, vlistID);
      }
      break;
    case STREAMDEFTIMESTEP:
      {
        MPI_Comm commCalc = commInqCommCalc ();
        int streamID = funcArgs->streamNewTimestep.streamID;
        /* decode the sender's namespace before remapping the handle */
        int nspTarget = namespaceResHDecode(streamID).nsp;
        streamID = namespaceAdaptKey2(streamID);
        int oldTaxisID
          = vlistInqTaxis(streamInqVlist(streamID));
        int position = header->offset;
        /* unpack the transmitted taxis, copy its contents onto the
         * stream's existing taxis, then drop the temporary */
        int changedTaxisID
          = taxisUnpack((char *)rxWin[root].buffer, (int)rxWin[root].size,
                        &position, nspTarget, &commCalc, 0);
        taxis_t *oldTaxisPtr = taxisPtr(oldTaxisID);
        taxis_t *changedTaxisPtr = taxisPtr(changedTaxisID);
        ptaxisCopy(oldTaxisPtr, changedTaxisPtr);
        taxisDestroy(changedTaxisID);
        streamDefTimestep(streamID, funcArgs->streamNewTimestep.tsID);
      }
      break;
    default:
      xabort ( "REMOTE FUNCTIONCALL NOT IMPLEMENTED!" );
    }
}

/************************************************************************/

258
259
260
261
262
263
264
265
266
267
268
269
270
/**
 * Grow *buf (and its recorded capacity *bufSize, in elements) so it can
 * hold one full field of variable varID; the buffer never shrinks.
 */
static void
resizeVarGatherBuf(int vlistID, int varID, double **buf, int *bufSize)
{
  int size = vlistInqVarSize(vlistID, varID);
  /* direct condition instead of the original empty-then `if (...) ; else` */
  if (size > *bufSize)
    *buf = xrealloc(*buf, (*bufSize = size) * sizeof (buf[0][0]));
}

/**
 * Collect the distributed pieces of one variable from all model
 * processes' window buffers into a single contiguous field.
 *
 * Each model process contributed a (data record, part descriptor)
 * header pair at headerIdx/headerIdx+1; the part descriptor is a
 * packed Xt_idxlist describing which global indices that process
 * holds.  A yaxt redistribution on MPI_COMM_SELF then copies the
 * scattered doubles from rxWin[0].buffer (which backs all per-model
 * buffers contiguously) into gatherBuf in global index order.
 *
 * @param gatherBuf caller-provided buffer of at least the variable's
 *                  full size (see resizeVarGatherBuf)
 * @param nmiss     out: sum of the per-process missing-value counts
 */
static void
gatherArray(int root, int nProcsModel, int headerIdx,
            int vlistID,
            double *gatherBuf, int *nmiss)
{
  struct winHeaderEntry *winDict
    = (struct winHeaderEntry *)rxWin[root].buffer;
  int streamID = winDict[headerIdx].id;
  int varID = winDict[headerIdx].specific.dataRecord.varID;
  int varShape[3] = { 0, 0, 0 };
  cdiPioQueryVarDims(varShape, vlistID, varID);
  Xt_int varShapeXt[3];
  static const Xt_int origin[3] = { 0, 0, 0 };
  for (unsigned i = 0; i < 3; ++i)
    varShapeXt[i] = varShape[i];
  int varSize = varShape[0] * varShape[1] * varShape[2];
  /* one allocation, split into source offsets and gather offsets */
  int *partOfs = xmalloc(2 * varSize * sizeof (partOfs[0])),
    *gatherOfs = partOfs + varSize;
  Xt_idxlist *part = xmalloc(nProcsModel * sizeof (part[0]));
  MPI_Comm commCalc = commInqCommCalc();
  {
    int nmiss_ = 0, partOfsOfs = 0;
    for (int modelID = 0; modelID < nProcsModel; modelID++)
      {
        struct dataRecord *dataHeader
          = &((struct winHeaderEntry *)
              rxWin[modelID].buffer)[headerIdx].specific.dataRecord;
        int position =
          ((struct winHeaderEntry *)rxWin[modelID].buffer)[headerIdx + 1].offset;
        /* sanity: every model's header pair must describe the same
         * stream/variable, and the part descriptor must lie inside the
         * payload area (past the dictionary, before the buffer end) */
        xassert(namespaceAdaptKey2(((struct winHeaderEntry *)
                                    rxWin[modelID].buffer)[headerIdx].id)
                == streamID
                && dataHeader->varID == varID
                && ((struct winHeaderEntry *)
                    rxWin[modelID].buffer)[headerIdx + 1].id == PARTDESCMARKER
                && position > 0
                && ((size_t)position
                    >= sizeof (struct winHeaderEntry) * rxWin[modelID].dictSize)
                && ((size_t)position < rxWin[modelID].size));
        part[modelID] = xt_idxlist_unpack(rxWin[modelID].buffer,
                                          (int)rxWin[modelID].size,
                                          &position, commCalc);
        Xt_int partSize = xt_idxlist_get_num_indices(part[modelID]);
        /* byte offset of this model's data relative to the start of
         * the shared allocation; must be double-aligned so it can be
         * expressed as an element offset */
        size_t charOfs = (rxWin[modelID].buffer
                          + ((struct winHeaderEntry *)
                             rxWin[modelID].buffer)[headerIdx].offset)
          - rxWin[0].buffer;
        xassert(charOfs % sizeof (double) == 0
                && charOfs / sizeof (double) + partSize <= INT_MAX);
        int elemOfs = charOfs / sizeof (double);
        for (int i = 0; i < (int)partSize; ++i)
          partOfs[partOfsOfs + i] = elemOfs + i;
        partOfsOfs += partSize;
        nmiss_ += dataHeader->nmiss;
      }
    *nmiss = nmiss_;
  }
  Xt_idxlist srcList = xt_idxlist_collection_new(part, nProcsModel);
  for (int modelID = 0; modelID < nProcsModel; modelID++)
    xt_idxlist_delete(part[modelID]);
  free(part);
  Xt_xmap gatherXmap;
  {
    /* destination covers the whole variable on this single process */
    Xt_idxlist dstList
      = xt_idxsection_new(0, 3, varShapeXt, varShapeXt, origin);
    struct Xt_com_list full = { .list = dstList, .rank = 0 };
    gatherXmap = xt_xmap_intersection_new(1, &full, 1, &full, srcList, dstList,
                                        MPI_COMM_SELF);
    xt_idxlist_delete(dstList);
  }
  xt_idxlist_delete(srcList);
  for (int i = 0; i < varSize; ++i)
    gatherOfs[i] = i;

  Xt_redist gatherRedist
    = xt_redist_p2p_off_new(gatherXmap, partOfs, gatherOfs, MPI_DOUBLE);
  xt_xmap_delete(gatherXmap);
  xt_redist_s_exchange1(gatherRedist, rxWin[0].buffer, gatherBuf);
  free(partOfs);
  xt_redist_delete(gatherRedist);
}

/* extents of a 3-dimensional collector process grid */
struct xyzDims
{
  int sizes[3];
};

/* Number of positions in a collector grid, i.e. the product of its
 * three extents. */
static inline int
xyzGridSize(struct xyzDims dims)
{
  int cells = 1;
  for (int i = 0; i < 3; ++i)
    cells *= dims.sizes[i];
  return cells;
}

#ifdef HAVE_PARALLEL_NC4
static void
361
queryVarBounds(struct PPM_extent varShape[3], int vlistID, int varID)
362
{
363
364
  varShape[0].first = 0;
  varShape[1].first = 0;
365
  varShape[2].first = 0;
366
  int sizes[3];
367
  cdiPioQueryVarDims(sizes, vlistID, varID);
368
369
  for (unsigned i = 0; i < 3; ++i)
    varShape[i].size = sizes[i];
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
}

/* compute distribution of collectors such that number of collectors
 * <= number of variable grid cells in each dimension */
static struct xyzDims
varDimsCollGridMatch(const struct PPM_extent varDims[3])
{
  /* the variable must have at least as many cells as there are
   * collectors, otherwise no such grid can exist */
  xassert(PPM_extents_size(3, varDims) >= commInqSizeColl());
  struct xyzDims collGrid = { { 1, 1, 1 } };
  /* greedily place each prime factor of the collector count into the
   * highest dimension that can still absorb it */
  /* because of storage order, dividing dimension 3 first is preferred */
  for (int i = 0; i < numPioPrimes; ++i)
    {
      for (int dim = 2; dim >=0; --dim)
        if (collGrid.sizes[dim] * pioPrimes[i] <= varDims[dim].size)
          {
            collGrid.sizes[dim] *= pioPrimes[i];
            goto nextPrime;
          }
      /* no position found, retrack */
      /* NOTE: greedy placement can dead-end even when a valid
       * factorization exists; real back-tracking is unimplemented */
      xabort("Not yet implemented back-tracking needed.");
      nextPrime:
      ;
    }
  return collGrid;
}

/* Compute this collector's chunk (myPart) of a variable with extents
 * varShape, given the collector grid collGrid: the collector's linear
 * rank is mapped to a 3-d coordinate in the grid, then the variable is
 * uniformly partitioned along each dimension. */
static void
myVarPart(struct PPM_extent varShape[3], struct xyzDims collGrid,
          struct PPM_extent myPart[3])
{
  int32_t myCollGridCoord[3];
  {
    struct PPM_extent collGridShape[3];
    for (int i = 0; i < 3; ++i)
      {
        collGridShape[i].first = 0;
        collGridShape[i].size = collGrid.sizes[i];
      }
    /* linear collector rank -> row/column coordinate in the grid */
    PPM_lidx2rlcoord_e(3, collGridShape, commInqRankColl(), myCollGridCoord);
    xdebug("my coord: (%d, %d, %d)", myCollGridCoord[0], myCollGridCoord[1],
           myCollGridCoord[2]);
  }
  PPM_uniform_partition_nd(3, varShape, collGrid.sizes,
                           myCollGridCoord, myPart);
}
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
#elif defined (HAVE_LIBNETCDF)
/* needed for writing when some files are only written to by a single process */
/* cdiOpenFileMap(fileID) gives the writer process */
/* Return the collector rank that owns (writes) the given stream. */
int cdiPioSerialOpenFileMap(int streamID)
{
  return stream_to_pointer(streamID)->ownerRank;
}
/* for load-balancing purposes, count number of files per process */
/* cdiOpenFileCounts[rank] gives number of open files rank has to himself */
static int *cdiSerialOpenFileCount = NULL;
/* Pick the collector rank that should open the next file: the one
 * currently holding the fewest open files (lowest rank wins ties). */
int cdiPioNextOpenRank()
{
  xassert(cdiSerialOpenFileCount != NULL);
  int numCollRanks = commInqSizeColl();
  int bestRank = 0, bestCount = cdiSerialOpenFileCount[0];
  for (int rank = 1; rank < numCollRanks; ++rank)
    {
      int count = cdiSerialOpenFileCount[rank];
      if (count < bestCount)
        {
          bestCount = count;
          bestRank = rank;
        }
    }
  return bestRank;
}

/* Record that collector `rank` opened one more file (load-balancing
 * bookkeeping for cdiPioNextOpenRank). */
void cdiPioOpenFileOnRank(int rank)
{
  xassert(cdiSerialOpenFileCount != NULL
          && rank >= 0 && rank < commInqSizeColl());
  cdiSerialOpenFileCount[rank] += 1;
}


/* Record that collector `rank` closed one of its files; the count must
 * have been positive. */
void cdiPioCloseFileOnRank(int rank)
{
  xassert(cdiSerialOpenFileCount != NULL
          && rank >= 0 && rank < commInqSizeColl());
  xassert(cdiSerialOpenFileCount[rank] > 0);
  cdiSerialOpenFileCount[rank] -= 1;
}

455
456
457
458
459
460
461
462
463
464
/* Define the NetCDF variables for a stream, but only on the process
 * responsible: in PIO_NONE mode every process defines them; otherwise
 * only the collector rank that owns the file does. */
static void
cdiPioServerCdfDefVars(stream_t *streamptr)
{
  int rank, rankOpen;
  /* short-circuit: in PIO_NONE mode the rank lookup is skipped */
  if (commInqIOMode() == PIO_NONE
      || ((rank = commInqRankColl())
          == (rankOpen = cdiPioSerialOpenFileMap(streamptr->self))))
    cdfDefVars(streamptr);
}

465
466
#endif

467
468
469
470
471
472
/* Per-stream bookkeeping built while scanning the window dictionary:
 * which header range carries its data and, for NetCDF filetypes, which
 * header index (if any) holds each variable (-1 = not written here). */
struct streamMapping {
  int streamID, filetype;
  int firstHeaderIdx, lastHeaderIdx;
  int numVars, *varMap;
};

/* result of buildStreamMap: sorted array of entries plus its length */
struct streamMap
{
  struct streamMapping *entries;
  int numEntries;
};

/* qsort comparator ordering streamMapping entries by ascending
 * streamID; yields -1, 0 or 1. */
static int
smCmpStreamID(const void *a_, const void *b_)
{
  const struct streamMapping *ma = a_, *mb = b_;
  int idA = ma->streamID, idB = mb->streamID;
  if (idA < idB)
    return -1;
  if (idA > idB)
    return 1;
  return 0;
}

487
488
489
490
491
492
493
494
495
496
497
498
499
500
/* Append streamID as a new entry to streamMap, growing the array if
 * needed; headerIdx (-1 if no data was received locally) seeds both
 * first/lastHeaderIdx.  For NetCDF filetypes a per-variable map is
 * allocated and initialized to -1 ("not written").
 *
 * Returns the new entry count (numStreamIDs + 1).
 *
 * FIXME: when the capacity check triggers, xrealloc may move the
 * block, but the new address is stored only in the by-value parameter
 * `streamMap` — the caller's pointer is left dangling and subsequent
 * accesses in buildStreamMap would use freed memory.  Safe only while
 * the initial capacity (16) is never exceeded. */
static inline int
inventorizeStream(struct streamMapping *streamMap, int numStreamIDs,
                  int *sizeStreamMap_, int streamID, int headerIdx)
{
  int sizeStreamMap = *sizeStreamMap_;
  if (numStreamIDs < sizeStreamMap) ; else
    {
      streamMap = xrealloc(streamMap,
                           (sizeStreamMap *= 2)
                           * sizeof (streamMap[0]));
      *sizeStreamMap_ = sizeStreamMap;
    }
  streamMap[numStreamIDs].streamID = streamID;
  streamMap[numStreamIDs].firstHeaderIdx = headerIdx;
  streamMap[numStreamIDs].lastHeaderIdx = headerIdx;
  /* -1 marks "no per-variable map" for non-NetCDF filetypes */
  streamMap[numStreamIDs].numVars = -1;
  int filetype = streamInqFiletype(streamID);
  streamMap[numStreamIDs].filetype = filetype;
  if (filetype == FILETYPE_NC || filetype == FILETYPE_NC2
      || filetype == FILETYPE_NC4)
    {
      int vlistID = streamInqVlist(streamID);
      int nvars = vlistNvars(vlistID);
      streamMap[numStreamIDs].numVars = nvars;
      streamMap[numStreamIDs].varMap
        = xmalloc(sizeof (streamMap[numStreamIDs].varMap[0])
                  * nvars);
      for (int i = 0; i < nvars; ++i)
        streamMap[numStreamIDs].varMap[i] = -1;
    }
  return numStreamIDs + 1;
}

520
521
522
523
524
525
526
527
528
529
/* Return 1 if streamIDQuery occurs among the first numStreamIDs
 * entries of streamMap, else 0 (full scan, no early exit). */
static inline int
streamIsInList(struct streamMapping *streamMap, int numStreamIDs,
               int streamIDQuery)
{
  int found = 0;
  for (int i = 0; i < numStreamIDs; ++i)
    if (streamMap[i].streamID == streamIDQuery)
      found = 1;
  return found;
}

530
/**
 * Build the list of streams data was transferred for in this timestep.
 *
 * Scans the data entries of the root's window dictionary (pairs of
 * data-record + part-descriptor headers, hence the step of 2),
 * collecting one streamMapping entry per distinct stream, then joins
 * this with the set of streams written on ANY collector (via an
 * allreduce) so every collector ends up with the same, sorted stream
 * list — required because the NetCDF paths involve collective calls.
 *
 * @return sorted entries plus their count; caller owns entries and the
 *         per-entry varMap arrays.
 */
static struct streamMap
buildStreamMap(struct winHeaderEntry *winDict)
{
  int streamIDOld = CDI_UNDEFID;
  int oldStreamIdx = CDI_UNDEFID;
  int filetype = CDI_UNDEFID;
  int sizeStreamMap = 16;
  struct streamMapping *streamMap
    = xmalloc(sizeStreamMap * sizeof (streamMap[0]));
  int numDataEntries = winDict[0].specific.headerSize.numDataEntries;
  int numStreamIDs = 0;
  /* find streams written on this process */
  for (int headerIdx = 1; headerIdx < numDataEntries; headerIdx += 2)
    {
      /* remap the transmitted handle into this namespace, in place */
      int streamID
        = winDict[headerIdx].id
        = namespaceAdaptKey2(winDict[headerIdx].id);
      xassert(streamID > 0);
      if (streamID != streamIDOld)
        {
          /* look backwards for an already-inventorized entry */
          for (int i = numStreamIDs - 1; i >= 0; --i)
            if ((streamIDOld = streamMap[i].streamID) == streamID)
              {
                oldStreamIdx = i;
                goto streamIDInventorized;
              }
          oldStreamIdx = numStreamIDs;
          streamIDOld = streamID;
          /* NOTE(review): inventorizeStream may xrealloc streamMap
           * internally without updating this pointer — see FIXME there */
          numStreamIDs = inventorizeStream(streamMap, numStreamIDs,
                                           &sizeStreamMap, streamID, headerIdx);
        }
      streamIDInventorized:
      filetype = streamMap[oldStreamIdx].filetype;
      streamMap[oldStreamIdx].lastHeaderIdx = headerIdx;
      if (filetype == FILETYPE_NC || filetype == FILETYPE_NC2
          || filetype == FILETYPE_NC4)
        {
          int varID = winDict[headerIdx].specific.dataRecord.varID;
          streamMap[oldStreamIdx].varMap[varID] = headerIdx;
        }
    }
  /* join with list of streams written to in total */
  {
    int *streamIDs, *streamIsWritten;
    int numTotalStreamIDs = streamSize();
    streamIDs = xmalloc(2 * sizeof (streamIDs[0]) * (size_t)numTotalStreamIDs);
    streamGetIndexList(numTotalStreamIDs, streamIDs);
    streamIsWritten = streamIDs + numTotalStreamIDs;
    for (int i = 0; i < numTotalStreamIDs; ++i)
      streamIsWritten[i] = streamIsInList(streamMap, numStreamIDs,
                                          streamIDs[i]);
    /* Find what streams are written to at all on any process */
    xmpi(MPI_Allreduce(MPI_IN_PLACE, streamIsWritten, numTotalStreamIDs,
                       MPI_INT, MPI_BOR, commInqCommColl()));
    /* append streams written to on other tasks to mapping */
    for (int i = 0; i < numTotalStreamIDs; ++i)
      if (streamIsWritten[i] && !streamIsInList(streamMap, numStreamIDs,
                                                streamIDs[i]))
        numStreamIDs = inventorizeStream(streamMap, numStreamIDs,
                                         &sizeStreamMap, streamIDs[i], -1);

    free(streamIDs);
  }
  /* sort written streams by streamID */
  streamMap = xrealloc(streamMap, sizeof (streamMap[0]) * numStreamIDs);
  qsort(streamMap, numStreamIDs, sizeof (streamMap[0]), smCmpStreamID);
  return (struct streamMap){ .entries = streamMap, .numEntries = numStreamIDs };
}


600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
#ifdef HAVE_NETCDF4
/* Determine, for every variable of a stream, which collector received
 * its data: locally each written variable is marked with myCollRank+1
 * (0 = not written), then an MPI_BOR allreduce merges the markers
 * across collComm.  Caller frees *varIsWritten_.
 *
 * NOTE(review): the bitwise OR of (rank+1) markers only identifies the
 * writing rank if at most one collector writes each variable — with
 * multiple writers the OR mixes rank bits; confirm that assumption
 * holds for the callers. */
static void
buildWrittenVars(struct streamMapping *mapping, int **varIsWritten_,
                 int myCollRank, MPI_Comm collComm)
{
  int nvars = mapping->numVars;
  int *varMap = mapping->varMap;
  int *varIsWritten = *varIsWritten_ = xmalloc(sizeof (*varIsWritten) * nvars);
  for (int varID = 0; varID < nvars; ++varID)
    varIsWritten[varID] = ((varMap[varID] != -1)
                           ?myCollRank+1 : 0);
  xmpi(MPI_Allreduce(MPI_IN_PLACE, varIsWritten, nvars,
                     MPI_INT, MPI_BOR, collComm));
}
#endif
615

616
static void readGetBuffers()
Deike Kleberg's avatar
Deike Kleberg committed
617
{
618
  int nProcsModel = commInqNProcsModel ();
Deike Kleberg's avatar
Deike Kleberg committed
619
  int root        = commInqRootGlob ();
620
#ifdef HAVE_NETCDF4
621
  int myCollRank = commInqRankColl();
622
  MPI_Comm collComm = commInqCommColl();
623
#endif
624
  xdebug("%s", "START");
625

626
627
  struct winHeaderEntry *winDict
    = (struct winHeaderEntry *)rxWin[root].buffer;
628
  xassert(winDict[0].id == HEADERSIZEMARKER);
629
630
  {
    int dictSize = rxWin[root].dictSize,
631
      firstNonRPCEntry = dictSize - winDict[0].specific.headerSize.numRPCEntries - 1,
632
633
634
635
636
637
      headerIdx,
      numFuncCalls = 0;
    for (headerIdx = dictSize - 1;
         headerIdx > firstNonRPCEntry;
         --headerIdx)
      {
638
639
        xassert(winDict[headerIdx].id >= MINFUNCID
                && winDict[headerIdx].id <= MAXFUNCID);
640
        ++numFuncCalls;
641
        readFuncCall(winDict + headerIdx);
642
      }
643
    xassert(numFuncCalls == winDict[0].specific.headerSize.numRPCEntries);
644
  }
Thomas Jahns's avatar
Thomas Jahns committed
645
  /* build list of streams, data was transferred for */
646
  {
647
    struct streamMap map = buildStreamMap(winDict);
648
649
650
651
    double *data = NULL;
#if defined (HAVE_PARALLEL_NC4)
    double *writeBuf = NULL;
#endif
Thomas Jahns's avatar
Thomas Jahns committed
652
    int currentDataBufSize = 0;
653
    for (int streamIdx = 0; streamIdx < map.numEntries; ++streamIdx)
Thomas Jahns's avatar
Thomas Jahns committed
654
      {
655
        int streamID = map.entries[streamIdx].streamID;
Thomas Jahns's avatar
Thomas Jahns committed
656
        int vlistID = streamInqVlist(streamID);
657
        int filetype = map.entries[streamIdx].filetype;
Thomas Jahns's avatar
Thomas Jahns committed
658

659
        switch (filetype)
660
661
662
          {
          case FILETYPE_GRB:
          case FILETYPE_GRB2:
Thomas Jahns's avatar
Thomas Jahns committed
663
            {
664
              int headerIdx, lastHeaderIdx = map.entries[streamIdx].lastHeaderIdx;
665
              if (lastHeaderIdx < 0)
666
667
668
669
670
                {
                  cdiPioFileWrite(streamInqFileID(streamID), NULL, 0,
                                  streamInqCurTimestepID(streamID));
                  break;
                }
671
              for (headerIdx = map.entries[streamIdx].firstHeaderIdx;
672
                   headerIdx <= lastHeaderIdx;
673
                   headerIdx += 2)
674
                if (streamID == winDict[headerIdx].id)
675
                  {
676
                    int varID = winDict[headerIdx].specific.dataRecord.varID;
677
                    int size = vlistInqVarSize(vlistID, varID);
678
679
680
681
682
                    int nmiss;
                    resizeVarGatherBuf(vlistID, varID, &data,
                                       &currentDataBufSize);
                    gatherArray(root, nProcsModel, headerIdx,
                                vlistID, data, &nmiss);
683
684
685
686
687
688
689
690
691
                    streamWriteVar(streamID, varID, data, nmiss);
                    if ( ddebug > 2 )
                      {
                        char text[1024];
                        sprintf(text, "streamID=%d, var[%d], size=%d",
                                streamID, varID, size);
                        xprintArray(text, data, size, DATATYPE_FLT);
                      }
                  }
Thomas Jahns's avatar
Thomas Jahns committed
692
            }
693
            break;
694
695
696
697
698
699
700
#ifdef HAVE_NETCDF4
          case FILETYPE_NC:
          case FILETYPE_NC2:
          case FILETYPE_NC4:
#ifdef HAVE_PARALLEL_NC4
            /* HAVE_PARALLE_NC4 implies having ScalES-PPM and yaxt */
            {
701
              int nvars = map.entries[streamIdx].numVars;
702
              int *varIsWritten;
703
              int *varMap = map.entries[streamIdx].varMap;
704
705
              buildWrittenVars(map.entries + streamIdx, &varIsWritten,
                               myCollRank, collComm);
706
707
708
709
              for (int varID = 0; varID < nvars; ++varID)
                if (varIsWritten[varID])
                  {
                    struct PPM_extent varShape[3];
710
                    queryVarBounds(varShape, vlistID, varID);
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
                    struct xyzDims collGrid = varDimsCollGridMatch(varShape);
                    xdebug("writing varID %d with dimensions: "
                           "x=%d, y=%d, z=%d,\n"
                           "found distribution with dimensions:"
                           " x=%d, y=%d, z=%d.", varID,
                           varShape[0].size, varShape[1].size, varShape[2].size,
                           collGrid.sizes[0], collGrid.sizes[1],
                           collGrid.sizes[2]);
                    struct PPM_extent varChunk[3];
                    myVarPart(varShape, collGrid, varChunk);
                    int myChunk[3][2];
                    for (int i = 0; i < 3; ++i)
                      {
                        myChunk[i][0] = PPM_extent_start(varChunk[i]);
                        myChunk[i][1] = PPM_extent_end(varChunk[i]);
                      }
                    xdebug("Writing chunk { { %d, %d }, { %d, %d },"
                           " { %d, %d } }", myChunk[0][0], myChunk[0][1],
                           myChunk[1][0], myChunk[1][1], myChunk[2][0],
                           myChunk[2][1]);
                    Xt_int varSize[3];
                    for (int i = 0; i < 3; ++i)
                      varSize[2 - i] = varShape[i].size;
                    Xt_idxlist preRedistChunk, preWriteChunk;
                    /* prepare yaxt descriptor for current data
                       distribution after collect */
                    int nmiss;
                    if (varMap[varID] == -1)
                      {
                        preRedistChunk = xt_idxempty_new();
                        xdebug("%s", "I got none\n");
                      }
                    else
                      {
                        Xt_int preRedistStart[3] = { 0, 0, 0 };
                        preRedistChunk
                          = xt_idxsection_new(0, 3, varSize, varSize,
                                              preRedistStart);
                        resizeVarGatherBuf(vlistID, varID, &data,
                                           &currentDataBufSize);
                        int headerIdx = varMap[varID];
                        gatherArray(root, nProcsModel, headerIdx,
                                    vlistID, data, &nmiss);
                        xdebug("%s", "I got all\n");
                      }
                    MPI_Bcast(&nmiss, 1, MPI_INT, varIsWritten[varID] - 1,
                              collComm);
                    /* prepare yaxt descriptor for write chunk */
                    {
                      Xt_int preWriteChunkStart[3], preWriteChunkSize[3];
                      for (int i = 0; i < 3; ++i)
                        {
                          preWriteChunkStart[2 - i] = varChunk[i].first;
                          preWriteChunkSize[2 - i] = varChunk[i].size;
                        }
                      preWriteChunk = xt_idxsection_new(0, 3, varSize,
                                                        preWriteChunkSize,
                                                        preWriteChunkStart);
                    }
                    /* prepare redistribution */
                    {
                      Xt_xmap xmap = xt_xmap_all2all_new(preRedistChunk,
                                                         preWriteChunk,
                                                         collComm);
                      Xt_redist redist = xt_redist_p2p_new(xmap, MPI_DOUBLE);
                      xt_idxlist_delete(preRedistChunk);
                      xt_idxlist_delete(preWriteChunk);
                      xt_xmap_delete(xmap);
                      writeBuf = xrealloc(writeBuf,
                                          sizeof (double)
                                          * PPM_extents_size(3, varChunk));
782
                      xt_redist_s_exchange1(redist, data, writeBuf);
783
784
785
786
787
788
789
790
791
792
                      xt_redist_delete(redist);
                    }
                    /* write chunk */
                    streamWriteVarChunk(streamID, varID,
                                        (const int (*)[2])myChunk, writeBuf,
                                        nmiss);
                  }
            }
#else
            /* determine process which has stream open (writer) and
793
794
795
             * which has data for which variable (var owner)
             * three cases need to be distinguished */
            {
796
              int nvars = map.entries[streamIdx].numVars;
797
              int *varIsWritten;
798
              int *varMap = map.entries[streamIdx].varMap;
799
800
              buildWrittenVars(map.entries + streamIdx, &varIsWritten,
                               myCollRank, collComm);
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
              int writerRank;
              if ((writerRank = cdiPioSerialOpenFileMap(streamID))
                  == myCollRank)
                {
                  for (int varID = 0; varID < nvars; ++varID)
                    if (varIsWritten[varID])
                      {
                        int nmiss;
                        int size = vlistInqVarSize(vlistID, varID);
                        resizeVarGatherBuf(vlistID, varID, &data,
                                           &currentDataBufSize);
                        int headerIdx = varMap[varID];
                        if (varIsWritten[varID] == myCollRank + 1)
                          {
                            /* this process has the full array and will
                             * write it */
                            xdebug("gathering varID=%d for direct writing",
                                   varID);
                            gatherArray(root, nProcsModel, headerIdx,
                                        vlistID, data, &nmiss);
                          }
                        else
                          {
                            /* another process has the array and will
                             * send it over */
                            MPI_Status stat;
                            xdebug("receiving varID=%d for writing from"
                                   " process %d",
                                   varID, varIsWritten[varID] - 1);
                            xmpiStat(MPI_Recv(&nmiss, 1, MPI_INT,
                                              varIsWritten[varID] - 1,
                                              COLLBUFNMISS,
                                              collComm, &stat), &stat);
                            xmpiStat(MPI_Recv(data, size, MPI_DOUBLE,
                                              varIsWritten[varID] - 1,
                                              COLLBUFTX,
                                              collComm, &stat), &stat);
                          }
                        streamWriteVar(streamID, varID, data, nmiss);
                      }
                }
              else
                for (int varID = 0; varID < nvars; ++varID)
                  if (varIsWritten[varID] == myCollRank + 1)
                    {
                      /* this process has the full array and another
                       * will write it */
                      int nmiss;
                      int size = vlistInqVarSize(vlistID, varID);
                      resizeVarGatherBuf(vlistID, varID, &data,
                                         &currentDataBufSize);
                      int headerIdx = varMap[varID];
                      gatherArray(root, nProcsModel, headerIdx,
                                  vlistID, data, &nmiss);
                      MPI_Request req;
                      MPI_Status stat;
                      xdebug("sending varID=%d for writing to"
                             " process %d",
                             varID, writerRank);
                      xmpi(MPI_Isend(&nmiss, 1, MPI_INT,
                                     writerRank, COLLBUFNMISS,
                                     collComm, &req));
                      xmpi(MPI_Send(data, size, MPI_DOUBLE,
                                    writerRank, COLLBUFTX,
                                    collComm));
                      xmpiStat(MPI_Wait(&req, &stat), &stat);
                    }
            }
869
870
871
#endif
            break;
#endif
872
873
874
          default:
            xabort("unhandled filetype in parallel I/O.");
          }
875
      }
876
    free(map.entries);
Thomas Jahns's avatar
Thomas Jahns committed
877
    free(data);
878
  }
879
  xdebug("%s", "RETURN");
880
881
882
883
} 

/************************************************************************/

Deike Kleberg's avatar
Deike Kleberg committed
884

Thomas Jahns's avatar
Thomas Jahns committed
885
886
static
void clearModelWinBuffer(int modelID)
Deike Kleberg's avatar
Deike Kleberg committed
887
888
889
{
  int nProcsModel = commInqNProcsModel ();

Deike Kleberg's avatar
Deike Kleberg committed
890
891
  xassert ( modelID                >= 0           &&
            modelID                 < nProcsModel &&
892
            rxWin != NULL && rxWin[modelID].buffer != NULL &&
893
894
            rxWin[modelID].size > 0 &&
            rxWin[modelID].size <= MAXWINBUFFERSIZE );
895
  memset(rxWin[modelID].buffer, 0, rxWin[modelID].size);
Deike Kleberg's avatar
Deike Kleberg committed
896
897
898
899
900
901
}


/************************************************************************/


902
static
903
void getTimeStepData()
Deike Kleberg's avatar
Deike Kleberg committed
904
{
905
  int modelID;
906
  char text[1024];
907
  int nProcsModel = commInqNProcsModel ();
Thomas Jahns's avatar
Thomas Jahns committed
908
909
  void *getWinBaseAddr;
  int attrFound;
910

911
  xdebug("%s", "START");
Deike Kleberg's avatar
Deike Kleberg committed
912
913

  // todo put in correct lbs and ubs
914
  xmpi(MPI_Win_start(groupModel, 0, getWin));
915
916
  xmpi(MPI_Win_get_attr(getWin, MPI_WIN_BASE, &getWinBaseAddr, &attrFound));
  xassert(attrFound);
Deike Kleberg's avatar
Deike Kleberg committed
917
918
  for ( modelID = 0; modelID < nProcsModel; modelID++ )
    {
Thomas Jahns's avatar
Thomas Jahns committed
919
      clearModelWinBuffer(modelID);
920
      xdebug("modelID=%d, nProcsModel=%d, rxWin[%d].size=%zu,"
Thomas Jahns's avatar
Thomas Jahns committed
921
             " getWin=%p, sizeof(int)=%u",
922
             modelID, nProcsModel, modelID, rxWin[modelID].size,
Thomas Jahns's avatar
Thomas Jahns committed
923
             getWinBaseAddr, (unsigned)sizeof(int));
924
      /* FIXME: this needs to use MPI_PACK for portability */
925
926
927
      xmpi(MPI_Get(rxWin[modelID].buffer, rxWin[modelID].size,
                   MPI_UNSIGNED_CHAR, modelID, 0,
                   rxWin[modelID].size, MPI_UNSIGNED_CHAR, getWin));
Deike Kleberg's avatar
Deike Kleberg committed
928
    }
929
  xmpi ( MPI_Win_complete ( getWin ));
Deike Kleberg's avatar
Deike Kleberg committed
930

931
  if ( ddebug > 2 )
Deike Kleberg's avatar
Deike Kleberg committed
932
    for ( modelID = 0; modelID < nProcsModel; modelID++ )
933
      {
934
        sprintf(text, "rxWin[%d].size=%zu from PE%d rxWin[%d].buffer",
935
                modelID, rxWin[modelID].size, modelID, modelID);
936
        xprintArray(text, rxWin[modelID].buffer,
937
938
                    rxWin[modelID].size / sizeof (double),
                    DATATYPE_FLT);
939
      }
940
941
  readGetBuffers();

942
  xdebug("%s", "RETURN");
Deike Kleberg's avatar
Deike Kleberg committed
943
}
Deike Kleberg's avatar
Deike Kleberg committed
944
945
946

/************************************************************************/

947
948
949
950
951
952
953
954
955
956
957
958
#if defined (HAVE_LIBNETCDF) && ! defined (HAVE_PARALLEL_NC4)
/* Open-wrapper for serial netCDF in parallel I/O mode: for NC4-family
 * files only one collector rank actually opens the file; the resulting
 * fileID is broadcast to the other collector ranks and the opener is
 * recorded in streamptr->ownerRank.  All other filetypes fall through
 * to the default delegate unchanged. */
static int
cdiPioStreamCDFOpenWrap(const char *filename, const char *filemode,
                        int filetype, stream_t *streamptr,
                        int recordBufIsToBeCreated)
{
  switch (filetype)
    {
    case FILETYPE_NC4:
    case FILETYPE_NC4C:
      {
        int fileID;
        /* Initialize rank: with ioMode == PIO_NONE the || below
         * short-circuits, so cdiPioNextOpenRank() is never called and
         * rank was previously read indeterminate when assigned to
         * ownerRank.  0 assumes the single-collector case --
         * NOTE(review): confirm ownerRank is unused under PIO_NONE. */
        int rank = 0;
        int ioMode = commInqIOMode();
        if (ioMode == PIO_NONE
            || commInqRankColl() == (rank = cdiPioNextOpenRank()))
          fileID = cdiStreamOpenDefaultDelegate(filename, filemode, filetype,
                                                streamptr,
                                                recordBufIsToBeCreated);
        if (ioMode != PIO_NONE)
          /* non-opener ranks receive fileID here */
          xmpi(MPI_Bcast(&fileID, 1, MPI_INT, rank, commInqCommColl()));
        streamptr->ownerRank = rank;
        return fileID;
      }
    default:
      return cdiStreamOpenDefaultDelegate(filename, filemode, filetype,
                                          streamptr, recordBufIsToBeCreated);
    }
}

static void
cdiPioStreamCDFCloseWrap(stream_t *streamptr, int recordBufIsToBeDeleted)
{
  int fileID   = streamptr->fileID;
  int filetype = streamptr->filetype;
  if ( fileID == CDI_UNDEFID )
    Warning("File %s not open!", streamptr->filename);
  else
    switch (filetype)
      {
      case FILETYPE_NC:
      case FILETYPE_NC2:
      case FILETYPE_NC4:
      case FILETYPE_NC4C:
        {
          int rank, rankOpen;
          if (commInqIOMode() == PIO_NONE
              || ((rank = commInqRankColl())
                  == (rankOpen = cdiPioSerialOpenFileMap(streamptr->self))))
            cdiStreamCloseDefaultDelegate(streamptr, recordBufIsToBeDeleted);
          break;
        }
      default:
        cdiStreamCloseDefaultDelegate(streamptr, recordBufIsToBeDeleted);
      }
For faster browsing, not all history is shown. View entire blame