pio_server.c 40.4 KB
Newer Older
Deike Kleberg's avatar
Deike Kleberg committed
1
2
/** @file pio_server.c
 */
3
4
5
6
7
8
#ifdef HAVE_CONFIG_H
#  include "config.h"
#endif

#ifdef USE_MPI

Deike Kleberg's avatar
Deike Kleberg committed
9
10
11
#include "pio_server.h"


12
#include <limits.h>
Deike Kleberg's avatar
Deike Kleberg committed
13
14
#include <stdlib.h>
#include <stdio.h>
15
16
17
18
19
20

#ifdef HAVE_PARALLEL_NC4
#include <core/ppm_combinatorics.h>
#include <core/ppm_rectilinear.h>
#include <ppm/ppm_uniform_partition.h>
#endif
21
#include <yaxt.h>
22

Deike Kleberg's avatar
Deike Kleberg committed
23
#include "cdi.h"
24
#include "namespace.h"
25
#include "taxis.h"
Deike Kleberg's avatar
Deike Kleberg committed
26
#include "pio.h"
Deike Kleberg's avatar
Deike Kleberg committed
27
#include "pio_comm.h"
28
#include "pio_interface.h"
Deike Kleberg's avatar
Deike Kleberg committed
29
#include "pio_rpc.h"
Deike Kleberg's avatar
Deike Kleberg committed
30
#include "pio_util.h"
31
#include "cdi_int.h"
32
33
34
#ifndef HAVE_NETCDF_PAR_H
#define MPI_INCLUDED
#endif
35
#include "pio_cdf_int.h"
36
#include "resource_handle.h"
37
#include "resource_unpack.h"
Thomas Jahns's avatar
Thomas Jahns committed
38
#include "stream_cdf.h"
Deike Kleberg's avatar
Deike Kleberg committed
39
#include "vlist_var.h"
40

41

42
extern resOps streamOps;
43
extern void arrayDestroy ( void );
Deike Kleberg's avatar
Deike Kleberg committed
44

45
46
47
/* Per-model-process receive segment descriptor: rxWin[modelID] describes
 * the part of the RMA window written by model rank modelID. Sized by
 * collDefBufferSizes(), allocated in serverWinCreate(), released in
 * serverWinCleanup(). */
static struct
{
  size_t size;            /* bytes reserved for this model rank */
  unsigned char *buffer;  /* points into the single contiguous
                           * allocation owned by rxWin[0].buffer */
  int dictSize;           /* number of winHeaderEntry slots in segment */
} *rxWin = NULL;

Thomas Jahns's avatar
Thomas Jahns committed
52
static MPI_Win getWin = MPI_WIN_NULL;
Thomas Jahns's avatar
Thomas Jahns committed
53
static MPI_Group groupModel = MPI_GROUP_NULL;
Deike Kleberg's avatar
Deike Kleberg committed
54

55
56
57
58
59
#ifdef HAVE_PARALLEL_NC4
/* prime factorization of number of pio collectors */
static uint32_t *pioPrimes;
static int numPioPrimes;
#endif
Deike Kleberg's avatar
Deike Kleberg committed
60

Deike Kleberg's avatar
Deike Kleberg committed
61
62
/************************************************************************/

63
static
Deike Kleberg's avatar
Deike Kleberg committed
64
65
void serverWinCleanup ()
{
66
67
  if (getWin != MPI_WIN_NULL)
    xmpi(MPI_Win_free(&getWin));
68
69
  if (rxWin)
    {
70
      free(rxWin[0].buffer);
71
      free(rxWin);
Deike Kleberg's avatar
Deike Kleberg committed
72
    }
73

74
  xdebug("%s", "cleaned up mpi_win");
Deike Kleberg's avatar
Deike Kleberg committed
75
}
76

Deike Kleberg's avatar
Deike Kleberg committed
77
 /************************************************************************/
78

79
80
/* Compute, for each model rank, the window-segment size (bytes) and the
 * header-dictionary capacity needed to receive one timestep of data,
 * storing the results in rxWin[modelID].size / .dictSize.
 * Returns the sum of all (alignment-rounded) per-rank segment sizes.
 * Requires rxWin to be allocated and the stream resources to be
 * replicated on this server already. */
static size_t
collDefBufferSizes()
{
  int nstreams, * streamIndexList, streamNo, vlistID, nvars, varID, iorank;
  int modelID;
  size_t sumGetBufferSizes = 0;
  int rankGlob = commInqRankGlob ();
  int nProcsModel = commInqNProcsModel ();
  int root = commInqRootGlob ();

  xassert(rxWin != NULL);

  nstreams = reshCountType ( &streamOps );
  streamIndexList = xmalloc ( nstreams * sizeof ( streamIndexList[0] ));
  reshGetResHListOfType ( nstreams, streamIndexList, &streamOps );
  for ( streamNo = 0; streamNo < nstreams; streamNo++ )
    {
      // space required for data
      vlistID = streamInqVlist ( streamIndexList[streamNo] );
      nvars = vlistNvars ( vlistID );
      for ( varID = 0; varID < nvars; varID++ )
        {
          iorank = vlistInqVarIOrank ( vlistID, varID );
          xassert ( iorank != CDI_UNDEFID );
          /* only variables assigned to this collector contribute */
          if ( iorank == rankGlob )
            {
              for ( modelID = 0; modelID < nProcsModel; modelID++ )
                {
                  /* per-model share of the variable, inflated by
                   * cdiPIOpartInflate_ to allow for uneven
                   * decompositions (NOTE(review): the inner
                   * nProcsModel shadows the outer variable of the
                   * same name and value) */
                  int decoChunk;
                  {
                    int varSize = vlistInqVarSize(vlistID, varID);
                    int nProcsModel = commInqNProcsModel();
                    decoChunk =
                      (int)ceilf(cdiPIOpartInflate_
                                 * (varSize + nProcsModel - 1)/nProcsModel);
                  }
                  xassert ( decoChunk > 0 );
                  rxWin[modelID].size += decoChunk * sizeof (double)
                    /* re-align chunks to multiple of double size */
                    + sizeof (double) - 1
                    /* one header for data record, one for
                     * corresponding part descriptor*/
                    + 2 * sizeof (struct winHeaderEntry)
                    /* FIXME: heuristic for size of packed Xt_idxlist */
                    + sizeof (Xt_int) * decoChunk * 3;
                  rxWin[modelID].dictSize += 2;
                }
            }
        }
      // space required for the 3 function calls streamOpen, streamDefVlist, streamClose 
      // once per stream and timestep for all collprocs only on the modelproc root
      rxWin[root].size += numRPCFuncs * sizeof (struct winHeaderEntry)
        /* serialized filename */
        + MAXDATAFILENAME
        /* data part of streamDefTimestep */
        + (2 * CDI_MAX_NAME + sizeof (taxis_t));
      rxWin[root].dictSize += numRPCFuncs;
    }
  free ( streamIndexList );

  for ( modelID = 0; modelID < nProcsModel; modelID++ )
    {
      /* account for size header */
      rxWin[modelID].dictSize += 1;
      rxWin[modelID].size += sizeof (struct winHeaderEntry);
      /* round up so each rank's segment starts suitably aligned */
      rxWin[modelID].size = roundUpToMultiple(rxWin[modelID].size,
                                              PIO_WIN_ALIGN);
      sumGetBufferSizes += (size_t)rxWin[modelID].size;
    }
  xassert ( sumGetBufferSizes <= MAXWINBUFFERSIZE );
  return sumGetBufferSizes;
}
151

Deike Kleberg's avatar
Deike Kleberg committed
152
 /************************************************************************/
153

154
155
156
static void
serverWinCreate(void)
{
Deike Kleberg's avatar
Deike Kleberg committed
157
  int ranks[1], modelID;
158
  MPI_Comm commCalc = commInqCommCalc ();
Deike Kleberg's avatar
Deike Kleberg committed
159
  MPI_Group groupCalc;
160
  int nProcsModel = commInqNProcsModel ();
161
162
163
  MPI_Info no_locks_info;
  xmpi(MPI_Info_create(&no_locks_info));
  xmpi(MPI_Info_set(no_locks_info, "no_locks", "true"));
Deike Kleberg's avatar
Deike Kleberg committed
164

165
  xmpi(MPI_Win_create(MPI_BOTTOM, 0, 1, no_locks_info, commCalc, &getWin));
Deike Kleberg's avatar
Deike Kleberg committed
166
167

  /* target group */
168
169
  ranks[0] = nProcsModel;
  xmpi ( MPI_Comm_group ( commCalc, &groupCalc ));
Deike Kleberg's avatar
Deike Kleberg committed
170
171
  xmpi ( MPI_Group_excl ( groupCalc, 1, ranks, &groupModel ));

172
  rxWin = xcalloc(nProcsModel, sizeof (rxWin[0]));
173
174
175
176
177
178
179
180
  size_t totalBufferSize = collDefBufferSizes();
  rxWin[0].buffer = xmalloc(totalBufferSize);
  size_t ofs = 0;
  for ( modelID = 1; modelID < nProcsModel; modelID++ )
    {
      ofs += rxWin[modelID - 1].size;
      rxWin[modelID].buffer = rxWin[0].buffer + ofs;
    }
Deike Kleberg's avatar
Deike Kleberg committed
181

182
183
  xmpi(MPI_Info_free(&no_locks_info));

184
  xdebug("%s", "created mpi_win, allocated getBuffer");
Deike Kleberg's avatar
Deike Kleberg committed
185
186
}

Deike Kleberg's avatar
Deike Kleberg committed
187
188
/************************************************************************/

189
/* Decode and execute one RPC entry found in the root model's window
 * segment. The function ID is stored in header->id (a negative value in
 * [MINFUNCID, MAXFUNCID]); any serialized payload (filename, packed
 * taxis) lives at header->offset inside rxWin[root].buffer. */
static void
readFuncCall(struct winHeaderEntry *header)
{
  int root = commInqRootGlob ();
  int funcID = header->id;
  union funcArgs *funcArgs = &(header->specific.funcArgs);

  xassert(funcID >= MINFUNCID && funcID <= MAXFUNCID);
  switch ( funcID )
    {
    case STREAMCLOSE:
      {
        /* translate the client-side handle into this namespace */
        int streamID
          = namespaceAdaptKey2(funcArgs->streamChange.streamID);
        streamClose(streamID);
        xdebug("READ FUNCTION CALL FROM WIN:  %s, streamID=%d,"
               " closed stream",
               funcMap[(-1 - funcID)], streamID);
      }
      break;
    case STREAMOPEN:
      {
        /* filename is serialized (NUL-terminated) in the window */
        size_t filenamesz = funcArgs->newFile.fnamelen;
        xassert ( filenamesz > 0 && filenamesz < MAXDATAFILENAME );
        const char *filename
          = (const char *)(rxWin[root].buffer + header->offset);
        xassert(filename[filenamesz] == '\0');
        int filetype = funcArgs->newFile.filetype;
        int streamID = streamOpenWrite(filename, filetype);
        xassert(streamID != CDI_ELIBNAVAIL);
        xdebug("READ FUNCTION CALL FROM WIN:  %s, filenamesz=%zu,"
               " filename=%s, filetype=%d, OPENED STREAM %d",
               funcMap[(-1 - funcID)], filenamesz, filename,
               filetype, streamID);
      }
      break;
    case STREAMDEFVLIST:
      {
        int streamID
          = namespaceAdaptKey2(funcArgs->streamChange.streamID);
        int vlistID = namespaceAdaptKey2(funcArgs->streamChange.vlistID);
        streamDefVlist(streamID, vlistID);
        xdebug("READ FUNCTION CALL FROM WIN:  %s, streamID=%d,"
               " vlistID=%d, called streamDefVlist ().",
               funcMap[(-1 - funcID)], streamID, vlistID);
      }
      break;
    case STREAMDEFTIMESTEP:
      {
        MPI_Comm commCalc = commInqCommCalc ();
        int streamID = funcArgs->streamNewTimestep.streamID;
        /* decode namespace BEFORE adapting the key: taxisUnpack needs
         * the originating namespace of the handle */
        int nspTarget = namespaceResHDecode(streamID).nsp;
        streamID = namespaceAdaptKey2(streamID);
        int oldTaxisID
          = vlistInqTaxis(streamInqVlist(streamID));
        int position = header->offset;
        /* unpack the transmitted taxis and copy its state onto the
         * stream's existing taxis, then drop the temporary */
        int changedTaxisID
          = taxisUnpack((char *)rxWin[root].buffer, (int)rxWin[root].size,
                        &position, nspTarget, &commCalc, 0);
        taxis_t *oldTaxisPtr = taxisPtr(oldTaxisID);
        taxis_t *changedTaxisPtr = taxisPtr(changedTaxisID);
        ptaxisCopy(oldTaxisPtr, changedTaxisPtr);
        taxisDestroy(changedTaxisID);
        streamDefTimestep(streamID, funcArgs->streamNewTimestep.tsID);
      }
      break;
    default:
      xabort ( "REMOTE FUNCTIONCALL NOT IMPLEMENTED!" );
    }
}

/************************************************************************/

262
263
264
265
266
267
268
269
270
271
272
273
274
/* Grow (never shrink) *buf so it can hold one full copy of variable
 * varID of vlistID; *bufSize tracks the current capacity in elements. */
static void
resizeVarGatherBuf(int vlistID, int varID, double **buf, int *bufSize)
{
  int needed = vlistInqVarSize(vlistID, varID);
  if (needed > *bufSize)
    {
      *bufSize = needed;
      *buf = xrealloc(*buf, needed * sizeof (double));
    }
}

/* Gather the distributed parts of one variable record from all model
 * ranks' window segments into the contiguous array gatherBuf, and sum
 * the per-part nmiss counts into *nmiss.
 * headerIdx indexes the variable's data-record header; headerIdx + 1
 * must hold the matching packed Xt_idxlist part descriptor. The gather
 * is performed as a purely local yaxt redistribution over
 * rxWin[0].buffer, exploiting that all model segments live in one
 * contiguous allocation. */
static void
gatherArray(int root, int nProcsModel, int headerIdx,
            int vlistID,
            double *gatherBuf, int *nmiss)
{
  struct winHeaderEntry *winDict
    = (struct winHeaderEntry *)rxWin[root].buffer;
  int streamID = winDict[headerIdx].id;
  int varID = winDict[headerIdx].specific.dataRecord.varID;
  int varShape[3] = { 0, 0, 0 };
  cdiPioQueryVarDims(varShape, vlistID, varID);
  Xt_int varShapeXt[3];
  static const Xt_int origin[3] = { 0, 0, 0 };
  for (unsigned i = 0; i < 3; ++i)
    varShapeXt[i] = varShape[i];
  int varSize = varShape[0] * varShape[1] * varShape[2];
  /* one allocation holds both offset arrays: partOfs for the scattered
   * source elements, gatherOfs for the contiguous destination */
  int *partOfs = xmalloc(2 * varSize * sizeof (partOfs[0])),
    *gatherOfs = partOfs + varSize;
  Xt_idxlist *part = xmalloc(nProcsModel * sizeof (part[0]));
  MPI_Comm commCalc = commInqCommCalc();
  {
    int nmiss_ = 0, partOfsOfs = 0;
    for (int modelID = 0; modelID < nProcsModel; modelID++)
      {
        struct dataRecord *dataHeader
          = &((struct winHeaderEntry *)
              rxWin[modelID].buffer)[headerIdx].specific.dataRecord;
        int position =
          ((struct winHeaderEntry *)rxWin[modelID].buffer)[headerIdx + 1].offset;
        /* sanity-check that every model rank describes the same
         * stream/variable and its part descriptor lies inside the
         * data area of its segment */
        xassert(namespaceAdaptKey2(((struct winHeaderEntry *)
                                    rxWin[modelID].buffer)[headerIdx].id)
                == streamID
                && dataHeader->varID == varID
                && ((struct winHeaderEntry *)
                    rxWin[modelID].buffer)[headerIdx + 1].id == PARTDESCMARKER
                && position > 0
                && ((size_t)position
                    >= sizeof (struct winHeaderEntry) * rxWin[modelID].dictSize)
                && ((size_t)position < rxWin[modelID].size));
        part[modelID] = xt_idxlist_unpack(rxWin[modelID].buffer,
                                          (int)rxWin[modelID].size,
                                          &position, commCalc);
        Xt_int partSize = xt_idxlist_get_num_indices(part[modelID]);
        /* element offset of this part's data relative to the start of
         * the whole (contiguous) window allocation */
        size_t charOfs = (rxWin[modelID].buffer
                          + ((struct winHeaderEntry *)
                             rxWin[modelID].buffer)[headerIdx].offset)
          - rxWin[0].buffer;
        xassert(charOfs % sizeof (double) == 0
                && charOfs / sizeof (double) + partSize <= INT_MAX);
        int elemOfs = charOfs / sizeof (double);
        for (int i = 0; i < (int)partSize; ++i)
          partOfs[partOfsOfs + i] = elemOfs + i;
        partOfsOfs += partSize;
        nmiss_ += dataHeader->nmiss;
      }
    *nmiss = nmiss_;
  }
  Xt_idxlist srcList = xt_idxlist_collection_new(part, nProcsModel);
  for (int modelID = 0; modelID < nProcsModel; modelID++)
    xt_idxlist_delete(part[modelID]);
  free(part);
  Xt_xmap gatherXmap;
  {
    /* destination: the full variable section, gathered on self */
    Xt_idxlist dstList
      = xt_idxsection_new(0, 3, varShapeXt, varShapeXt, origin);
    struct Xt_com_list full = { .list = dstList, .rank = 0 };
    gatherXmap = xt_xmap_intersection_new(1, &full, 1, &full, srcList, dstList,
                                        MPI_COMM_SELF);
    xt_idxlist_delete(dstList);
  }
  xt_idxlist_delete(srcList);
  for (int i = 0; i < varSize; ++i)
    gatherOfs[i] = i;

  Xt_redist gatherRedist
    = xt_redist_p2p_off_new(gatherXmap, partOfs, gatherOfs, MPI_DOUBLE);
  xt_xmap_delete(gatherXmap);
  xt_redist_s_exchange1(gatherRedist, rxWin[0].buffer, gatherBuf);
  free(partOfs);
  xt_redist_delete(gatherRedist);
}

/* Cartesian dimensions of the 3D collector process grid. */
struct xyzDims
{
  int sizes[3];
};

/* Total number of cells covered by a 3D collector grid. */
static inline int
xyzGridSize(struct xyzDims dims)
{
  int cells = 1;
  for (int i = 0; i < 3; ++i)
    cells *= dims.sizes[i];
  return cells;
}

#ifdef HAVE_PARALLEL_NC4
static void
365
queryVarBounds(struct PPM_extent varShape[3], int vlistID, int varID)
366
{
367
368
  varShape[0].first = 0;
  varShape[1].first = 0;
369
  varShape[2].first = 0;
370
  int sizes[3];
371
  cdiPioQueryVarDims(sizes, vlistID, varID);
372
373
  for (unsigned i = 0; i < 3; ++i)
    varShape[i].size = sizes[i];
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
}

/* compute distribution of collectors such that number of collectors
 * <= number of variable grid cells in each dimension */
/* Greedily distributes the prime factors of the collector count over
 * the three variable dimensions, trying the fastest-varying storage
 * dimension first; aborts if a prime cannot be placed anywhere
 * (back-tracking is not implemented). Uses the module-level
 * pioPrimes/numPioPrimes factorization. */
static struct xyzDims
varDimsCollGridMatch(const struct PPM_extent varDims[3])
{
  xassert(PPM_extents_size(3, varDims) >= commInqSizeColl());
  struct xyzDims collGrid = { { 1, 1, 1 } };
  /* because of storage order, dividing dimension 3 first is preferred */
  for (int i = 0; i < numPioPrimes; ++i)
    {
      for (int dim = 2; dim >=0; --dim)
        if (collGrid.sizes[dim] * pioPrimes[i] <= varDims[dim].size)
          {
            collGrid.sizes[dim] *= pioPrimes[i];
            goto nextPrime;
          }
      /* no position found, retrack */
      xabort("Not yet implemented back-tracking needed.");
      nextPrime:
      ;
    }
  return collGrid;
}

/* Compute this collector's chunk (myPart) of a variable with global
 * shape varShape, given the 3D collector grid collGrid: convert the
 * linear collector rank to grid coordinates, then take the uniform
 * partition cell at those coordinates. */
static void
myVarPart(struct PPM_extent varShape[3], struct xyzDims collGrid,
          struct PPM_extent myPart[3])
{
  int32_t myCollGridCoord[3];
  {
    struct PPM_extent collGridShape[3];
    for (int i = 0; i < 3; ++i)
      {
        collGridShape[i].first = 0;
        collGridShape[i].size = collGrid.sizes[i];
      }
    PPM_lidx2rlcoord_e(3, collGridShape, commInqRankColl(), myCollGridCoord);
    xdebug("my coord: (%d, %d, %d)", myCollGridCoord[0], myCollGridCoord[1],
           myCollGridCoord[2]);
  }
  PPM_uniform_partition_nd(3, varShape, collGrid.sizes,
                           myCollGridCoord, myPart);
}
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
#elif defined (HAVE_LIBNETCDF)
/* needed for writing when some files are only written to by a single process */
/* cdiOpenFileMap(fileID) gives the writer process */
/* Return the collector rank that owns (writes) the given stream. */
int cdiPioSerialOpenFileMap(int streamID)
{
  return stream_to_pointer(streamID)->ownerRank;
}
/* for load-balancing purposes, count number of files per process */
/* cdiOpenFileCounts[rank] gives number of open files rank has to himself */
static int *cdiSerialOpenFileCount = NULL;

/* Return the collector rank currently holding the fewest open files
 * (ties resolved in favour of the lowest rank). */
int cdiPioNextOpenRank()
{
  xassert(cdiSerialOpenFileCount != NULL);
  int commCollSize = commInqSizeColl();
  int best = 0;
  for (int rank = 1; rank < commCollSize; ++rank)
    if (cdiSerialOpenFileCount[rank] < cdiSerialOpenFileCount[best])
      best = rank;
  return best;
}

/* Record that collector rank `rank` has opened one more file
 * (bookkeeping consumed by cdiPioNextOpenRank()). */
void cdiPioOpenFileOnRank(int rank)
{
  xassert(cdiSerialOpenFileCount != NULL
          && rank >= 0 && rank < commInqSizeColl());
  ++(cdiSerialOpenFileCount[rank]);
}


/* Record that collector rank `rank` has closed one of its files;
 * the count must be positive beforehand. */
void cdiPioCloseFileOnRank(int rank)
{
  xassert(cdiSerialOpenFileCount != NULL
          && rank >= 0 && rank < commInqSizeColl());
  xassert(cdiSerialOpenFileCount[rank] > 0);
  --(cdiSerialOpenFileCount[rank]);
}

459
460
461
462
463
464
465
466
467
468
/* Run cdfDefVars unconditionally in serial mode (PIO_NONE); otherwise
 * only on the collector rank that owns the stream's file. */
static void
cdiPioServerCdfDefVars(stream_t *streamptr)
{
  if (commInqIOMode() == PIO_NONE)
    {
      cdfDefVars(streamptr);
      return;
    }
  int myRank = commInqRankColl();
  int ownerRank = cdiPioSerialOpenFileMap(streamptr->self);
  if (myRank == ownerRank)
    cdfDefVars(streamptr);
}

469
470
#endif

471
472
473
474
475
476
/* Maps one output stream to the window header entries holding its data
 * records on this collector. */
struct streamMapping {
  int streamID, filetype;
  /* first/last index into the header dictionary; -1 when no data for
   * this stream arrived locally */
  int firstHeaderIdx, lastHeaderIdx;
  /* varMap[varID] = header index of that variable's record, or -1;
   * allocated only for netCDF filetypes (numVars is -1 otherwise) */
  int numVars, *varMap;
};

477
478
479
480
481
482
/* Result of buildStreamMap(): a heap-allocated array of per-stream
 * mappings, sorted by streamID; caller frees entries. */
struct streamMap
{
  struct streamMapping *entries;
  int numEntries;
};

Thomas Jahns's avatar
Thomas Jahns committed
483
484
485
486
487
488
489
490
/* qsort comparator: order struct streamMapping entries by streamID. */
static int
smCmpStreamID(const void *a_, const void *b_)
{
  int idA = ((const struct streamMapping *)a_)->streamID;
  int idB = ((const struct streamMapping *)b_)->streamID;
  if (idA < idB)
    return -1;
  return idA > idB ? 1 : 0;
}

491
492
493
494
495
496
497
498
499
500
501
502
503
504
/* Append streamID to streamMap (growing the array when full) and
 * initialize the new entry: header range, filetype, and — for netCDF
 * filetypes — a per-variable header map preset to -1.
 * Returns the new entry count.
 *
 * FIXME(review): streamMap is received by value; when the xrealloc
 * below moves the allocation, the resized pointer is never returned to
 * the caller, whose own pointer then dangles. This only works while
 * the map stays within its initial capacity or realloc grows in place;
 * consider passing struct streamMapping ** instead. */
static inline int
inventorizeStream(struct streamMapping *streamMap, int numStreamIDs,
                  int *sizeStreamMap_, int streamID, int headerIdx)
{
  int sizeStreamMap = *sizeStreamMap_;
  if (numStreamIDs < sizeStreamMap) ; else
    {
      streamMap = xrealloc(streamMap,
                           (sizeStreamMap *= 2)
                           * sizeof (streamMap[0]));
      *sizeStreamMap_ = sizeStreamMap;
    }
  streamMap[numStreamIDs].streamID = streamID;
  streamMap[numStreamIDs].firstHeaderIdx = headerIdx;
  streamMap[numStreamIDs].lastHeaderIdx = headerIdx;
  streamMap[numStreamIDs].numVars = -1;
  int filetype = streamInqFiletype(streamID);
  streamMap[numStreamIDs].filetype = filetype;
  if (filetype == FILETYPE_NC || filetype == FILETYPE_NC2
      || filetype == FILETYPE_NC4)
    {
      int vlistID = streamInqVlist(streamID);
      int nvars = vlistNvars(vlistID);
      streamMap[numStreamIDs].numVars = nvars;
      streamMap[numStreamIDs].varMap
        = xmalloc(sizeof (streamMap[numStreamIDs].varMap[0])
                  * nvars);
      for (int i = 0; i < nvars; ++i)
        streamMap[numStreamIDs].varMap[i] = -1;
    }
  return numStreamIDs + 1;
}

524
525
526
527
528
529
530
531
532
533
/* Return 1 if streamIDQuery is already present in the first
 * numStreamIDs entries of streamMap, else 0. */
static inline int
streamIsInList(struct streamMapping *streamMap, int numStreamIDs,
               int streamIDQuery)
{
  for (int i = 0; i < numStreamIDs; ++i)
    if (streamMap[i].streamID == streamIDQuery)
      return 1;
  return 0;
}

534
/* Build the list of streams to be written this timestep: first collect
 * the streams for which data records are present in the local window
 * (data header + part descriptor come in pairs, hence headerIdx += 2),
 * then, via an Allreduce over the collector communicator, append
 * streams written only on other collectors (those entries get header
 * index -1). The result is sorted by streamID so that all collectors
 * iterate streams in the same order. Caller frees .entries. */
static struct streamMap
buildStreamMap(struct winHeaderEntry *winDict)
{
  int streamIDOld = CDI_UNDEFID;
  int oldStreamIdx = CDI_UNDEFID;
  int filetype = CDI_UNDEFID;
  int sizeStreamMap = 16;
  struct streamMapping *streamMap
    = xmalloc(sizeStreamMap * sizeof (streamMap[0]));
  int numDataEntries = winDict[0].specific.headerSize.numDataEntries;
  int numStreamIDs = 0;
  /* find streams written on this process */
  for (int headerIdx = 1; headerIdx < numDataEntries; headerIdx += 2)
    {
      /* adapt the transmitted handle to this namespace in-place */
      int streamID
        = winDict[headerIdx].id
        = namespaceAdaptKey2(winDict[headerIdx].id);
      xassert(streamID > 0);
      if (streamID != streamIDOld)
        {
          /* linear back-search: repeated records for the same stream
           * tend to be adjacent, so recent entries are checked first */
          for (int i = numStreamIDs - 1; i >= 0; --i)
            if ((streamIDOld = streamMap[i].streamID) == streamID)
              {
                oldStreamIdx = i;
                goto streamIDInventorized;
              }
          oldStreamIdx = numStreamIDs;
          streamIDOld = streamID;
          /* NOTE(review): inventorizeStream may xrealloc the map but
           * cannot update our streamMap pointer — see FIXME there */
          numStreamIDs = inventorizeStream(streamMap, numStreamIDs,
                                           &sizeStreamMap, streamID, headerIdx);
        }
      streamIDInventorized:
      filetype = streamMap[oldStreamIdx].filetype;
      streamMap[oldStreamIdx].lastHeaderIdx = headerIdx;
      if (filetype == FILETYPE_NC || filetype == FILETYPE_NC2
          || filetype == FILETYPE_NC4)
        {
          int varID = winDict[headerIdx].specific.dataRecord.varID;
          streamMap[oldStreamIdx].varMap[varID] = headerIdx;
        }
    }
  /* join with list of streams written to in total */
  {
    int *streamIDs, *streamIsWritten;
    int numTotalStreamIDs = streamSize();
    streamIDs = xmalloc(2 * sizeof (streamIDs[0]) * (size_t)numTotalStreamIDs);
    streamGetIndexList(numTotalStreamIDs, streamIDs);
    streamIsWritten = streamIDs + numTotalStreamIDs;
    for (int i = 0; i < numTotalStreamIDs; ++i)
      streamIsWritten[i] = streamIsInList(streamMap, numStreamIDs,
                                          streamIDs[i]);
    /* Find what streams are written to at all on any process */
    xmpi(MPI_Allreduce(MPI_IN_PLACE, streamIsWritten, numTotalStreamIDs,
                       MPI_INT, MPI_BOR, commInqCommColl()));
    /* append streams written to on other tasks to mapping */
    for (int i = 0; i < numTotalStreamIDs; ++i)
      if (streamIsWritten[i] && !streamIsInList(streamMap, numStreamIDs,
                                                streamIDs[i]))
        numStreamIDs = inventorizeStream(streamMap, numStreamIDs,
                                         &sizeStreamMap, streamIDs[i], -1);

    free(streamIDs);
  }
  /* sort written streams by streamID */
  streamMap = xrealloc(streamMap, sizeof (streamMap[0]) * numStreamIDs);
  qsort(streamMap, numStreamIDs, sizeof (streamMap[0]), smCmpStreamID);
  return (struct streamMap){ .entries = streamMap, .numEntries = numStreamIDs };
}

603
604
605
606
607
608
609
610
611
612
/* Gather and write all GRIB records of one stream.
 *
 * winDict:  decoded header table of the root model's window segment
 * mapping:  stream entry (header index range) to process
 * data_/currentDataBufSize: caller-owned reusable gather buffer
 * root:     global rank of the model root process
 * nProcsModel: number of model processes contributing data
 *
 * A stream without local data (lastHeaderIdx < 0) still performs a
 * zero-byte write so that synchronization inside cdiPioFileWrite is
 * not skipped on this process. */
static void
writeGribStream(struct winHeaderEntry *winDict, struct streamMapping *mapping,
                double **data_, int *currentDataBufSize, int root,
                int nProcsModel)
{
  int streamID = mapping->streamID;
  int headerIdx, lastHeaderIdx = mapping->lastHeaderIdx;
  int vlistID = streamInqVlist(streamID);
  if (lastHeaderIdx < 0)
    {
      /* write zero bytes to trigger synchronization code in fileWrite */
      cdiPioFileWrite(streamInqFileID(streamID), NULL, 0,
                      streamInqCurTimestepID(streamID));
    }
  else
    /* headers come in (data record, part descriptor) pairs */
    for (headerIdx = mapping->firstHeaderIdx;
         headerIdx <= lastHeaderIdx;
         headerIdx += 2)
      if (streamID == winDict[headerIdx].id)
        {
          int varID = winDict[headerIdx].specific.dataRecord.varID;
          int size = vlistInqVarSize(vlistID, varID);
          int nmiss;
          resizeVarGatherBuf(vlistID, varID, data_, currentDataBufSize);
          double *data = *data_;
          gatherArray(root, nProcsModel, headerIdx,
                      vlistID, data, &nmiss);
          streamWriteVar(streamID, varID, data, nmiss);
          if ( ddebug > 2 )
            {
              char text[1024];
              /* snprintf bounds the write to the debug buffer
               * (original sprintf was unbounded) */
              snprintf(text, sizeof (text), "streamID=%d, var[%d], size=%d",
                       streamID, varID, size);
              xprintArray(text, data, size, DATATYPE_FLT);
            }
        }
}
640

641
642
643
644
645
646
647
#ifdef HAVE_NETCDF4
static void
buildWrittenVars(struct streamMapping *mapping, int **varIsWritten_,
                 int myCollRank, MPI_Comm collComm)
{
  int nvars = mapping->numVars;
  int *varMap = mapping->varMap;
Thomas Jahns's avatar
Thomas Jahns committed
648
649
  int *varIsWritten = *varIsWritten_
    = xrealloc(*varIsWritten_, sizeof (*varIsWritten) * nvars);
650
651
652
653
654
655
656
  for (int varID = 0; varID < nvars; ++varID)
    varIsWritten[varID] = ((varMap[varID] != -1)
                           ?myCollRank+1 : 0);
  xmpi(MPI_Allreduce(MPI_IN_PLACE, varIsWritten, nvars,
                     MPI_INT, MPI_BOR, collComm));
}
#endif
657

658
static void readGetBuffers()
Deike Kleberg's avatar
Deike Kleberg committed
659
{
660
  int nProcsModel = commInqNProcsModel ();
Deike Kleberg's avatar
Deike Kleberg committed
661
  int root        = commInqRootGlob ();
662
#ifdef HAVE_NETCDF4
663
  int myCollRank = commInqRankColl();
664
  MPI_Comm collComm = commInqCommColl();
665
#endif
666
  xdebug("%s", "START");
667

668
669
  struct winHeaderEntry *winDict
    = (struct winHeaderEntry *)rxWin[root].buffer;
670
  xassert(winDict[0].id == HEADERSIZEMARKER);
671
672
  {
    int dictSize = rxWin[root].dictSize,
673
      firstNonRPCEntry = dictSize - winDict[0].specific.headerSize.numRPCEntries - 1,
674
675
676
677
678
679
      headerIdx,
      numFuncCalls = 0;
    for (headerIdx = dictSize - 1;
         headerIdx > firstNonRPCEntry;
         --headerIdx)
      {
680
681
        xassert(winDict[headerIdx].id >= MINFUNCID
                && winDict[headerIdx].id <= MAXFUNCID);
682
        ++numFuncCalls;
683
        readFuncCall(winDict + headerIdx);
684
      }
685
    xassert(numFuncCalls == winDict[0].specific.headerSize.numRPCEntries);
686
  }
Thomas Jahns's avatar
Thomas Jahns committed
687
  /* build list of streams, data was transferred for */
688
  {
689
    struct streamMap map = buildStreamMap(winDict);
690
    double *data = NULL;
Thomas Jahns's avatar
Thomas Jahns committed
691
692
693
#ifdef HAVE_NETCDF4
    int *varIsWritten = NULL;
#endif
694
695
696
#if defined (HAVE_PARALLEL_NC4)
    double *writeBuf = NULL;
#endif
Thomas Jahns's avatar
Thomas Jahns committed
697
    int currentDataBufSize = 0;
698
    for (int streamIdx = 0; streamIdx < map.numEntries; ++streamIdx)
Thomas Jahns's avatar
Thomas Jahns committed
699
      {
700
        int streamID = map.entries[streamIdx].streamID;
Thomas Jahns's avatar
Thomas Jahns committed
701
        int vlistID = streamInqVlist(streamID);
702
        int filetype = map.entries[streamIdx].filetype;
Thomas Jahns's avatar
Thomas Jahns committed
703

704
        switch (filetype)
705
706
707
          {
          case FILETYPE_GRB:
          case FILETYPE_GRB2:
708
709
710
            writeGribStream(winDict, map.entries + streamIdx,
                            &data, &currentDataBufSize,
                            root, nProcsModel);
711
            break;
712
713
714
715
716
717
718
#ifdef HAVE_NETCDF4
          case FILETYPE_NC:
          case FILETYPE_NC2:
          case FILETYPE_NC4:
#ifdef HAVE_PARALLEL_NC4
            /* HAVE_PARALLE_NC4 implies having ScalES-PPM and yaxt */
            {
719
720
              int nvars = map.entries[streamIdx].numVars;
              int *varMap = map.entries[streamIdx].varMap;
721
722
              buildWrittenVars(map.entries + streamIdx, &varIsWritten,
                               myCollRank, collComm);
723
724
725
726
              for (int varID = 0; varID < nvars; ++varID)
                if (varIsWritten[varID])
                  {
                    struct PPM_extent varShape[3];
727
                    queryVarBounds(varShape, vlistID, varID);
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
                    struct xyzDims collGrid = varDimsCollGridMatch(varShape);
                    xdebug("writing varID %d with dimensions: "
                           "x=%d, y=%d, z=%d,\n"
                           "found distribution with dimensions:"
                           " x=%d, y=%d, z=%d.", varID,
                           varShape[0].size, varShape[1].size, varShape[2].size,
                           collGrid.sizes[0], collGrid.sizes[1],
                           collGrid.sizes[2]);
                    struct PPM_extent varChunk[3];
                    myVarPart(varShape, collGrid, varChunk);
                    int myChunk[3][2];
                    for (int i = 0; i < 3; ++i)
                      {
                        myChunk[i][0] = PPM_extent_start(varChunk[i]);
                        myChunk[i][1] = PPM_extent_end(varChunk[i]);
                      }
                    xdebug("Writing chunk { { %d, %d }, { %d, %d },"
                           " { %d, %d } }", myChunk[0][0], myChunk[0][1],
                           myChunk[1][0], myChunk[1][1], myChunk[2][0],
                           myChunk[2][1]);
                    Xt_int varSize[3];
                    for (int i = 0; i < 3; ++i)
                      varSize[2 - i] = varShape[i].size;
                    Xt_idxlist preRedistChunk, preWriteChunk;
                    /* prepare yaxt descriptor for current data
                       distribution after collect */
                    int nmiss;
                    if (varMap[varID] == -1)
                      {
                        preRedistChunk = xt_idxempty_new();
                        xdebug("%s", "I got none\n");
                      }
                    else
                      {
                        Xt_int preRedistStart[3] = { 0, 0, 0 };
                        preRedistChunk
                          = xt_idxsection_new(0, 3, varSize, varSize,
                                              preRedistStart);
                        resizeVarGatherBuf(vlistID, varID, &data,
                                           &currentDataBufSize);
                        int headerIdx = varMap[varID];
                        gatherArray(root, nProcsModel, headerIdx,
                                    vlistID, data, &nmiss);
                        xdebug("%s", "I got all\n");
                      }
                    MPI_Bcast(&nmiss, 1, MPI_INT, varIsWritten[varID] - 1,
                              collComm);
                    /* prepare yaxt descriptor for write chunk */
                    {
                      Xt_int preWriteChunkStart[3], preWriteChunkSize[3];
                      for (int i = 0; i < 3; ++i)
                        {
                          preWriteChunkStart[2 - i] = varChunk[i].first;
                          preWriteChunkSize[2 - i] = varChunk[i].size;
                        }
                      preWriteChunk = xt_idxsection_new(0, 3, varSize,
                                                        preWriteChunkSize,
                                                        preWriteChunkStart);
                    }
                    /* prepare redistribution */
                    {
                      Xt_xmap xmap = xt_xmap_all2all_new(preRedistChunk,
                                                         preWriteChunk,
                                                         collComm);
                      Xt_redist redist = xt_redist_p2p_new(xmap, MPI_DOUBLE);
                      xt_idxlist_delete(preRedistChunk);
                      xt_idxlist_delete(preWriteChunk);
                      xt_xmap_delete(xmap);
                      writeBuf = xrealloc(writeBuf,
                                          sizeof (double)
                                          * PPM_extents_size(3, varChunk));
799
                      xt_redist_s_exchange1(redist, data, writeBuf);
800
801
802
803
804
805
806
807
808
809
                      xt_redist_delete(redist);
                    }
                    /* write chunk */
                    streamWriteVarChunk(streamID, varID,
                                        (const int (*)[2])myChunk, writeBuf,
                                        nmiss);
                  }
            }
#else
            /* determine process which has stream open (writer) and
810
811
812
             * which has data for which variable (var owner)
             * three cases need to be distinguished */
            {
813
814
              int nvars = map.entries[streamIdx].numVars;
              int *varMap = map.entries[streamIdx].varMap;
815
816
              buildWrittenVars(map.entries + streamIdx, &varIsWritten,
                               myCollRank, collComm);
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
              int writerRank;
              if ((writerRank = cdiPioSerialOpenFileMap(streamID))
                  == myCollRank)
                {
                  for (int varID = 0; varID < nvars; ++varID)
                    if (varIsWritten[varID])
                      {
                        int nmiss;
                        int size = vlistInqVarSize(vlistID, varID);
                        resizeVarGatherBuf(vlistID, varID, &data,
                                           &currentDataBufSize);
                        int headerIdx = varMap[varID];
                        if (varIsWritten[varID] == myCollRank + 1)
                          {
                            /* this process has the full array and will
                             * write it */
                            xdebug("gathering varID=%d for direct writing",
                                   varID);
                            gatherArray(root, nProcsModel, headerIdx,
                                        vlistID, data, &nmiss);
                          }
                        else
                          {
                            /* another process has the array and will
                             * send it over */
                            MPI_Status stat;
                            xdebug("receiving varID=%d for writing from"
                                   " process %d",
                                   varID, varIsWritten[varID] - 1);
                            xmpiStat(MPI_Recv(&nmiss, 1, MPI_INT,
                                              varIsWritten[varID] - 1,
                                              COLLBUFNMISS,
                                              collComm, &stat), &stat);
                            xmpiStat(MPI_Recv(data, size, MPI_DOUBLE,
                                              varIsWritten[varID] - 1,
                                              COLLBUFTX,
                                              collComm, &stat), &stat);
                          }
                        streamWriteVar(streamID, varID, data, nmiss);
                      }
                }
              else
                for (int varID = 0; varID < nvars; ++varID)
                  if (varIsWritten[varID] == myCollRank + 1)
                    {
                      /* this process has the full array and another
                       * will write it */
                      int nmiss;
                      int size = vlistInqVarSize(vlistID, varID);
                      resizeVarGatherBuf(vlistID, varID, &data,
                                         &currentDataBufSize);
                      int headerIdx = varMap[varID];
                      gatherArray(root, nProcsModel, headerIdx,
                                  vlistID, data, &nmiss);
                      MPI_Request req;
                      MPI_Status stat;
                      xdebug("sending varID=%d for writing to"
                             " process %d",
                             varID, writerRank);
                      xmpi(MPI_Isend(&nmiss, 1, MPI_INT,
                                     writerRank, COLLBUFNMISS,
                                     collComm, &req));
                      xmpi(MPI_Send(data, size, MPI_DOUBLE,
                                    writerRank, COLLBUFTX,
                                    collComm));
                      xmpiStat(MPI_Wait(&req, &stat), &stat);
                    }
            }
885
886
887
#endif
            break;
#endif
888
889
890
          default:
            xabort("unhandled filetype in parallel I/O.");
          }
891
      }
Thomas Jahns's avatar
Thomas Jahns committed
892
893
#ifdef HAVE_NETCDF4
    free(varIsWritten);
Thomas Jahns's avatar
Thomas Jahns committed
894
895
896
#ifdef HAVE_PARALLEL_NC4
    free(writeBuf);
#endif
Thomas Jahns's avatar
Thomas Jahns committed
897
#endif
898
    free(map.entries);
Thomas Jahns's avatar
Thomas Jahns committed
899
    free(data);
900
  }
901
  xdebug("%s", "RETURN");
902
903
904
905
} 

/************************************************************************/

Deike Kleberg's avatar
Deike Kleberg committed
906

Thomas Jahns's avatar
Thomas Jahns committed
907
908
static
void clearModelWinBuffer(int modelID)
Deike Kleberg's avatar
Deike Kleberg committed
909
910
911
{
  int nProcsModel = commInqNProcsModel ();

Deike Kleberg's avatar
Deike Kleberg committed
912
913
  xassert ( modelID                >= 0           &&
            modelID                 < nProcsModel &&
914
            rxWin != NULL && rxWin[modelID].buffer != NULL &&
915
916
            rxWin[modelID].size > 0 &&
            rxWin[modelID].size <= MAXWINBUFFERSIZE );
917
  memset(rxWin[modelID].buffer, 0, rxWin[modelID].size);
Deike Kleberg's avatar
Deike Kleberg committed
918
919
920
921
922
923
}


/************************************************************************/


924
static
925
void getTimeStepData()
Deike Kleberg's avatar
Deike Kleberg committed
926
{
927
  int modelID;
928
  char text[1024];
929
  int nProcsModel = commInqNProcsModel ();
Thomas Jahns's avatar
Thomas Jahns committed
930
931
  void *getWinBaseAddr;
  int attrFound;
932

933
  xdebug("%s", "START");
Deike Kleberg's avatar
Deike Kleberg committed
934

935
936
  for ( modelID = 0; modelID < nProcsModel; modelID++ )
    clearModelWinBuffer(modelID);
Deike Kleberg's avatar
Deike Kleberg committed
937
  // todo put in correct lbs and ubs
938
  xmpi(MPI_Win_start(groupModel, 0, getWin));
939
940
  xmpi(MPI_Win_get_attr(getWin, MPI_WIN_BASE, &getWinBaseAddr, &attrFound));
  xassert(attrFound);
Deike Kleberg's avatar
Deike Kleberg committed
941
942
  for ( modelID = 0; modelID < nProcsModel; modelID++ )
    {
943
      xdebug("modelID=%d, nProcsModel=%d, rxWin[%d].size=%zu,"
Thomas Jahns's avatar
Thomas Jahns committed
944
             " getWin=%p, sizeof(int)=%u",
945
             modelID, nProcsModel, modelID, rxWin[modelID].size,
Thomas Jahns's avatar
Thomas Jahns committed
946
             getWinBaseAddr, (unsigned)sizeof(int));
947
      /* FIXME: this needs to use MPI_PACK for portability */
948
949
950
      xmpi(MPI_Get(rxWin[modelID].buffer, rxWin[modelID].size,
                   MPI_UNSIGNED_CHAR, modelID, 0,
                   rxWin[modelID].size, MPI_UNSIGNED_CHAR, getWin));
Deike Kleberg's avatar
Deike Kleberg committed
951
    }
952
  xmpi ( MPI_Win_complete ( getWin ));
Deike Kleberg's avatar
Deike Kleberg committed
953

954
  if ( ddebug > 2 )
Deike Kleberg's avatar
Deike Kleberg committed
955
    for ( modelID = 0; modelID < nProcsModel; modelID++ )
956
      {
957
        sprintf(text, "rxWin[%d].size=%zu from PE%d rxWin[%d].buffer",
958
                modelID, rxWin[modelID].size, modelID, modelID);
959
        xprintArray(text, rxWin[modelID].buffer,
960
961
                    rxWin[modelID].size / sizeof (double),
                    DATATYPE_FLT);
962
      }
963
964
  readGetBuffers();

965
  xdebug("%s", "RETURN");
Deike Kleberg's avatar
Deike Kleberg committed
966
}
Deike Kleberg's avatar
Deike Kleberg committed
967
968
969

/************************************************************************/

970
971
972
973
974
975
976
977
978
979
980
981
#if defined (HAVE_LIBNETCDF) && ! defined (HAVE_PARALLEL_NC4)
static int
cdiPioStreamCDFOpenWrap(const char *filename, const char *filemode,
                        int filetype, stream_t *streamptr,
                        int recordBufIsToBeCreated)
{
  /* Open wrapper for serially written netCDF4 files: only one collector
   * rank (chosen round-robin by cdiPioNextOpenRank) actually opens the
   * file; the resulting fileID is broadcast so every collector knows it.
   * All other filetypes are delegated unchanged.
   *
   * Returns the (possibly broadcast) fileID. */
  switch (filetype)
    {
    case FILETYPE_NC4:
    case FILETYPE_NC4C:
      {
        int ioMode = commInqIOMode();
        /* BUG FIX: the original left rank (and thus streamptr->ownerRank)
         * uninitialized in PIO_NONE mode because the short-circuit ||
         * skipped the assignment; initialize both to CDI_UNDEFID so the
         * reads below are always defined. */
        int rank = CDI_UNDEFID;
        int fileID = CDI_UNDEFID;
        if (ioMode == PIO_NONE
            || commInqRankColl() == (rank = cdiPioNextOpenRank()))
          fileID = cdiStreamOpenDefaultDelegate(filename, filemode, filetype,
                                                streamptr,
                                                recordBufIsToBeCreated);
        if (ioMode != PIO_NONE)
          /* rank is guaranteed assigned here (ioMode != PIO_NONE) */
          xmpi(MPI_Bcast(&fileID, 1, MPI_INT, rank, commInqCommColl()));
        streamptr->ownerRank = rank;
        return fileID;
      }
    default:
      return cdiStreamOpenDefaultDelegate(filename, filemode, filetype,
                                          streamptr, recordBufIsToBeCreated);
    }
}

static void
cdiPioStreamCDFCloseWrap(stream_t *streamptr, int recordBufIsToBeDeleted)
{
  /* Close wrapper for serially written netCDF files: only the collector
   * rank which has the file open performs the close; every other
   * filetype is closed unconditionally via the default delegate. */
  int fileID = streamptr->fileID;
  if (fileID == CDI_UNDEFID)
    {
      Warning("File %s not open!", streamptr->filename);
      return;
    }
  switch (streamptr->filetype)
    {
    case FILETYPE_NC:
    case FILETYPE_NC2:
    case FILETYPE_NC4:
    case FILETYPE_NC4C:
      if (commInqIOMode() == PIO_NONE
          || commInqRankColl() == cdiPioSerialOpenFileMap(streamptr->self))
        cdiStreamCloseDefaultDelegate(streamptr, recordBufIsToBeDeleted);
      break;
    default:
      cdiStreamCloseDefaultDelegate(streamptr, recordBufIsToBeDeleted);
    }
}
Thomas Jahns's avatar
Thomas Jahns committed
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035

static void
cdiPioCdfDefTimestep(stream_t *streamptr, int tsID)
{
  /* Define a timestep in the serial netCDF file, but only on the
   * collector rank that has the file open (every rank in PIO_NONE
   * mode qualifies). */
  if (commInqIOMode() != PIO_NONE)
    {
      int openRank = cdiPioSerialOpenFileMap(streamptr->self);
      if (commInqRankColl() != openRank)
        return;
    }
  cdfDefTimestep(streamptr, tsID);
}

1036
1037
#endif

Deike Kleberg's avatar
Deike Kleberg committed
1038
1039
1040
1041
1042
1043
1044
1045
/**
  @brief is encapsulated in CDI library and run on I/O PEs.

  @param

  @return
*/

1046
void IOServer ()
Deike Kleberg's avatar
Deike Kleberg committed
1047
{
1048
  int source, tag, size, nProcsModel=commInqNProcsModel();
1049
  static int nfinished = 0;
Deike Kleberg's avatar
Deike Kleberg committed
1050
  char * buffer;
1051
1052
  MPI_Comm commCalc;
  MPI_Status status;
Deike Kleberg's avatar
Deike Kleberg committed
1053

1054
  xdebug("%s", "START");
Deike Kleberg's avatar
Deike Kleberg committed
1055

1056
  backendInit ();
1057
1058
  if ( commInqRankNode () == commInqSpecialRankNode ()) 
    backendFinalize ();
1059
  commCalc = commInqCommCalc ();
1060
#ifdef HAVE_PARALLEL_NC4
1061
  cdiPioEnableNetCDFParAccess();
1062
1063
  numPioPrimes = PPM_prime_factorization_32((uint32_t)commInqSizeColl(),
                                            &pioPrimes);
1064
1065
1066
#elif defined (HAVE_LIBNETCDF)
  cdiSerialOpenFileCount = xcalloc(sizeof (cdiSerialOpenFileCount[0]),
                                   commInqSizeColl());
1067
1068
1069
1070
1071
1072
1073
1074
  namespaceSwitchSet(NSSWITCH_STREAM_OPEN_BACKEND,
                     NSSW_FUNC(cdiPioStreamCDFOpenWrap));
  namespaceSwitchSet(NSSWITCH_STREAM_CLOSE_BACKEND,
                     NSSW_FUNC(cdiPioStreamCDFCloseWrap));
  namespaceSwitchSet(NSSWITCH_CDF_DEF_TIMESTEP,
                     NSSW_FUNC(cdiPioCdfDefTimestep));
  namespaceSwitchSet(NSSWITCH_CDF_STREAM_SETUP,
                     NSSW_FUNC(cdiPioServerCdfDefVars));
1075
#endif
1076
1077
1078
  namespaceSwitchSet(NSSWITCH_FILE_WRITE,
                     NSSW_FUNC(cdiPioFileWrite));

Deike Kleberg's avatar
Deike Kleberg committed
1079
  for ( ;; )
1080
    {
Deike Kleberg's avatar
Deike Kleberg committed
1081
      xmpi ( MPI_Probe ( MPI_ANY_SOURCE, MPI_ANY_TAG, commCalc, &status ));
1082
      
Deike Kleberg's avatar
Deike Kleberg committed
1083
1084
      source = status.MPI_SOURCE;
      tag    = status.MPI_TAG;
1085
      
Deike Kleberg's avatar
Deike Kleberg committed
1086
      switch ( tag )
1087
        {
1088
1089
1090
1091
1092
1093
1094
        case FINALIZE:
          {
            int i;
            xdebugMsg(tag, source, nfinished);
            xmpi(MPI_Recv(&i, 1, MPI_INTEGER, source,
                          tag, commCalc, &status));
          }
1095
          xdebug("%s", "RECEIVED MESSAGE WITH TAG \"FINALIZE\"");
1096
          nfinished++;
Thomas Jahns's avatar
Thomas Jahns committed
1097
1098
1099
          xdebug("nfinished=%d, nProcsModel=%d", nfinished, nProcsModel);
          if ( nfinished == nProcsModel )
            {
1100
              {
Deike Kleberg's avatar
Deike Kleberg committed
1101
                int nStreams = streamSize ();
Thomas Jahns's avatar
Thomas Jahns committed
1102

Deike Kleberg's avatar
Deike Kleberg committed
1103
1104
1105
1106
                if ( nStreams > 0 )
                  {
                    int streamNo;
                    int * resHs;
Thomas Jahns's avatar
Thomas Jahns committed
1107

Deike Kleberg's avatar
Deike Kleberg committed
1108
                    resHs       = xmalloc ( nStreams * sizeof ( resHs[0] ));
1109
                    streamGetIndexList ( nStreams, resHs );
Deike Kleberg's avatar
Deike Kleberg committed
1110
1111
1112
1113
                    for ( streamNo = 0; streamNo < nStreams; streamNo++ )
                      streamClose ( resHs[streamNo] );
                    free ( resHs );
                  }
Deike Kleberg's avatar
Deike Kleberg committed
1114
              }
Thomas Jahns's avatar
Thomas Jahns committed
1115
1116
              backendCleanup();
              serverWinCleanup();
1117
              /* listDestroy(); */
1118
              xdebug("%s", "RETURN");
Deike Kleberg's avatar
Deike Kleberg committed
1119
1120
              return;
            }
1121
	  
1122
1123
          break;
          
Deike Kleberg's avatar
Deike Kleberg committed
1124
	case RESOURCES:
1125
	  xdebugMsg (  tag, source, nfinished );
1126
	  xmpi ( MPI_Get_count ( &status, MPI_CHAR, &size ));
Thomas Jahns's avatar
Thomas Jahns committed
1127
	  buffer = xmalloc(size);
1128
1129
	  xmpi ( MPI_Recv ( buffer, size, MPI_PACKED, source,
                            tag, commCalc, &status ));
1130
          xdebug("%s", "RECEIVED MESSAGE WITH TAG \"RESOURCES\"");
1131
	  reshUnpackResources(buffer, size, &commCalc);
1132
          xdebug("%s", "");
Deike Kleberg's avatar
Deike Kleberg committed
1133
	  free ( buffer );
Thomas Jahns's avatar
Thomas Jahns committed
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
          {
            int rankGlob = commInqRankGlob();
            if ( ddebug > 0 && rankGlob == nProcsModel)
              {
                static const char baseName[] = "reshListIOServer.",
                  suffix[] = ".txt";
                /* 9 digits for rank at most */
                char buf[sizeof (baseName) + 9 + sizeof (suffix) + 1];
                snprintf(buf, sizeof (buf), "%s%d%s", baseName, rankGlob,
                         suffix);
                FILE *fp = fopen(buf, "w");
                xassert(fp);
                reshListPrint(fp);