pio_server.c 40.4 KB
Newer Older
Deike Kleberg's avatar
Deike Kleberg committed
1
2
/** @file pio_server.c
*/
3
4
5
6
7
8
#ifdef HAVE_CONFIG_H
#  include "config.h"
#endif

#ifdef USE_MPI

Deike Kleberg's avatar
Deike Kleberg committed
9
10
11
#include "pio_server.h"


12
#include <limits.h>
Deike Kleberg's avatar
Deike Kleberg committed
13
14
#include <stdlib.h>
#include <stdio.h>
15
16
17
18
19
20

#ifdef HAVE_PARALLEL_NC4
#include <core/ppm_combinatorics.h>
#include <core/ppm_rectilinear.h>
#include <ppm/ppm_uniform_partition.h>
#endif
21
#include <yaxt.h>
22

Deike Kleberg's avatar
Deike Kleberg committed
23
#include "cdi.h"
24
#include "cdipio.h"
25
#include "namespace.h"
26
#include "taxis.h"
Deike Kleberg's avatar
Deike Kleberg committed
27
#include "pio.h"
Deike Kleberg's avatar
Deike Kleberg committed
28
#include "pio_comm.h"
29
#include "pio_interface.h"
Deike Kleberg's avatar
Deike Kleberg committed
30
#include "pio_rpc.h"
Deike Kleberg's avatar
Deike Kleberg committed
31
#include "pio_util.h"
32
#include "cdi_int.h"
33
34
35
#ifndef HAVE_NETCDF_PAR_H
#define MPI_INCLUDED
#endif
36
#include "pio_cdf_int.h"
37
#include "resource_handle.h"
38
#include "resource_unpack.h"
Thomas Jahns's avatar
Thomas Jahns committed
39
#include "stream_cdf.h"
Deike Kleberg's avatar
Deike Kleberg committed
40
#include "vlist_var.h"
41

42

43
/* resource-handler ops for streams, defined elsewhere in the library */
extern resOps streamOps;
extern void arrayDestroy ( void );

/* Per-model-rank description of the receive buffer the collector
 * reads transferred data from.  Only rxWin[0].buffer is an actual
 * allocation; all other entries point into that single block (see
 * serverWinCreate).  Sizes are computed by collDefBufferSizes. */
static struct
{
  size_t size;           /* size in bytes of this rank's buffer region */
  unsigned char *buffer; /* start of this rank's region */
  int dictSize;          /* number of winHeaderEntry records in the region */
} *rxWin = NULL;

/* RMA window used for the model-to-collector data transfer */
static MPI_Win getWin = MPI_WIN_NULL;
/* group of model processes (calc group minus the collector rank) */
static MPI_Group groupModel = MPI_GROUP_NULL;

#ifdef HAVE_PARALLEL_NC4
/* prime factorization of number of pio collectors */
static uint32_t *pioPrimes;
static int numPioPrimes;
#endif
Deike Kleberg's avatar
Deike Kleberg committed
61

Deike Kleberg's avatar
Deike Kleberg committed
62
63
/************************************************************************/

64
static
Deike Kleberg's avatar
Deike Kleberg committed
65
66
void serverWinCleanup ()
{
67
68
  if (getWin != MPI_WIN_NULL)
    xmpi(MPI_Win_free(&getWin));
69
70
  if (rxWin)
    {
71
      free(rxWin[0].buffer);
72
      free(rxWin);
Deike Kleberg's avatar
Deike Kleberg committed
73
    }
74

75
  xdebug("%s", "cleaned up mpi_win");
Deike Kleberg's avatar
Deike Kleberg committed
76
}
77

Deike Kleberg's avatar
Deike Kleberg committed
78
 /************************************************************************/
79

80
81
/* Compute the per-model-rank window buffer requirements
 * (rxWin[*].size in bytes and rxWin[*].dictSize in header entries)
 * for every variable this collector rank is responsible for, plus
 * room on the root rank for the serialized RPC function calls.
 * Returns the total buffer size summed over all model ranks, each
 * rank's share rounded up to PIO_WIN_ALIGN. */
static size_t
collDefBufferSizes()
{
  int nstreams, * streamIndexList, streamNo, vlistID, nvars, varID, iorank;
  int modelID;
  size_t sumGetBufferSizes = 0;
  int rankGlob = commInqRankGlob ();
  int nProcsModel = commInqNProcsModel ();
  int root = commInqRootGlob ();

  xassert(rxWin != NULL);

  nstreams = reshCountType ( &streamOps );
  streamIndexList = xmalloc ( nstreams * sizeof ( streamIndexList[0] ));
  reshGetResHListOfType ( nstreams, streamIndexList, &streamOps );
  for ( streamNo = 0; streamNo < nstreams; streamNo++ )
    {
      // space required for data
      vlistID = streamInqVlist ( streamIndexList[streamNo] );
      nvars = vlistNvars ( vlistID );
      for ( varID = 0; varID < nvars; varID++ )
        {
          iorank = vlistInqVarIOrank ( vlistID, varID );
          xassert ( iorank != CDI_UNDEFID );
          /* only account for variables this collector rank writes */
          if ( iorank == rankGlob )
            {
              for ( modelID = 0; modelID < nProcsModel; modelID++ )
                {
                  /* estimated per-model-rank share of the variable,
                   * inflated by cdiPIOpartInflate_ to leave slack for
                   * uneven decompositions */
                  int decoChunk;
                  {
                    int varSize = vlistInqVarSize(vlistID, varID);
                    /* NOTE(review): shadows the outer nProcsModel with
                     * the same value -- harmless but confusing */
                    int nProcsModel = commInqNProcsModel();
                    decoChunk =
                      (int)ceilf(cdiPIOpartInflate_
                                 * (varSize + nProcsModel - 1)/nProcsModel);
                  }
                  xassert ( decoChunk > 0 );
                  rxWin[modelID].size += decoChunk * sizeof (double)
                    /* re-align chunks to multiple of double size */
                    + sizeof (double) - 1
                    /* one header for data record, one for
                     * corresponding part descriptor*/
                    + 2 * sizeof (struct winHeaderEntry)
                    /* FIXME: heuristic for size of packed Xt_idxlist */
                    + sizeof (Xt_int) * decoChunk * 3;
                  rxWin[modelID].dictSize += 2;
                }
            }
        }
      // space required for the 3 function calls streamOpen, streamDefVlist, streamClose 
      // once per stream and timestep for all collprocs only on the modelproc root
      rxWin[root].size += numRPCFuncs * sizeof (struct winHeaderEntry)
        /* serialized filename */
        + MAXDATAFILENAME
        /* data part of streamDefTimestep */
        + (2 * CDI_MAX_NAME + sizeof (taxis_t));
      rxWin[root].dictSize += numRPCFuncs;
    }
  free ( streamIndexList );

  for ( modelID = 0; modelID < nProcsModel; modelID++ )
    {
      /* account for size header */
      rxWin[modelID].dictSize += 1;
      rxWin[modelID].size += sizeof (struct winHeaderEntry);
      rxWin[modelID].size = roundUpToMultiple(rxWin[modelID].size,
                                              PIO_WIN_ALIGN);
      sumGetBufferSizes += (size_t)rxWin[modelID].size;
    }
  xassert ( sumGetBufferSizes <= MAXWINBUFFERSIZE );
  return sumGetBufferSizes;
}
152

Deike Kleberg's avatar
Deike Kleberg committed
153
 /************************************************************************/
154

155
156
157
static void
serverWinCreate(void)
{
Deike Kleberg's avatar
Deike Kleberg committed
158
  int ranks[1], modelID;
159
  MPI_Comm commCalc = commInqCommCalc ();
Deike Kleberg's avatar
Deike Kleberg committed
160
  MPI_Group groupCalc;
161
  int nProcsModel = commInqNProcsModel ();
162
163
164
  MPI_Info no_locks_info;
  xmpi(MPI_Info_create(&no_locks_info));
  xmpi(MPI_Info_set(no_locks_info, "no_locks", "true"));
Deike Kleberg's avatar
Deike Kleberg committed
165

166
  xmpi(MPI_Win_create(MPI_BOTTOM, 0, 1, no_locks_info, commCalc, &getWin));
Deike Kleberg's avatar
Deike Kleberg committed
167
168

  /* target group */
169
170
  ranks[0] = nProcsModel;
  xmpi ( MPI_Comm_group ( commCalc, &groupCalc ));
Deike Kleberg's avatar
Deike Kleberg committed
171
172
  xmpi ( MPI_Group_excl ( groupCalc, 1, ranks, &groupModel ));

173
  rxWin = xcalloc(nProcsModel, sizeof (rxWin[0]));
174
175
176
177
178
179
180
181
  size_t totalBufferSize = collDefBufferSizes();
  rxWin[0].buffer = xmalloc(totalBufferSize);
  size_t ofs = 0;
  for ( modelID = 1; modelID < nProcsModel; modelID++ )
    {
      ofs += rxWin[modelID - 1].size;
      rxWin[modelID].buffer = rxWin[0].buffer + ofs;
    }
Deike Kleberg's avatar
Deike Kleberg committed
182

183
184
  xmpi(MPI_Info_free(&no_locks_info));

185
  xdebug("%s", "created mpi_win, allocated getBuffer");
Deike Kleberg's avatar
Deike Kleberg committed
186
187
}

Deike Kleberg's avatar
Deike Kleberg committed
188
189
/************************************************************************/

190
/* Execute one RPC-style function call deposited by the model root
 * in the window buffer.  header->id selects the operation; scalar
 * arguments come from header->specific.funcArgs, variable-length
 * payloads (filename, packed taxis) are read from the root rank's
 * buffer region at header->offset. */
static void
readFuncCall(struct winHeaderEntry *header)
{
  int root = commInqRootGlob ();
  int funcID = header->id;
  union funcArgs *funcArgs = &(header->specific.funcArgs);

  xassert(funcID >= MINFUNCID && funcID <= MAXFUNCID);
  switch ( funcID )
    {
    case STREAMCLOSE:
      {
        /* stream handles are namespace-local; translate the model's
         * handle to this server's namespace */
        int streamID
          = namespaceAdaptKey2(funcArgs->streamChange.streamID);
        streamClose(streamID);
        xdebug("READ FUNCTION CALL FROM WIN:  %s, streamID=%d,"
               " closed stream",
               funcMap[(-1 - funcID)], streamID);
      }
      break;
    case STREAMOPEN:
      {
        size_t filenamesz = funcArgs->newFile.fnamelen;
        xassert ( filenamesz > 0 && filenamesz < MAXDATAFILENAME );
        /* filename is serialized in the root's buffer region,
         * NUL-terminated at filenamesz */
        const char *filename
          = (const char *)(rxWin[root].buffer + header->offset);
        xassert(filename[filenamesz] == '\0');
        int filetype = funcArgs->newFile.filetype;
        int streamID = streamOpenWrite(filename, filetype);
        xassert(streamID != CDI_ELIBNAVAIL);
        xdebug("READ FUNCTION CALL FROM WIN:  %s, filenamesz=%zu,"
               " filename=%s, filetype=%d, OPENED STREAM %d",
               funcMap[(-1 - funcID)], filenamesz, filename,
               filetype, streamID);
      }
      break;
    case STREAMDEFVLIST:
      {
        int streamID
          = namespaceAdaptKey2(funcArgs->streamChange.streamID);
        int vlistID = namespaceAdaptKey2(funcArgs->streamChange.vlistID);
        streamDefVlist(streamID, vlistID);
        xdebug("READ FUNCTION CALL FROM WIN:  %s, streamID=%d,"
               " vlistID=%d, called streamDefVlist ().",
               funcMap[(-1 - funcID)], streamID, vlistID);
      }
      break;
    case STREAMDEFTIMESTEP:
      {
        MPI_Comm commCalc = commInqCommCalc ();
        int streamID = funcArgs->streamNewTimestep.streamID;
        /* decode sender namespace BEFORE adapting the handle */
        int nspTarget = namespaceResHDecode(streamID).nsp;
        streamID = namespaceAdaptKey2(streamID);
        int oldTaxisID
          = vlistInqTaxis(streamInqVlist(streamID));
        int position = header->offset;
        /* unpack the transferred taxis, copy its contents over the
         * stream's existing taxis, then drop the temporary */
        int changedTaxisID
          = taxisUnpack((char *)rxWin[root].buffer, (int)rxWin[root].size,
                        &position, nspTarget, &commCalc, 0);
        taxis_t *oldTaxisPtr = taxisPtr(oldTaxisID);
        taxis_t *changedTaxisPtr = taxisPtr(changedTaxisID);
        ptaxisCopy(oldTaxisPtr, changedTaxisPtr);
        taxisDestroy(changedTaxisID);
        streamDefTimestep(streamID, funcArgs->streamNewTimestep.tsID);
      }
      break;
    default:
      xabort ( "REMOTE FUNCTIONCALL NOT IMPLEMENTED!" );
    }
}

/************************************************************************/

263
264
265
266
267
268
269
270
271
272
273
274
275
/* Grow *buf (tracked capacity in *bufSize, counted in doubles) so it
 * can hold the full field of variable varID; never shrinks. */
static void
resizeVarGatherBuf(int vlistID, int varID, double **buf, int *bufSize)
{
  int neededSize = vlistInqVarSize(vlistID, varID);
  if (neededSize > *bufSize)
    {
      *bufSize = neededSize;
      *buf = xrealloc(*buf, neededSize * sizeof (**buf));
    }
}

/* Gather one variable's data from all model ranks' window buffer
 * regions into gatherBuf (which must hold the variable's full size).
 * headerIdx indexes the data-record header in each rank's buffer
 * dictionary; entry headerIdx + 1 must be the packed Xt_idxlist
 * describing which global indices that rank provided.  The
 * redistribution is purely local (MPI_COMM_SELF): source offsets are
 * element positions inside the contiguous rxWin allocation.
 * *nmiss is set to the sum of the per-rank missing-value counts. */
static void
gatherArray(int root, int nProcsModel, int headerIdx,
            int vlistID,
            double *gatherBuf, int *nmiss)
{
  struct winHeaderEntry *winDict
    = (struct winHeaderEntry *)rxWin[root].buffer;
  int streamID = winDict[headerIdx].id;
  int varID = winDict[headerIdx].specific.dataRecord.varID;
  int varShape[3] = { 0, 0, 0 };
  cdiPioQueryVarDims(varShape, vlistID, varID);
  Xt_int varShapeXt[3];
  static const Xt_int origin[3] = { 0, 0, 0 };
  for (unsigned i = 0; i < 3; ++i)
    varShapeXt[i] = varShape[i];
  int varSize = varShape[0] * varShape[1] * varShape[2];
  /* single allocation holds both offset arrays: first half source
   * (window) offsets, second half destination (gatherBuf) offsets */
  int *partOfs = xmalloc(2 * varSize * sizeof (partOfs[0])),
    *gatherOfs = partOfs + varSize;
  Xt_idxlist *part = xmalloc(nProcsModel * sizeof (part[0]));
  MPI_Comm commCalc = commInqCommCalc();
  {
    int nmiss_ = 0, partOfsOfs = 0;
    for (int modelID = 0; modelID < nProcsModel; modelID++)
      {
        struct dataRecord *dataHeader
          = &((struct winHeaderEntry *)
              rxWin[modelID].buffer)[headerIdx].specific.dataRecord;
        int position =
          ((struct winHeaderEntry *)rxWin[modelID].buffer)[headerIdx + 1].offset;
        /* sanity-check that every rank's dictionary entry matches
         * the stream/variable being gathered and that the packed
         * part descriptor lies inside the data area of the buffer */
        xassert(namespaceAdaptKey2(((struct winHeaderEntry *)
                                    rxWin[modelID].buffer)[headerIdx].id)
                == streamID
                && dataHeader->varID == varID
                && ((struct winHeaderEntry *)
                    rxWin[modelID].buffer)[headerIdx + 1].id == PARTDESCMARKER
                && position > 0
                && ((size_t)position
                    >= sizeof (struct winHeaderEntry) * rxWin[modelID].dictSize)
                && ((size_t)position < rxWin[modelID].size));
        part[modelID] = xt_idxlist_unpack(rxWin[modelID].buffer,
                                          (int)rxWin[modelID].size,
                                          &position, commCalc);
        Xt_int partSize = xt_idxlist_get_num_indices(part[modelID]);
        /* byte offset of this rank's data relative to the start of
         * the contiguous overall allocation (rxWin[0].buffer) */
        size_t charOfs = (rxWin[modelID].buffer
                          + ((struct winHeaderEntry *)
                             rxWin[modelID].buffer)[headerIdx].offset)
          - rxWin[0].buffer;
        xassert(charOfs % sizeof (double) == 0
                && charOfs / sizeof (double) + partSize <= INT_MAX);
        int elemOfs = charOfs / sizeof (double);
        for (int i = 0; i < (int)partSize; ++i)
          partOfs[partOfsOfs + i] = elemOfs + i;
        partOfsOfs += partSize;
        nmiss_ += dataHeader->nmiss;
      }
    *nmiss = nmiss_;
  }
  Xt_idxlist srcList = xt_idxlist_collection_new(part, nProcsModel);
  for (int modelID = 0; modelID < nProcsModel; modelID++)
    xt_idxlist_delete(part[modelID]);
  free(part);
  Xt_xmap gatherXmap;
  {
    /* destination is the complete variable as one dense section */
    Xt_idxlist dstList
      = xt_idxsection_new(0, 3, varShapeXt, varShapeXt, origin);
    struct Xt_com_list full = { .list = dstList, .rank = 0 };
    gatherXmap = xt_xmap_intersection_new(1, &full, 1, &full, srcList, dstList,
                                        MPI_COMM_SELF);
    xt_idxlist_delete(dstList);
  }
  xt_idxlist_delete(srcList);
  for (int i = 0; i < varSize; ++i)
    gatherOfs[i] = i;

  Xt_redist gatherRedist
    = xt_redist_p2p_off_new(gatherXmap, partOfs, gatherOfs, MPI_DOUBLE);
  xt_xmap_delete(gatherXmap);
  xt_redist_s_exchange1(gatherRedist, rxWin[0].buffer, gatherBuf);
  free(partOfs);
  xt_redist_delete(gatherRedist);
}

/* extents of a 3-D decomposition of the collector processes */
struct xyzDims
{
  int sizes[3];
};

/* total number of cells covered by a 3-D decomposition */
static inline int
xyzGridSize(struct xyzDims dims)
{
  int product = 1;
  for (int i = 0; i < 3; ++i)
    product *= dims.sizes[i];
  return product;
}

#ifdef HAVE_PARALLEL_NC4
static void
366
queryVarBounds(struct PPM_extent varShape[3], int vlistID, int varID)
367
{
368
369
  varShape[0].first = 0;
  varShape[1].first = 0;
370
  varShape[2].first = 0;
371
  int sizes[3];
372
  cdiPioQueryVarDims(sizes, vlistID, varID);
373
374
  for (unsigned i = 0; i < 3; ++i)
    varShape[i].size = sizes[i];
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
}

/* compute distribution of collectors such that number of collectors
 * <= number of variable grid cells in each dimension */
/* Greedily assigns each prime factor of the collector count to the
 * highest-numbered dimension that can still accommodate it; aborts
 * if a prime fits no dimension (back-tracking not implemented). */
static struct xyzDims
varDimsCollGridMatch(const struct PPM_extent varDims[3])
{
  /* the variable must have at least as many cells as collectors */
  xassert(PPM_extents_size(3, varDims) >= commInqSizeColl());
  struct xyzDims collGrid = { { 1, 1, 1 } };
  /* because of storage order, dividing dimension 3 first is preferred */
  for (int i = 0; i < numPioPrimes; ++i)
    {
      for (int dim = 2; dim >=0; --dim)
        if (collGrid.sizes[dim] * pioPrimes[i] <= varDims[dim].size)
          {
            collGrid.sizes[dim] *= pioPrimes[i];
            goto nextPrime;
          }
      /* no position found, retrack */
      xabort("Not yet implemented back-tracking needed.");
      nextPrime:
      ;
    }
  return collGrid;
}

/* Compute this collector rank's sub-block (myPart) of the variable
 * extents varShape under the collector decomposition collGrid: the
 * rank is first mapped to a 3-D coordinate in the collector grid,
 * then a uniform n-dimensional partition selects its chunk. */
static void
myVarPart(struct PPM_extent varShape[3], struct xyzDims collGrid,
          struct PPM_extent myPart[3])
{
  int32_t myCollGridCoord[3];
  {
    struct PPM_extent collGridShape[3];
    for (int i = 0; i < 3; ++i)
      {
        collGridShape[i].first = 0;
        collGridShape[i].size = collGrid.sizes[i];
      }
    /* linear collector rank -> 3-D grid coordinate */
    PPM_lidx2rlcoord_e(3, collGridShape, commInqRankColl(), myCollGridCoord);
    xdebug("my coord: (%d, %d, %d)", myCollGridCoord[0], myCollGridCoord[1],
           myCollGridCoord[2]);
  }
  PPM_uniform_partition_nd(3, varShape, collGrid.sizes,
                           myCollGridCoord, myPart);
}
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
#elif defined (HAVE_LIBNETCDF)
/* needed for writing when some files are only written to by a single process */
/* cdiOpenFileMap(fileID) gives the writer process */
/* Return the collector rank that owns (i.e. performs the writes for)
 * the given stream. */
int cdiPioSerialOpenFileMap(int streamID)
{
  return stream_to_pointer(streamID)->ownerRank;
}
/* for load-balancing purposes, count number of files per process */
/* cdiOpenFileCounts[rank] gives number of open files rank has to himself */
static int *cdiSerialOpenFileCount = NULL;

/* Return the collector rank currently holding the fewest open files;
 * the lowest such rank wins ties. */
int cdiPioNextOpenRank()
{
  xassert(cdiSerialOpenFileCount != NULL);
  int numCollRanks = commInqSizeColl();
  int best = 0;
  for (int rank = 1; rank < numCollRanks; ++rank)
    if (cdiSerialOpenFileCount[rank] < cdiSerialOpenFileCount[best])
      best = rank;
  return best;
}

/* Record that one more file has been opened on the given collector
 * rank (for load balancing via cdiPioNextOpenRank). */
void cdiPioOpenFileOnRank(int rank)
{
  xassert(cdiSerialOpenFileCount != NULL
          && rank >= 0 && rank < commInqSizeColl());
  cdiSerialOpenFileCount[rank] += 1;
}


/* Record that one file has been closed on the given collector rank;
 * the rank must have at least one file registered as open. */
void cdiPioCloseFileOnRank(int rank)
{
  xassert(cdiSerialOpenFileCount != NULL
          && rank >= 0 && rank < commInqSizeColl());
  xassert(cdiSerialOpenFileCount[rank] > 0);
  cdiSerialOpenFileCount[rank] -= 1;
}

460
461
462
463
464
465
466
467
468
469
/* Define the netCDF variables of a stream: always in PIO_NONE mode,
 * otherwise only on the collector rank that owns the file. */
static void
cdiPioServerCdfDefVars(stream_t *streamptr)
{
  if (commInqIOMode() == PIO_NONE)
    {
      cdfDefVars(streamptr);
      return;
    }
  int myRank = commInqRankColl();
  int ownerRank = cdiPioSerialOpenFileMap(streamptr->self);
  if (myRank == ownerRank)
    cdfDefVars(streamptr);
}

470
471
#endif

472
473
474
475
476
477
/* Per-stream bookkeeping for data transferred in the current
 * timestep: the range of window-dictionary header indices that
 * reference the stream and, for netCDF output, a per-variable map
 * from varID to the header index carrying its data (-1 = none). */
struct streamMapping {
  int streamID, filetype;
  int firstHeaderIdx, lastHeaderIdx;
  int numVars, *varMap;
};

/* dynamically sized collection of streamMapping entries */
struct streamMap
{
  struct streamMapping *entries;
  int numEntries;
};

/* qsort comparator: order streamMapping entries by ascending streamID */
static int
smCmpStreamID(const void *a_, const void *b_)
{
  int idA = ((const struct streamMapping *)a_)->streamID;
  int idB = ((const struct streamMapping *)b_)->streamID;
  if (idA < idB)
    return -1;
  if (idA > idB)
    return 1;
  return 0;
}

492
493
494
495
496
497
498
499
500
501
502
503
504
505
/* Append streamID to streamMap, doubling the map's capacity when
 * full.  headerIdx is recorded as both first and last header index
 * (-1 when the stream has no local data); for netCDF filetypes a
 * varID -> headerIdx map is allocated and initialized to -1.
 * Returns the new number of entries.
 *
 * NOTE(review): when the map is grown, xrealloc may move the
 * allocation, but only the local (by-value) streamMap pointer is
 * updated -- the caller's pointer is left dangling.  This is a
 * latent use-after-free once more than *sizeStreamMap_ streams are
 * inventorized; fixing it requires passing streamMap by reference
 * (signature change affecting all callers), so it is only flagged
 * here. */
static inline int
inventorizeStream(struct streamMapping *streamMap, int numStreamIDs,
                  int *sizeStreamMap_, int streamID, int headerIdx)
{
  int sizeStreamMap = *sizeStreamMap_;
  if (numStreamIDs < sizeStreamMap) ; else
    {
      streamMap = xrealloc(streamMap,
                           (sizeStreamMap *= 2)
                           * sizeof (streamMap[0]));
      *sizeStreamMap_ = sizeStreamMap;
    }
  streamMap[numStreamIDs].streamID = streamID;
  streamMap[numStreamIDs].firstHeaderIdx = headerIdx;
  streamMap[numStreamIDs].lastHeaderIdx = headerIdx;
  /* numVars stays -1 for non-netCDF filetypes */
  streamMap[numStreamIDs].numVars = -1;
  int filetype = streamInqFiletype(streamID);
  streamMap[numStreamIDs].filetype = filetype;
  if (filetype == FILETYPE_NC || filetype == FILETYPE_NC2
      || filetype == FILETYPE_NC4)
    {
      int vlistID = streamInqVlist(streamID);
      int nvars = vlistNvars(vlistID);
      streamMap[numStreamIDs].numVars = nvars;
      streamMap[numStreamIDs].varMap
        = xmalloc(sizeof (streamMap[numStreamIDs].varMap[0])
                  * nvars);
      for (int i = 0; i < nvars; ++i)
        streamMap[numStreamIDs].varMap[i] = -1;
    }
  return numStreamIDs + 1;
}

525
526
527
528
529
530
531
532
533
534
/* Return non-zero iff streamIDQuery occurs among the first
 * numStreamIDs entries of streamMap. */
static inline int
streamIsInList(struct streamMapping *streamMap, int numStreamIDs,
               int streamIDQuery)
{
  for (int i = 0; i < numStreamIDs; ++i)
    if (streamMap[i].streamID == streamIDQuery)
      return 1;
  return 0;
}

535
/* Build the per-timestep stream map from the window dictionary:
 * walk the data headers (every second entry starting at index 1),
 * inventorize each stream on first sight, track each stream's
 * first/last header index and (for netCDF) which header carries
 * each variable's data.  The local list is then joined, via an
 * Allreduce over the collector communicator, with the streams
 * written on any other collector, so every collector ends up with
 * the same set of streams, sorted by streamID. */
static struct streamMap
buildStreamMap(struct winHeaderEntry *winDict)
{
  int streamIDOld = CDI_UNDEFID;
  int oldStreamIdx = CDI_UNDEFID;
  int filetype = CDI_UNDEFID;
  int sizeStreamMap = 16;
  struct streamMapping *streamMap
    = xmalloc(sizeStreamMap * sizeof (streamMap[0]));
  int numDataEntries = winDict[0].specific.headerSize.numDataEntries;
  int numStreamIDs = 0;
  /* find streams written on this process */
  /* data entries come in pairs: data-record header, part descriptor */
  for (int headerIdx = 1; headerIdx < numDataEntries; headerIdx += 2)
    {
      /* translate the model's stream handle in place */
      int streamID
        = winDict[headerIdx].id
        = namespaceAdaptKey2(winDict[headerIdx].id);
      xassert(streamID > 0);
      if (streamID != streamIDOld)
        {
          /* search backwards: consecutive records for the same
           * stream make the most recent entry the likeliest hit */
          for (int i = numStreamIDs - 1; i >= 0; --i)
            if ((streamIDOld = streamMap[i].streamID) == streamID)
              {
                oldStreamIdx = i;
                goto streamIDInventorized;
              }
          oldStreamIdx = numStreamIDs;
          streamIDOld = streamID;
          numStreamIDs = inventorizeStream(streamMap, numStreamIDs,
                                           &sizeStreamMap, streamID, headerIdx);
        }
      streamIDInventorized:
      filetype = streamMap[oldStreamIdx].filetype;
      streamMap[oldStreamIdx].lastHeaderIdx = headerIdx;
      if (filetype == FILETYPE_NC || filetype == FILETYPE_NC2
          || filetype == FILETYPE_NC4)
        {
          int varID = winDict[headerIdx].specific.dataRecord.varID;
          streamMap[oldStreamIdx].varMap[varID] = headerIdx;
        }
    }
  /* join with list of streams written to in total */
  {
    int *streamIDs, *streamIsWritten;
    int numTotalStreamIDs = streamSize();
    streamIDs = xmalloc(2 * sizeof (streamIDs[0]) * (size_t)numTotalStreamIDs);
    streamGetIndexList(numTotalStreamIDs, streamIDs);
    streamIsWritten = streamIDs + numTotalStreamIDs;
    for (int i = 0; i < numTotalStreamIDs; ++i)
      streamIsWritten[i] = streamIsInList(streamMap, numStreamIDs,
                                          streamIDs[i]);
    /* Find what streams are written to at all on any process */
    xmpi(MPI_Allreduce(MPI_IN_PLACE, streamIsWritten, numTotalStreamIDs,
                       MPI_INT, MPI_BOR, commInqCommColl()));
    /* append streams written to on other tasks to mapping */
    for (int i = 0; i < numTotalStreamIDs; ++i)
      if (streamIsWritten[i] && !streamIsInList(streamMap, numStreamIDs,
                                                streamIDs[i]))
        numStreamIDs = inventorizeStream(streamMap, numStreamIDs,
                                         &sizeStreamMap, streamIDs[i], -1);

    free(streamIDs);
  }
  /* sort written streams by streamID */
  streamMap = xrealloc(streamMap, sizeof (streamMap[0]) * numStreamIDs);
  qsort(streamMap, numStreamIDs, sizeof (streamMap[0]), smCmpStreamID);
  return (struct streamMap){ .entries = streamMap, .numEntries = numStreamIDs };
}

604
605
606
607
608
609
610
611
612
613
/* Gather and write all GRIB records of one stream for the current
 * timestep.  For every data header belonging to mapping->streamID
 * the variable is gathered from the model ranks' window buffers
 * (into *data_, which is grown as needed) and written with
 * streamWriteVar.  If no data at all was transferred for this
 * stream (lastHeaderIdx < 0) a zero-size write is issued instead. */
static void
writeGribStream(struct winHeaderEntry *winDict, struct streamMapping *mapping,
                double **data_, int *currentDataBufSize, int root,
                int nProcsModel)
{
  int streamID = mapping->streamID;
  int headerIdx, lastHeaderIdx = mapping->lastHeaderIdx;
  int vlistID = streamInqVlist(streamID);
  if (lastHeaderIdx < 0)
    {
      /* write zero bytes to trigger synchronization code in fileWrite */
      cdiPioFileWrite(streamInqFileID(streamID), NULL, 0,
                      streamInqCurTimestepID(streamID));
    }
  else
    /* headers come in (data record, part descriptor) pairs -> step 2 */
    for (headerIdx = mapping->firstHeaderIdx;
         headerIdx <= lastHeaderIdx;
         headerIdx += 2)
      if (streamID == winDict[headerIdx].id)
        {
          int varID = winDict[headerIdx].specific.dataRecord.varID;
          int size = vlistInqVarSize(vlistID, varID);
          int nmiss;
          resizeVarGatherBuf(vlistID, varID, data_, currentDataBufSize);
          double *data = *data_;
          gatherArray(root, nProcsModel, headerIdx,
                      vlistID, data, &nmiss);
          streamWriteVar(streamID, varID, data, nmiss);
          if ( ddebug > 2 )
            {
              char text[1024];
              sprintf(text, "streamID=%d, var[%d], size=%d",
                      streamID, varID, size);
              xprintArray(text, data, size, DATATYPE_FLT);
            }
        }
}
641

642
643
644
645
646
647
648
#ifdef HAVE_NETCDF4
/* Determine, for every variable of the mapped stream, which
 * collector rank received data for it.  On return
 * (*varIsWritten_)[varID] is 0 when no collector got data for
 * varID, otherwise writerRank + 1 for a rank that did (the buffer
 * is reallocated to the stream's variable count).
 * NOTE(review): values are combined with MPI_BOR, so if more than
 * one collector had data for the same varID the result is the
 * bitwise OR of their rank+1 values -- callers appear to assume a
 * unique writer per variable; confirm this invariant. */
static void
buildWrittenVars(struct streamMapping *mapping, int **varIsWritten_,
                 int myCollRank, MPI_Comm collComm)
{
  int nvars = mapping->numVars;
  int *varMap = mapping->varMap;
  int *varIsWritten = *varIsWritten_
    = xrealloc(*varIsWritten_, sizeof (*varIsWritten) * nvars);
  for (int varID = 0; varID < nvars; ++varID)
    varIsWritten[varID] = ((varMap[varID] != -1)
                           ?myCollRank+1 : 0);
  xmpi(MPI_Allreduce(MPI_IN_PLACE, varIsWritten, nvars,
                     MPI_INT, MPI_BOR, collComm));
}
658

659
static void readGetBuffers()
Deike Kleberg's avatar
Deike Kleberg committed
660
{
661
  int nProcsModel = commInqNProcsModel ();
Deike Kleberg's avatar
Deike Kleberg committed
662
  int root        = commInqRootGlob ();
663
#ifdef HAVE_NETCDF4
664
  int myCollRank = commInqRankColl();
665
  MPI_Comm collComm = commInqCommColl();
666
#endif
667
  xdebug("%s", "START");
668

669
670
  struct winHeaderEntry *winDict
    = (struct winHeaderEntry *)rxWin[root].buffer;
671
  xassert(winDict[0].id == HEADERSIZEMARKER);
672
673
  {
    int dictSize = rxWin[root].dictSize,
674
      firstNonRPCEntry = dictSize - winDict[0].specific.headerSize.numRPCEntries - 1,
675
676
677
678
679
680
      headerIdx,
      numFuncCalls = 0;
    for (headerIdx = dictSize - 1;
         headerIdx > firstNonRPCEntry;
         --headerIdx)
      {
681
682
        xassert(winDict[headerIdx].id >= MINFUNCID
                && winDict[headerIdx].id <= MAXFUNCID);
683
        ++numFuncCalls;
684
        readFuncCall(winDict + headerIdx);
685
      }
686
    xassert(numFuncCalls == winDict[0].specific.headerSize.numRPCEntries);
687
  }
Thomas Jahns's avatar
Thomas Jahns committed
688
  /* build list of streams, data was transferred for */
689
  {
690
    struct streamMap map = buildStreamMap(winDict);
691
    double *data = NULL;
Thomas Jahns's avatar
Thomas Jahns committed
692
693
694
#ifdef HAVE_NETCDF4
    int *varIsWritten = NULL;
#endif
695
696
697
#if defined (HAVE_PARALLEL_NC4)
    double *writeBuf = NULL;
#endif
Thomas Jahns's avatar
Thomas Jahns committed
698
    int currentDataBufSize = 0;
699
    for (int streamIdx = 0; streamIdx < map.numEntries; ++streamIdx)
Thomas Jahns's avatar
Thomas Jahns committed
700
      {
701
        int streamID = map.entries[streamIdx].streamID;
Thomas Jahns's avatar
Thomas Jahns committed
702
        int vlistID = streamInqVlist(streamID);
703
        int filetype = map.entries[streamIdx].filetype;
Thomas Jahns's avatar
Thomas Jahns committed
704

705
        switch (filetype)
706
707
708
          {
          case FILETYPE_GRB:
          case FILETYPE_GRB2:
709
710
711
            writeGribStream(winDict, map.entries + streamIdx,
                            &data, &currentDataBufSize,
                            root, nProcsModel);
712
            break;
713
714
715
716
717
718
719
#ifdef HAVE_NETCDF4
          case FILETYPE_NC:
          case FILETYPE_NC2:
          case FILETYPE_NC4:
#ifdef HAVE_PARALLEL_NC4
            /* HAVE_PARALLE_NC4 implies having ScalES-PPM and yaxt */
            {
720
721
              int nvars = map.entries[streamIdx].numVars;
              int *varMap = map.entries[streamIdx].varMap;
722
723
              buildWrittenVars(map.entries + streamIdx, &varIsWritten,
                               myCollRank, collComm);
724
725
726
727
              for (int varID = 0; varID < nvars; ++varID)
                if (varIsWritten[varID])
                  {
                    struct PPM_extent varShape[3];
728
                    queryVarBounds(varShape, vlistID, varID);
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
                    struct xyzDims collGrid = varDimsCollGridMatch(varShape);
                    xdebug("writing varID %d with dimensions: "
                           "x=%d, y=%d, z=%d,\n"
                           "found distribution with dimensions:"
                           " x=%d, y=%d, z=%d.", varID,
                           varShape[0].size, varShape[1].size, varShape[2].size,
                           collGrid.sizes[0], collGrid.sizes[1],
                           collGrid.sizes[2]);
                    struct PPM_extent varChunk[3];
                    myVarPart(varShape, collGrid, varChunk);
                    int myChunk[3][2];
                    for (int i = 0; i < 3; ++i)
                      {
                        myChunk[i][0] = PPM_extent_start(varChunk[i]);
                        myChunk[i][1] = PPM_extent_end(varChunk[i]);
                      }
                    xdebug("Writing chunk { { %d, %d }, { %d, %d },"
                           " { %d, %d } }", myChunk[0][0], myChunk[0][1],
                           myChunk[1][0], myChunk[1][1], myChunk[2][0],
                           myChunk[2][1]);
                    Xt_int varSize[3];
                    for (int i = 0; i < 3; ++i)
                      varSize[2 - i] = varShape[i].size;
                    Xt_idxlist preRedistChunk, preWriteChunk;
                    /* prepare yaxt descriptor for current data
                       distribution after collect */
                    int nmiss;
                    if (varMap[varID] == -1)
                      {
                        preRedistChunk = xt_idxempty_new();
                        xdebug("%s", "I got none\n");
                      }
                    else
                      {
                        Xt_int preRedistStart[3] = { 0, 0, 0 };
                        preRedistChunk
                          = xt_idxsection_new(0, 3, varSize, varSize,
                                              preRedistStart);
                        resizeVarGatherBuf(vlistID, varID, &data,
                                           &currentDataBufSize);
                        int headerIdx = varMap[varID];
                        gatherArray(root, nProcsModel, headerIdx,
                                    vlistID, data, &nmiss);
                        xdebug("%s", "I got all\n");
                      }
                    MPI_Bcast(&nmiss, 1, MPI_INT, varIsWritten[varID] - 1,
                              collComm);
                    /* prepare yaxt descriptor for write chunk */
                    {
                      Xt_int preWriteChunkStart[3], preWriteChunkSize[3];
                      for (int i = 0; i < 3; ++i)
                        {
                          preWriteChunkStart[2 - i] = varChunk[i].first;
                          preWriteChunkSize[2 - i] = varChunk[i].size;
                        }
                      preWriteChunk = xt_idxsection_new(0, 3, varSize,
                                                        preWriteChunkSize,
                                                        preWriteChunkStart);
                    }
                    /* prepare redistribution */
                    {
                      Xt_xmap xmap = xt_xmap_all2all_new(preRedistChunk,
                                                         preWriteChunk,
                                                         collComm);
                      Xt_redist redist = xt_redist_p2p_new(xmap, MPI_DOUBLE);
                      xt_idxlist_delete(preRedistChunk);
                      xt_idxlist_delete(preWriteChunk);
                      xt_xmap_delete(xmap);
                      writeBuf = xrealloc(writeBuf,
                                          sizeof (double)
                                          * PPM_extents_size(3, varChunk));
800
                      xt_redist_s_exchange1(redist, data, writeBuf);
801
802
803
804
805
806
807
808
809
810
                      xt_redist_delete(redist);
                    }
                    /* write chunk */
                    streamWriteVarChunk(streamID, varID,
                                        (const int (*)[2])myChunk, writeBuf,
                                        nmiss);
                  }
            }
#else
            /* determine process which has stream open (writer) and
811
812
813
             * which has data for which variable (var owner)
             * three cases need to be distinguished */
            {
814
815
              int nvars = map.entries[streamIdx].numVars;
              int *varMap = map.entries[streamIdx].varMap;
816
817
              buildWrittenVars(map.entries + streamIdx, &varIsWritten,
                               myCollRank, collComm);
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
              int writerRank;
              if ((writerRank = cdiPioSerialOpenFileMap(streamID))
                  == myCollRank)
                {
                  for (int varID = 0; varID < nvars; ++varID)
                    if (varIsWritten[varID])
                      {
                        int nmiss;
                        int size = vlistInqVarSize(vlistID, varID);
                        resizeVarGatherBuf(vlistID, varID, &data,
                                           &currentDataBufSize);
                        int headerIdx = varMap[varID];
                        if (varIsWritten[varID] == myCollRank + 1)
                          {
                            /* this process has the full array and will
                             * write it */
                            xdebug("gathering varID=%d for direct writing",
                                   varID);
                            gatherArray(root, nProcsModel, headerIdx,
                                        vlistID, data, &nmiss);
                          }
                        else
                          {
                            /* another process has the array and will
                             * send it over */
                            MPI_Status stat;
                            xdebug("receiving varID=%d for writing from"
                                   " process %d",
                                   varID, varIsWritten[varID] - 1);
                            xmpiStat(MPI_Recv(&nmiss, 1, MPI_INT,
                                              varIsWritten[varID] - 1,
                                              COLLBUFNMISS,
                                              collComm, &stat), &stat);
                            xmpiStat(MPI_Recv(data, size, MPI_DOUBLE,
                                              varIsWritten[varID] - 1,
                                              COLLBUFTX,
                                              collComm, &stat), &stat);
                          }
                        streamWriteVar(streamID, varID, data, nmiss);
                      }
                }
              else
                for (int varID = 0; varID < nvars; ++varID)
                  if (varIsWritten[varID] == myCollRank + 1)
                    {
                      /* this process has the full array and another
                       * will write it */
                      int nmiss;
                      int size = vlistInqVarSize(vlistID, varID);
                      resizeVarGatherBuf(vlistID, varID, &data,
                                         &currentDataBufSize);
                      int headerIdx = varMap[varID];
                      gatherArray(root, nProcsModel, headerIdx,
                                  vlistID, data, &nmiss);
                      MPI_Request req;
                      MPI_Status stat;
                      xdebug("sending varID=%d for writing to"
                             " process %d",
                             varID, writerRank);
                      xmpi(MPI_Isend(&nmiss, 1, MPI_INT,
                                     writerRank, COLLBUFNMISS,
                                     collComm, &req));
                      xmpi(MPI_Send(data, size, MPI_DOUBLE,
                                    writerRank, COLLBUFTX,
                                    collComm));
                      xmpiStat(MPI_Wait(&req, &stat), &stat);
                    }
            }
886
887
888
#endif
            break;
#endif
889
890
891
          default:
            xabort("unhandled filetype in parallel I/O.");
          }
892
      }
Thomas Jahns's avatar
Thomas Jahns committed
893
894
#ifdef HAVE_NETCDF4
    free(varIsWritten);
Thomas Jahns's avatar
Thomas Jahns committed
895
896
897
#ifdef HAVE_PARALLEL_NC4
    free(writeBuf);
#endif
Thomas Jahns's avatar
Thomas Jahns committed
898
#endif
899
    free(map.entries);
Thomas Jahns's avatar
Thomas Jahns committed
900
    free(data);
901
  }
902
  xdebug("%s", "RETURN");
903
904
905
906
} 

/************************************************************************/

Deike Kleberg's avatar
Deike Kleberg committed
907

Thomas Jahns's avatar
Thomas Jahns committed
908
909
static
void clearModelWinBuffer(int modelID)
Deike Kleberg's avatar
Deike Kleberg committed
910
911
912
{
  int nProcsModel = commInqNProcsModel ();

Deike Kleberg's avatar
Deike Kleberg committed
913
914
  xassert ( modelID                >= 0           &&
            modelID                 < nProcsModel &&
915
            rxWin != NULL && rxWin[modelID].buffer != NULL &&
916
917
            rxWin[modelID].size > 0 &&
            rxWin[modelID].size <= MAXWINBUFFERSIZE );
918
  memset(rxWin[modelID].buffer, 0, rxWin[modelID].size);
Deike Kleberg's avatar
Deike Kleberg committed
919
920
921
922
923
924
}


/************************************************************************/


925
static
926
void getTimeStepData()
Deike Kleberg's avatar
Deike Kleberg committed
927
{
928
  int modelID;
929
  char text[1024];
930
  int nProcsModel = commInqNProcsModel ();
Thomas Jahns's avatar
Thomas Jahns committed
931
932
  void *getWinBaseAddr;
  int attrFound;
933

934
  xdebug("%s", "START");
Deike Kleberg's avatar
Deike Kleberg committed
935

936
937
  for ( modelID = 0; modelID < nProcsModel; modelID++ )
    clearModelWinBuffer(modelID);
Deike Kleberg's avatar
Deike Kleberg committed
938
  // todo put in correct lbs and ubs
939
  xmpi(MPI_Win_start(groupModel, 0, getWin));
940
941
  xmpi(MPI_Win_get_attr(getWin, MPI_WIN_BASE, &getWinBaseAddr, &attrFound));
  xassert(attrFound);
Deike Kleberg's avatar
Deike Kleberg committed
942
943
  for ( modelID = 0; modelID < nProcsModel; modelID++ )
    {
944
      xdebug("modelID=%d, nProcsModel=%d, rxWin[%d].size=%zu,"
Thomas Jahns's avatar
Thomas Jahns committed
945
             " getWin=%p, sizeof(int)=%u",
946
             modelID, nProcsModel, modelID, rxWin[modelID].size,
Thomas Jahns's avatar
Thomas Jahns committed
947
             getWinBaseAddr, (unsigned)sizeof(int));
948
      /* FIXME: this needs to use MPI_PACK for portability */
949
950
951
      xmpi(MPI_Get(rxWin[modelID].buffer, rxWin[modelID].size,
                   MPI_UNSIGNED_CHAR, modelID, 0,
                   rxWin[modelID].size, MPI_UNSIGNED_CHAR, getWin));
Deike Kleberg's avatar
Deike Kleberg committed
952
    }
953
  xmpi ( MPI_Win_complete ( getWin ));
Deike Kleberg's avatar
Deike Kleberg committed
954

955
  if ( ddebug > 2 )
Deike Kleberg's avatar
Deike Kleberg committed
956
    for ( modelID = 0; modelID < nProcsModel; modelID++ )
957
      {
958
        sprintf(text, "rxWin[%d].size=%zu from PE%d rxWin[%d].buffer",
959
                modelID, rxWin[modelID].size, modelID, modelID);
960
        xprintArray(text, rxWin[modelID].buffer,
961
962
                    rxWin[modelID].size / sizeof (double),
                    DATATYPE_FLT);
963
      }
964
965
  readGetBuffers();

966
  xdebug("%s", "RETURN");
Deike Kleberg's avatar
Deike Kleberg committed
967
}
Deike Kleberg's avatar
Deike Kleberg committed
968
969
970

/************************************************************************/

971
972
973
974
975
976
977
978
979
980
981
982
#if defined (HAVE_LIBNETCDF) && ! defined (HAVE_PARALLEL_NC4)
static int
cdiPioStreamCDFOpenWrap(const char *filename, const char *filemode,
                        int filetype, stream_t *streamptr,
                        int recordBufIsToBeCreated)
{
  switch (filetype)
    {
    case FILETYPE_NC4:
    case FILETYPE_NC4C:
      {
        int rank, fileID;
Thomas Jahns's avatar
Thomas Jahns committed
983
984
        int ioMode = commInqIOMode();
        if (ioMode == PIO_NONE
985
986
987
988
            || commInqRankColl() == (rank = cdiPioNextOpenRank()))
          fileID = cdiStreamOpenDefaultDelegate(filename, filemode, filetype,
                                                streamptr,
                                                recordBufIsToBeCreated);
Thomas Jahns's avatar
Thomas Jahns committed
989
        if (ioMode != PIO_NONE)
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
          xmpi(MPI_Bcast(&fileID, 1, MPI_INT, rank, commInqCommColl()));
        streamptr->ownerRank = rank;
        return fileID;
      }
    default:
      return cdiStreamOpenDefaultDelegate(filename, filemode, filetype,
                                          streamptr, recordBufIsToBeCreated);
    }
}

static void
cdiPioStreamCDFCloseWrap(stream_t *streamptr, int recordBufIsToBeDeleted)
{
  int fileID   = streamptr->fileID;
  int filetype = streamptr->filetype;
  if ( fileID == CDI_UNDEFID )
    Warning("File %s not open!", streamptr->filename);
  else
    switch (filetype)
      {
      case FILETYPE_NC:
      case FILETYPE_NC2:
      case FILETYPE_NC4:
      case FILETYPE_NC4C:
        {
          int rank, rankOpen;
          if (commInqIOMode() == PIO_NONE
              || ((rank = commInqRankColl())
                  == (rankOpen = cdiPioSerialOpenFileMap(streamptr->self))))
            cdiStreamCloseDefaultDelegate(streamptr, recordBufIsToBeDeleted);
          break;
        }
      default:
        cdiStreamCloseDefaultDelegate(streamptr, recordBufIsToBeDeleted);
      }
}
Thomas Jahns's avatar
Thomas Jahns committed
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036

static void
cdiPioCdfDefTimestep(stream_t *streamptr, int tsID)
{
  int rank, rankOpen, streamID = streamptr->self;
  if (commInqIOMode() == PIO_NONE
      || ((rank = commInqRankColl())
          == (rankOpen = cdiPioSerialOpenFileMap(streamID))))
    cdfDefTimestep(streamptr, tsID);
}

1037
1038
#endif

Deike Kleberg's avatar
Deike Kleberg committed
1039
1040
1041
1042
1043
1044
1045
1046
/**
  @brief is encapsulated in CDI library and run on I/O PEs.

  @param

  @return
*/

1047
void IOServer(void (*postCommSetupActions)(void))
Deike Kleberg's avatar
Deike Kleberg committed
1048
{
1049
  int source, tag, size, nProcsModel=commInqNProcsModel();
1050
  static int nfinished = 0;
Deike Kleberg's avatar
Deike Kleberg committed
1051
  char * buffer;
1052
1053
  MPI_Comm commCalc;
  MPI_Status status;
Deike Kleberg's avatar
Deike Kleberg committed
1054

1055
  xdebug("%s", "START");
Deike Kleberg's avatar
Deike Kleberg committed
1056

1057
  backendInit(postCommSetupActions);
1058
1059
  if ( commInqRankNode () == commInqSpecialRankNode ()) 
    backendFinalize ();
1060
  commCalc = commInqCommCalc ();
1061
#ifdef HAVE_PARALLEL_NC4
1062
  cdiPioEnableNetCDFParAccess();
1063
1064
  numPioPrimes = PPM_prime_factorization_32((uint32_t)commInqSizeColl(),
                                            &pioPrimes);
1065
1066
1067
#elif defined (HAVE_LIBNETCDF)
  cdiSerialOpenFileCount = xcalloc(sizeof (cdiSerialOpenFileCount[0]),
                                   commInqSizeColl());
1068
1069
1070
1071
1072
1073
1074
1075
  namespaceSwitchSet(NSSWITCH_STREAM_OPEN_BACKEND,
                     NSSW_FUNC(cdiPioStreamCDFOpenWrap));
  namespaceSwitchSet(NSSWITCH_STREAM_CLOSE_BACKEND,
                     NSSW_FUNC(cdiPioStreamCDFCloseWrap));
  namespaceSwitchSet(NSSWITCH_CDF_DEF_TIMESTEP,
                     NSSW_FUNC(cdiPioCdfDefTimestep));
  namespaceSwitchSet(NSSWITCH_CDF_STREAM_SETUP,
                     NSSW_FUNC(cdiPioServerCdfDefVars));
1076
#endif
1077
1078
1079
  namespaceSwitchSet(NSSWITCH_FILE_WRITE,
                     NSSW_FUNC(cdiPioFileWrite));

Deike Kleberg's avatar
Deike Kleberg committed
1080
  for ( ;; )
1081
    {
Deike Kleberg's avatar
Deike Kleberg committed
1082
      xmpi ( MPI_Probe ( MPI_ANY_SOURCE, MPI_ANY_TAG, commCalc, &status ));
1083
      
Deike Kleberg's avatar
Deike Kleberg committed
1084
1085
      source = status.MPI_SOURCE;
      tag    = status.MPI_TAG;
1086
      
Deike Kleberg's avatar
Deike Kleberg committed
1087
      switch ( tag )
1088
        {
1089
1090
1091
1092
1093
1094
1095
        case FINALIZE:
          {
            int i;
            xdebugMsg(tag, source, nfinished);
            xmpi(MPI_Recv(&i, 1, MPI_INTEGER, source,
                          tag, commCalc, &status));
          }
1096
          xdebug("%s", "RECEIVED MESSAGE WITH TAG \"FINALIZE\"");
1097
          nfinished++;
Thomas Jahns's avatar
Thomas Jahns committed
1098
1099
1100
          xdebug("nfinished=%d, nProcsModel=%d", nfinished, nProcsModel);
          if ( nfinished == nProcsModel )
            {
1101
              {
Deike Kleberg's avatar
Deike Kleberg committed
1102
                int nStreams = streamSize ();
Thomas Jahns's avatar
Thomas Jahns committed
1103

Deike Kleberg's avatar
Deike Kleberg committed
1104
1105
1106
1107
                if ( nStreams > 0 )
                  {
                    int streamNo;
                    int * resHs;
Thomas Jahns's avatar
Thomas Jahns committed
1108

Deike Kleberg's avatar
Deike Kleberg committed
1109
                    resHs       = xmalloc ( nStreams * sizeof ( resHs[0] ));
1110
                    streamGetIndexList ( nStreams, resHs );
Deike Kleberg's avatar
Deike Kleberg committed
1111
1112
1113
1114
                    for ( streamNo = 0; streamNo < nStreams; streamNo++ )
                      streamClose ( resHs[streamNo] );
                    free ( resHs );
                  }
Deike Kleberg's avatar
Deike Kleberg committed
1115
              }
Thomas Jahns's avatar
Thomas Jahns committed
1116
1117
              backendCleanup();
              serverWinCleanup();
1118
              /* listDestroy(); */
1119
              xdebug("%s", "RETURN");
Deike Kleberg's avatar
Deike Kleberg committed
1120
1121
              return;
            }
1122
	  
1123
1124
          break;
          
Deike Kleberg's avatar
Deike Kleberg committed
1125
	case RESOURCES:
1126
	  xdebugMsg (  tag, source, nfinished );
1127
	  xmpi ( MPI_Get_count ( &status, MPI_CHAR, &size ));
Thomas Jahns's avatar
Thomas Jahns committed
1128
	  buffer = xmalloc(size);
1129
1130
	  xmpi ( MPI_Recv ( buffer, size, MPI_PACKED, source,
                            tag, commCalc, &status ));
1131
          xdebug("%s", "RECEIVED MESSAGE WITH TAG \"RESOURCES\"");
1132
	  reshUnpackResources(buffer, size, &commCalc);
1133
          xdebug("%s", "");
Deike Kleberg's avatar
Deike Kleberg committed
1134
	  free ( buffer );
Thomas Jahns's avatar
Thomas Jahns committed
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
          {
            int rankGlob = commInqRankGlob();
            if ( ddebug > 0 && rankGlob == nProcsModel)
              {
                static const char baseName[] = "reshListIOServer.",
                  suffix[] = ".txt"