mpim-sw / libcdi · Commits

Commit 8938d4a3
authored Nov 19, 2013 by Thomas Jahns

Change API of common partition routine to that of PPM.

* And use that if available.

parent 5c11a77d
Changes: 8 files
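What changed, in brief: the tests' partition helper used to expose uniform_partition_start(), which maps a part index to the start of its slice of an inclusive {first, last} interval, so callers needed two calls to recover a chunk. The replacement follows the ScalES-PPM API: PPM_uniform_partition() takes a {first, size} extent and returns the whole sub-extent in one call, and where the PPM core library is available its implementation is used directly. A condensed before/after sketch (signatures are taken from the diffs below; size, nparts, and rank stand in for the callers' values):

/* before: two calls against an inclusive interval {first, last} */
int uniform_partition_start(int set_interval[2], int nparts, int part_idx);

int start = uniform_partition_start((int [2]){ 0, size - 1 }, nparts, rank);
int chunkSize = uniform_partition_start((int [2]){ 0, size - 1 }, nparts,
                                        rank + 1) - start;

/* after: one call against a {first, size} extent, as in ScalES-PPM */
struct PPM_extent { int32_t first, size; };
struct PPM_extent
PPM_uniform_partition(struct PPM_extent set_interval, int nparts, int part_idx);

struct PPM_extent range
  = PPM_uniform_partition((struct PPM_extent){ 0, size }, nparts, rank);
int start = range.first, chunkSize = range.size;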
configure
...
@@ -645,6 +645,8 @@ FC_MOD_FLAG
 CREATE_ISOC_FALSE
 CREATE_ISOC_TRUE
 USE_MPI
+USE_PPM_CORE_FALSE
+USE_PPM_CORE_TRUE
 USE_MPI_FALSE
 USE_MPI_TRUE
 HAVE_PARALLEL_NC4
...
@@ -26758,6 +26760,7 @@ if test x"${enable_mpi}" = x"yes"; then :
   USE_MPI=yes
 fi
 HAVE_PARALLEL_NC4=0
+enable_ppm=no
...
@@ -27105,7 +27108,6 @@ fi
 done
-if test $HAVE_PARALLEL_NC4 = 1; then :
 
 pkg_failed=no
 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for PPM_CORE" >&5
...
@@ -27180,8 +27182,6 @@ $as_echo "yes" >&6; }
 $as_echo "#define HAVE_PPM_CORE /**/" >>confdefs.h
 fi
-fi
-
 if test x$enable_ppm != xyes; then :
   HAVE_PARALLEL_NC4=0
...
@@ -27207,6 +27207,14 @@ else
   USE_MPI_FALSE=
 fi
+
+if test $enable_ppm = yes; then
+  USE_PPM_CORE_TRUE=
+  USE_PPM_CORE_FALSE='#'
+else
+  USE_PPM_CORE_TRUE='#'
+  USE_PPM_CORE_FALSE=
+fi
 
 # ----------------------------------------------------------------------
 # Create the Fortran Interface via iso_c_binding module (Fortran 2003 Standard)
...
@@ -27834,6 +27842,10 @@ if test -z "${USE_MPI_TRUE}" && test -z "${USE_MPI_FALSE}"; then
   as_fn_error $? "conditional \"USE_MPI\" was never defined.
 Usually this means the macro was only invoked conditionally." "$LINENO" 5
 fi
+if test -z "${USE_PPM_CORE_TRUE}" && test -z "${USE_PPM_CORE_FALSE}"; then
+  as_fn_error $? "conditional \"USE_PPM_CORE\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
 if test -z "${CREATE_ISOC_TRUE}" && test -z "${CREATE_ISOC_FALSE}"; then
   as_fn_error $? "conditional \"CREATE_ISOC\" was never defined.
 Usually this means the macro was only invoked conditionally." "$LINENO" 5
...
configure.ac
...
@@ -113,6 +113,7 @@ AC_ARG_ENABLE(mpi,AS_HELP_STRING([--enable-mpi],[Compile with MPI compiler [defa
 AS_IF([test x"${enable_mpi}" = x"yes"],
   [USE_MPI=yes])
 HAVE_PARALLEL_NC4=0
+enable_ppm=no
 AS_IF([test x"$USE_MPI" = xyes],
   [AC_DEFINE([USE_MPI],[1],[parallel I/O requested and available])
    AC_PATH_PROGS([MPI_LAUNCH],[mpirun mpiexec],[true])
...
@@ -176,14 +177,12 @@ main(int argc, char **argv)
 ])
 dnl parallel netCDF support still requires ScalES-PPM and YAXT to
 dnl re-arrange the data when running with more than one collector
-AS_IF([test $HAVE_PARALLEL_NC4 = 1],
-  [PKG_CHECK_MODULES([PPM_CORE],[scales-ppm-core],
-    [enable_ppm=yes
-     AC_DEFINE([HAVE_PPM_CORE],,
-       [ScalES PPM C core library is available])
-    ],
-    [enable_ppm=no])
-  ])
+PKG_CHECK_MODULES([PPM_CORE],[scales-ppm-core],
+  [enable_ppm=yes
+   AC_DEFINE([HAVE_PPM_CORE],,
+     [ScalES PPM C core library is available])
+  ],
+  [enable_ppm=no])
 dnl if not both scales-ppm and yaxt are available, netcdf can only be
 dnl used in serial mode
 AS_IF([test x$enable_ppm != xyes],
...
@@ -197,6 +196,7 @@ AS_IF([test $HAVE_PARALLEL_NC4 -gt 0],
   [netCDF library does support MPI parallel invocations])])
 AC_SUBST([HAVE_PARALLEL_NC4])
 AM_CONDITIONAL([USE_MPI],[test x"$USE_MPI" = xyes])
+AM_CONDITIONAL([USE_PPM_CORE],[test $enable_ppm = yes])
 AC_SUBST([USE_MPI])
 # ----------------------------------------------------------------------
 # Create the Fortran Interface via iso_c_binding module (Fortran 2003 Standard)
...
tests/Makefile.am
...
@@ -27,7 +27,7 @@ test_resource_copy_SOURCES = test_resource_copy.c \
 	$(top_srcdir)/src/resource_unpack.c
 test_resource_copy_mpi_SOURCES = test_resource_copy.c
-#AM_CFLAGS = $(YAXT_CFLAGS)
+AM_CFLAGS = $(PPM_CORE_CFLAGS) $(YAXT_CFLAGS)
 if USE_MPI
 pio_write_LDADD = ../src/libcdipio.la
 pio_write_deco2d_LDADD = ../src/libcdipio.la
...
tests/Makefile.in
...
@@ -398,7 +398,7 @@ test_resource_copy_SOURCES = test_resource_copy.c \
 test_resource_copy_mpi_SOURCES = test_resource_copy.c
-#AM_CFLAGS = $(YAXT_CFLAGS)
+AM_CFLAGS = $(PPM_CORE_CFLAGS) $(YAXT_CFLAGS)
 @USE_MPI_FALSE@pio_write_LDADD = $(LDADD)
 @USE_MPI_TRUE@pio_write_LDADD = ../src/libcdipio.la
 @USE_MPI_FALSE@pio_write_deco2d_LDADD = $(LDADD)
...
tests/deco2d_model.c
...
@@ -18,6 +18,9 @@ typedef int MPI_Comm;
 #ifdef USE_MPI
 #include "cdipio.h"
 #include "pio_util.h"
+#ifdef HAVE_PPM_CORE
+#include <ppm/ppm_uniform_partition.h>
+#endif
 #endif
 #include "cksum.h"
...
@@ -151,12 +154,11 @@ modelRun(struct model_config setup, MPI_Comm comm)
       varDesc[varIdx].size = nlon * nlat * varDesc[varIdx].nlev;
 #ifdef USE_MPI
       {
-        int start
-          = uniform_partition_start((int [2]){ 0, varDesc[varIdx].size - 1 },
-                                    comm_size, rank),
-          chunkSize
-          = uniform_partition_start((int [2]){ 0, varDesc[varIdx].size - 1 },
-                                    comm_size, rank + 1) - start;
+        struct PPM_extent range
+          = PPM_uniform_partition((struct PPM_extent){ 0,
+              (int32_t)varDesc[varIdx].size }, comm_size, rank);
+        int start = range.first;
+        int chunkSize = range.size;
         fprintf(stderr, "%d: start=%d, chunkSize = %d\n",
                 rank, start, chunkSize);
         Xt_idxlist idxlist
...
tests/simple_model.c
...
@@ -18,6 +18,9 @@ typedef int MPI_Comm;
 #ifdef USE_MPI
 #include "cdipio.h"
 #include "pio_util.h"
+#ifdef HAVE_PPM_CORE
+#include <ppm/ppm_uniform_partition.h>
+#endif
 #endif
 #include "cksum.h"
...
@@ -151,12 +154,11 @@ modelRun(struct model_config setup, MPI_Comm comm)
       varDesc[varIdx].size = nlon * nlat * varDesc[varIdx].nlev;
 #ifdef USE_MPI
       {
-        int start
-          = uniform_partition_start((int [2]){ 0, varDesc[varIdx].size - 1 },
-                                    comm_size, rank),
-          chunkSize
-          = uniform_partition_start((int [2]){ 0, varDesc[varIdx].size - 1 },
-                                    comm_size, rank + 1) - start;
+        struct PPM_extent range
+          = PPM_uniform_partition((struct PPM_extent){ 0,
+              (int32_t)varDesc[varIdx].size }, comm_size, rank);
+        int start = range.first;
+        int chunkSize = range.size;
         fprintf(stderr, "%d: start=%d, chunkSize = %d\n",
                 rank, start, chunkSize);
         Xt_idxlist idxlist
...
tests/simple_model_helper.c
...
@@ -75,14 +75,30 @@ time_t2cditime(time_t t, int *date, int *timeofday)
   *timeofday = t_s->tm_hour * 10000 + t_s->tm_min * 100 + t_s->tm_sec;
 }
 
-#ifdef USE_MPI
-int
-uniform_partition_start(int set_interval[2], int nparts, int part_idx)
+#if defined USE_MPI && ! defined HAVE_PPM_CORE
+static int32_t
+uniform_partition_start(struct PPM_extent set_interval, int nparts,
+                        int part_idx);
+
+struct PPM_extent
+PPM_uniform_partition(struct PPM_extent set_interval, int nparts, int part_idx)
+{
+  struct PPM_extent range;
+  range.first = uniform_partition_start(set_interval, nparts, part_idx);
+  range.size = uniform_partition_start(set_interval, nparts, part_idx + 1)
+    - range.first;
+  return range;
+}
+
+static int32_t
+uniform_partition_start(struct PPM_extent set_interval, int nparts,
+                        int part_idx)
 {
-  int part_offset = (((long long)set_interval[1] - (long long)set_interval[0]
-                      + 1LL) * (long long)part_idx) / (long long)nparts;
-  int start = set_interval[0] + part_offset;
+  int32_t part_offset
+    = ((int64_t)set_interval.size * (int64_t)part_idx) / (int64_t)nparts;
+  int32_t start = set_interval.first + part_offset;
   return start;
 }
 #endif
...
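The fallback implementation above gives part k the start first + floor(size * k / nparts); the difference of consecutive starts then yields chunk sizes that differ by at most one and tile the extent exactly, and the 64-bit intermediate keeps size * part_idx from overflowing. A minimal standalone program demonstrating the arithmetic (a re-statement for illustration, not part of the commit):

#include <inttypes.h>
#include <stdio.h>

struct PPM_extent { int32_t first, size; };

/* same computation as the fallback above */
static int32_t
uniform_partition_start(struct PPM_extent set_interval, int nparts, int part_idx)
{
  int32_t part_offset
    = (int32_t)(((int64_t)set_interval.size * part_idx) / nparts);
  return set_interval.first + part_offset;
}

int main(void)
{
  struct PPM_extent iv = { 0, 10 };
  /* 10 elements over 3 parts: starts 0, 3, 6 -> sizes 3, 3, 4 */
  for (int k = 0; k < 3; ++k) {
    int32_t first = uniform_partition_start(iv, 3, k);
    int32_t size = uniform_partition_start(iv, 3, k + 1) - first;
    printf("part %d: first=%" PRId32 ", size=%" PRId32 "\n", k, first, size);
  }
  return 0;
}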
tests/simple_model_helper.h
...
@@ -5,6 +5,7 @@
 # include "config.h"
 #endif
 
+#include <inttypes.h>
 #include <time.h>
 
 void
...
@@ -23,9 +24,16 @@ cditime2time_t(int date, int timeofday);
 void
 time_t2cditime(time_t t, int *date, int *timeofday);
 
-#ifdef USE_MPI
-int
-uniform_partition_start(int set_interval[2], int nparts, int part_idx);
+#if defined (USE_MPI) && ! defined(HAVE_PPM_CORE)
+struct PPM_extent
+{
+  int32_t first, size;
+};
+
+struct PPM_extent
+PPM_uniform_partition(struct PPM_extent set_interval, int nparts, int part_idx);
 #endif
 #endif
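Net effect of the header change: when configure detects ScalES-PPM, HAVE_PPM_CORE is defined and the test programs take PPM_extent and PPM_uniform_partition() from <ppm/ppm_uniform_partition.h>; otherwise simple_model_helper.h declares a source-compatible stand-in that simple_model_helper.c implements. Condensed from the diffs above (comments added for orientation):

/* in deco2d_model.c and simple_model.c: prefer the real PPM header */
#ifdef USE_MPI
#ifdef HAVE_PPM_CORE
#include <ppm/ppm_uniform_partition.h>
#endif
#endif

/* in simple_model_helper.h: fallback declaration only when PPM is absent */
#if defined (USE_MPI) && ! defined(HAVE_PPM_CORE)
struct PPM_extent { int32_t first, size; };
struct PPM_extent
PPM_uniform_partition(struct PPM_extent set_interval, int nparts, int part_idx);
#endif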