Unverified Commit 51136251 authored by Dion Häfner's avatar Dion Häfner Committed by GitHub
Browse files

Merge pull request #73 from team-ocean/no-mpiexec

Allow MPI runs without mpiexec
parents f839bddb 794e2067
......@@ -186,6 +186,10 @@ Running Veros through MPI requires some additional dependencies:
After you have installed everything, you can start Veros on multiple processes like so::
$ python my_setup.py -n 2 2
Or, by explicitly using mpirun (might be required on some architectures)::
$ mpirun -n 4 python my_setup.py -n 2 2
In this case, Veros would run on 4 processes, each process computing one-quarter of the domain. The arguments of the `-n` flag specify the number of chunks in x and y-direction, respectively.
......@@ -226,3 +226,18 @@ def test_acc(backend):
@pytest.mark.skipif(ON_GPU, reason='Cannot run MPI and OpenCL')
def test_acc_nompirun(backend):
from veros.setup.acc import acc
'-m', 'mpi4py',
'-n', '2', '2',
'-b', backend,
'-s' 'diskless_mode', '1',
'-s', 'runlen', '864000'
], stderr=subprocess.STDOUT)
import functools
import sys
import time
import click
......@@ -74,18 +76,52 @@ def cli(run):
@click.option('-p', '--profile-mode', is_flag=True, default=False, type=click.BOOL, envvar='VEROS_PROFILE',
help='Write a performance profile for debugging (default: false)')
@click.option('-n', '--num-proc', nargs=2, default=[1, 1], type=click.INT,
help='Number of processes in x and y dimension (requires execution via mpirun)')
help='Number of processes in x and y dimension')
@click.option('--slave', default=False, is_flag=True, hidden=True,
help='Indicates that this process is an MPI worker (for internal use)')
def wrapped(*args, **kwargs):
from veros import runtime_settings
def wrapped(*args, slave, **kwargs):
from veros import runtime_settings, runtime_state
total_proc = kwargs['num_proc'][0] * kwargs['num_proc'][1]
if total_proc > 1 and runtime_state.proc_num == 1 and not slave:
from mpi4py import MPI
comm = MPI.COMM_SELF.Spawn(
args=['-m', 'mpi4py'] + list(sys.argv) + ['--slave'],
futures = [comm.irecv(source=p) for p in range(total_proc)]
while True:
done, success = zip(*(f.test() for f in futures))
if any(s is False for s in success):
raise RuntimeError('An MPI worker encountered an error')
if all(done):
kwargs['override'] = dict(kwargs['override'])
for setting in ('backend', 'profile_mode', 'num_proc', 'loglevel'):
if setting not in kwargs:
setattr(runtime_settings, setting, kwargs.pop(setting))
run(*args, **kwargs)
run(*args, **kwargs)
except: # noqa: E722
status = False
status = True
if slave:
runtime_settings.mpi_comm.Get_parent().send(status, dest=0)
return wrapped
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment