Unverified Commit 51136251 authored by Dion Häfner's avatar Dion Häfner Committed by GitHub

Merge pull request #73 from team-ocean/no-mpiexec

Allow MPI runs without mpiexec
parents f839bddb 794e2067
...@@ -186,6 +186,10 @@ Running Veros through MPI requires some additional dependencies:
After you have installed everything, you can start Veros on multiple processes like so::
$ python my_setup.py -n 2 2
Or, by explicitly using mpirun (might be required on some architectures)::
$ mpirun -n 4 python my_setup.py -n 2 2
In this case, Veros would run on 4 processes, each process computing one-quarter of the domain. The arguments of the `-n` flag specify the number of chunks in x and y-direction, respectively.
......
...@@ -226,3 +226,18 @@ def test_acc(backend): ...@@ -226,3 +226,18 @@ def test_acc(backend):
)) ))
run_dist_kernel(test_kernel) run_dist_kernel(test_kernel)
@pytest.mark.skipif(ON_GPU, reason='Cannot run MPI and OpenCL')
def test_acc_nompirun(backend):
    """Run the ACC setup on a 2x2 process grid without an explicit mpirun.

    Launches the setup script in a subprocess via ``python -m mpi4py`` so the
    parent process can spawn its own MPI workers; a non-zero exit status makes
    ``check_call`` raise and fail the test.
    """
    from veros.setup.acc import acc
    subprocess.check_call([
        sys.executable,
        '-m', 'mpi4py',
        acc.__file__,
        '-n', '2', '2',
        '-b', backend,
        # FIX: a comma was missing between '-s' and 'diskless_mode', which
        # concatenated them into the bogus single argument '-sdiskless_mode'.
        '-s', 'diskless_mode', '1',
        '-s', 'runlen', '864000'
    ], stderr=subprocess.STDOUT)
import functools import functools
import sys
import time
import click import click
...@@ -74,18 +76,52 @@ def cli(run): ...@@ -74,18 +76,52 @@ def cli(run):
@click.option('-p', '--profile-mode', is_flag=True, default=False, type=click.BOOL, envvar='VEROS_PROFILE',
              help='Write a performance profile for debugging (default: false)')
@click.option('-n', '--num-proc', nargs=2, default=[1, 1], type=click.INT,
              help='Number of processes in x and y dimension')
@click.option('--slave', default=False, is_flag=True, hidden=True,
              help='Indicates that this process is an MPI worker (for internal use)')
@functools.wraps(run)
def wrapped(*args, slave, **kwargs):
    """CLI entry point for a Veros setup run.

    If more than one process is requested via ``-n nx ny`` but this process is
    not already part of an MPI run, spawn ``nx * ny`` workers through
    ``python -m mpi4py`` and wait for each to report success. Otherwise,
    transfer the CLI settings to the runtime and execute ``run``.
    """
    from veros import runtime_settings, runtime_state

    total_proc = kwargs['num_proc'][0] * kwargs['num_proc'][1]

    # Multi-process run requested, but we are a lone process and not a
    # spawned worker: re-launch ourselves under MPI instead of requiring
    # the user to invoke mpirun explicitly.
    if total_proc > 1 and runtime_state.proc_num == 1 and not slave:
        from mpi4py import MPI
        comm = MPI.COMM_SELF.Spawn(
            sys.executable,
            args=['-m', 'mpi4py'] + list(sys.argv) + ['--slave'],
            maxprocs=total_proc
        )
        # Each worker sends back a status flag when it finishes (see the
        # `finally` clause below); poll until all are done or one fails.
        futures = [comm.irecv(source=p) for p in range(total_proc)]
        while True:
            done, success = zip(*(f.test() for f in futures))
            if any(s is False for s in success):
                raise RuntimeError('An MPI worker encountered an error')
            if all(done):
                break
            time.sleep(0.1)
        return

    kwargs['override'] = dict(kwargs['override'])

    for setting in ('backend', 'profile_mode', 'num_proc', 'loglevel'):
        if setting not in kwargs:
            continue
        setattr(runtime_settings, setting, kwargs.pop(setting))

    try:
        run(*args, **kwargs)
    except:  # noqa: E722
        status = False
        raise
    else:
        status = True
    finally:
        # Spawned workers report their exit status back to the parent
        # process so it knows when (and whether) the run completed.
        if slave:
            runtime_settings.mpi_comm.Get_parent().send(status, dest=0)
return wrapped return wrapped
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment