
Compare revisions

@@ -6,4 +6,12 @@ tunnel set-up, and start the client application on your local system.
As bash scripts, they run natively on Linux, macOS, and other Unix-like
systems. After installing WSL (Windows Subsystem for Linux), they also run
on Microsoft Windows 10.
## start-jupyter
The script is being extended with additional features (see the example below):
* connecting to different DKRZ systems: mistral, vader, levante (?)
* starting Jupyter notebooks in Singularity containers
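
A possible invocation once these changes are merged (the option letters come from the updated script below; the username, frontend host, and image path are placeholders):

```bash
# run jupyter lab on the vader frontend inside a Singularity container
./start-jupyter -u k123456 -f vader.dkrz.de -s /work/ab0123/jupyter.sif -c lab
```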
@@ -43,6 +43,8 @@
#
# In case of problems contact Mathis Rosenhauer <rosenhauer@dkrz.de>.
#
# 2020, Oct. 7, S. Bendoukha, added support for singularity containers.
# 2021, Apr. 14, S. Bendoukha, added option to specify frontend.
set -eufo pipefail
@@ -91,8 +93,18 @@ SJ_INCFILE=""
#
# Must be directly accessible from client. The frontend and the node
# where jupyter is running need a shared home file system.
SJ_FRONTEND_HOST="mistralpp.dkrz.de"
# Image
#
# Start the Jupyter notebook in a Singularity container built from the
# given image.
SJ_IMAGE=""
# Slurm options
#
# Additional Slurm options that users can pass to the sbatch script.
SJ_SLURM_OPTIONS=""
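
# Illustrative values (the image path and Slurm options below are
# hypothetical examples, not shipped defaults):
#   SJ_FRONTEND_HOST="mistralpp.dkrz.de"  # overridable with -f
#   SJ_IMAGE="/work/ab0123/jupyter.sif"   # handed to "singularity exec" via -s
#   SJ_SLURM_OPTIONS="--mem=16G"          # appended to the #SBATCH header via -o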
function clean_up () {
trap - ERR EXIT
@@ -131,18 +143,22 @@ Available values for OPTION include:
-A acctcode start a job with acctcode
-c command invoke jupyter with command
-d check for presence of jupyter
-f system use the frontend of system: mistral, vader
-i file source file prior to running jupyter
-o options pass additional slurm options to sbatch
-n ntasks request ntasks tasks for job
-p partition run job on partition
-s image singularity image
-t time job runtime
-u username use username for login
EOF
}
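
# Example invocation (hypothetical account code, partition, and Slurm options):
#   ./start-jupyter -A ab0123 -p shared -n 4 -t 02:00:00 -o "--mem=16G"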
function parse_options () {
local option
while getopts 'A:c:di:n:p:t:u:' option; do
while getopts 'A:c:di:o:f:n:p:t:u:s:' option; do
case ${option} in
A) SJ_ACCTCODE="$OPTARG"
;;
@@ -150,12 +166,18 @@ function parse_options () {
;;
d) SJ_DEBUG=1
;;
f) SJ_FRONTEND_HOST="$OPTARG"
;;
i) SJ_INCFILE="$OPTARG"
;;
o) SJ_SLURM_OPTIONS="$OPTARG"
;;
n) SJ_NTASKS="$OPTARG"
;;
p) SJ_PARTITION="$OPTARG"
;;
s) SJ_IMAGE="$OPTARG"
;;
t) SJ_RUNTIME="$OPTARG"
;;
u) SJ_USERNAME="$OPTARG"
@@ -173,6 +195,9 @@ function parse_options () {
readonly SJ_PARTITION
readonly SJ_RUNTIME
readonly SJ_USERNAME
readonly SJ_IMAGE
readonly SJ_FRONTEND_HOST
readonly SJ_SLURM_OPTIONS
}
function ssh_frontend () {
@@ -196,7 +221,7 @@ function source_incfile() {
fi
commandline="source ${incfile}; ${commandline}"
else
commandline="module load python3/unstable; ${commandline}"
commandline="module use /sw/spack-rhel6/spack/modules/linux-rhel6-haswell/; module load python3/unstable; ${commandline}"
fi
echo "${commandline}"
}
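
# Sketch of what this returns when no -i file is given (module paths as in
# the default setup above):
#   module use /sw/spack-rhel6/spack/modules/linux-rhel6-haswell/; module load python3/unstable; <commandline>
# With -i env.sh (hypothetical file) it instead prepends "source <env.sh>; ".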
@@ -206,16 +231,19 @@ function which_jupyter() {
local which
which="$(source_incfile "which jupyter")"
ssh_frontend "/bin/bash -lc \"${which}\""
}
function assemble_commandline () {
local logfile="$1"
local commandline="jupyter ${SJ_COMMAND} --no-browser 2>> ${logfile}"
# If we are not running a job, we have to perform our own scheduling
if [[ -z ${SJ_ACCTCODE} ]]; then
commandline="nohup ${commandline} > /dev/null & echo \$!"
fi
if [[ -n ${SJ_IMAGE} ]]; then
commandline="singularity exec --nv ${SJ_IMAGE} ${commandline}"
fi
commandline="$(source_incfile "${commandline}")"
echo "${commandline}"
}
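
# Roughly, with -s image.sif (hypothetical) and no -A acctcode, the assembled
# command line ends up as:
#   module use ...; module load python3/unstable; \
#   singularity exec --nv image.sif nohup jupyter ${SJ_COMMAND} --no-browser \
#     2>> <logfile> > /dev/null & echo $!
# i.e. jupyter runs inside the container, is detached with nohup, and the
# background process ID is echoed back to the caller.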
@@ -233,7 +261,8 @@ function submit_jupyter_job () {
#SBATCH --account=${SJ_ACCTCODE}
#SBATCH --output=/dev/null
#SBATCH --parsable
#SBATCH ${SJ_SLURM_OPTIONS}
##SBATCH --dkrzepilog=0
cd \${HOME}
echo "NODE:\${SLURM_JOB_NODELIST}" > ${logfile}
${commandline}