Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • k202009/ssh_scripts
  • k204213/ssh_scripts
2 results
Show changes
Commits on Source (6)
...@@ -6,4 +6,12 @@ tunnel set-up, and start the client application on your local system. ...@@ -6,4 +6,12 @@ tunnel set-up, and start the client application on your local system.
As bash scripts they run natively on Linux, MacOS, and other Unix-like
systems. After installing WSL (Windows Subsystem for Linux), they also run
on Microsoft Windows 10.
\ No newline at end of file
## start-jupyter
The script is being modified to allow further features:
* connecting to different DKRZ systems: mistral, vader, levante (?)
* start jupyter notebooks in singularity containers
...@@ -43,6 +43,8 @@ ...@@ -43,6 +43,8 @@
# #
# In case of problems contact Mathis Rosenhauer <rosenhauer@dkrz.de>. # In case of problems contact Mathis Rosenhauer <rosenhauer@dkrz.de>.
# #
# 2020, Oct. 7, S. Bendoukha, added support for singularity containers.
# 2021, Apr. 14, S. Bendoukha, added option to specify frontend.
set -eufo pipefail set -eufo pipefail
...@@ -91,8 +93,18 @@ SJ_INCFILE="" ...@@ -91,8 +93,18 @@ SJ_INCFILE=""
# #
# Must be directly accessible from client. The frontend and the node # Must be directly accessible from client. The frontend and the node
# where jupyter is running need a shared home file system. # where jupyter is running need a shared home file system.
readonly SJ_FRONTEND_HOST="mistralpp.dkrz.de" SJ_FRONTEND_HOST="mistralpp.dkrz.de"
# Image
#
# start the jupyter notebook in a singularity container from a given
# image name.
SJ_IMAGE=""
# Slurm Options
# additional slurm options that users
# can pass to the sbatch script
SJ_SLURM_OPTIONS=""
function clean_up () { function clean_up () {
trap - ERR EXIT trap - ERR EXIT
...@@ -131,18 +143,22 @@ Available values for OPTION include: ...@@ -131,18 +143,22 @@ Available values for OPTION include:
-A acctcode start a job with acctcode -A acctcode start a job with acctcode
-c command invoke jupyter with command -c command invoke jupyter with command
-d check for presence of jupyter -d check for presence of jupyter
-f system mistral, vader
-i file source file prior to running jupyter -i file source file prior to running jupyter
-o options slurm options
-n ntasks request ntasks tasks for job -n ntasks request ntasks tasks for job
-p partition run job on partition -p partition run job on partition
-s image singularity image
-t time job runtime -t time job runtime
-u username use username for login -u username use username for login
EOF EOF
} }
function parse_options () { function parse_options () {
local option local option
while getopts 'A:c:di:n:p:t:u:' option; do while getopts 'A:c:di:o:f:n:p:t:u:s:' option; do
case ${option} in case ${option} in
A) SJ_ACCTCODE="$OPTARG" A) SJ_ACCTCODE="$OPTARG"
;; ;;
...@@ -150,12 +166,18 @@ function parse_options () { ...@@ -150,12 +166,18 @@ function parse_options () {
;; ;;
d) SJ_DEBUG=1 d) SJ_DEBUG=1
;; ;;
f) SJ_FRONTEND_HOST="$OPTARG"
;;
i) SJ_INCFILE="$OPTARG" i) SJ_INCFILE="$OPTARG"
;; ;;
o) SJ_SLURM_OPTIONS="$OPTARG"
;;
n) SJ_NTASKS="$OPTARG" n) SJ_NTASKS="$OPTARG"
;; ;;
p) SJ_PARTITION="$OPTARG" p) SJ_PARTITION="$OPTARG"
;; ;;
s) SJ_IMAGE="$OPTARG"
;;
t) SJ_RUNTIME="$OPTARG" t) SJ_RUNTIME="$OPTARG"
;; ;;
u) SJ_USERNAME="$OPTARG" u) SJ_USERNAME="$OPTARG"
...@@ -173,6 +195,9 @@ function parse_options () { ...@@ -173,6 +195,9 @@ function parse_options () {
readonly SJ_PARTITION readonly SJ_PARTITION
readonly SJ_RUNTIME readonly SJ_RUNTIME
readonly SJ_USERNAME readonly SJ_USERNAME
readonly SJ_IMAGE
readonly SJ_FRONTEND_HOST
readonly SJ_SLURM_OPTIONS
} }
function ssh_frontend () { function ssh_frontend () {
...@@ -196,7 +221,7 @@ function source_incfile() { ...@@ -196,7 +221,7 @@ function source_incfile() {
fi fi
commandline="source ${incfile}; ${commandline}" commandline="source ${incfile}; ${commandline}"
else else
commandline="module load python3/unstable; ${commandline}" commandline="module use /sw/spack-rhel6/spack/modules/linux-rhel6-haswell/; module load python3/unstable; ${commandline}"
fi fi
echo "${commandline}" echo "${commandline}"
} }
...@@ -206,16 +231,19 @@ function which_jupyter() { ...@@ -206,16 +231,19 @@ function which_jupyter() {
local which local which
which="$(source_incfile "which jupyter")" which="$(source_incfile "which jupyter")"
ssh_frontend "/bin/bash -lc \"${which}\"" ssh_frontend "/bin/bash -lc \"${which}\""
} }
# Assemble the remote command line that starts jupyter.
#
# Globals:   SJ_COMMAND (read) - jupyter subcommand, e.g. "notebook"
#            SJ_ACCTCODE (read) - Slurm account; empty means "no batch job"
#            SJ_IMAGE (read) - singularity image; empty means "no container"
# Arguments: $1 - logfile path on the remote host; jupyter's stderr is
#            appended there (the caller reads port/token from it)
# Outputs:   writes the assembled command line to stdout
function assemble_commandline () {
    local logfile="$1"
    local commandline="jupyter ${SJ_COMMAND} --no-browser 2>> ${logfile}"

    # Containerize first so that any scheduling wrapper added below applies
    # to the singularity process itself.
    if [[ -n ${SJ_IMAGE} ]]; then
        commandline="singularity exec --nv ${SJ_IMAGE} ${commandline}"
    fi

    # If we are not running a batch job, we have to perform our own
    # scheduling: background the command and report its PID. This wrapper
    # must be outermost — in the previous ordering nohup ended up inside
    # the container (requiring nohup in the image) and detached the wrong
    # process.
    if [[ -z ${SJ_ACCTCODE} ]]; then
        commandline="nohup ${commandline} > /dev/null & echo \$!"
    fi

    commandline="$(source_incfile "${commandline}")"
    echo "${commandline}"
}
...@@ -233,7 +261,8 @@ function submit_jupyter_job () { ...@@ -233,7 +261,8 @@ function submit_jupyter_job () {
#SBATCH --account=${SJ_ACCTCODE} #SBATCH --account=${SJ_ACCTCODE}
#SBATCH --output=/dev/null #SBATCH --output=/dev/null
#SBATCH --parsable #SBATCH --parsable
#SBATCH --dkrzepilog=0 #SBATCH ${SJ_SLURM_OPTIONS}
##SBATCH --dkrzepilog=0
cd \${HOME} cd \${HOME}
echo "NODE:\${SLURM_JOB_NODELIST}" > ${logfile} echo "NODE:\${SLURM_JOB_NODELIST}" > ${logfile}
${commandline} ${commandline}
......