├── .gitignore
├── CONTRIBUTORS.md
├── sbatches
│   ├── sherlock
│   │   ├── jupyter.sbatch
│   │   ├── py2-jupyter.sbatch
│   │   ├── tensorboard.sbatch
│   │   ├── py3-jupyter.sbatch
│   │   ├── py3-tensorflow.sbatch
│   │   ├── py2-tensorflow.sbatch
│   │   ├── singularity-exec.sbatch
│   │   ├── singularity-run.sbatch
│   │   ├── jupyter-gpu.sbatch
│   │   ├── r-jupyter.sbatch
│   │   ├── r3.4-jupyter.sbatch
│   │   ├── singularity-jupyter.sbatch
│   │   ├── singularity-notebook.sbatch
│   │   └── containershare-notebook.sbatch
│   ├── singularity-run.sbatch
│   ├── singularity-exec.sbatch
│   └── farmshare
│       ├── README.md
│       ├── singularity-jupyterlab.sbatch
│       └── singularity-jupyter.sbatch
├── hosts
│   ├── sherlock_ssh.sh
│   └── farmshare_ssh.sh
├── end.sh
├── resume.sh
├── LICENSE
├── CHANGELOG.md
├── start-node.sh
├── start.sh
├── examples
│   └── recipe-sherlock-gpu.md
├── setup.sh
├── helpers.sh
├── CONTRIBUTING.md
└── README.md

/.gitignore:
--------------------------------------------------------------------------------
params.sh

--------------------------------------------------------------------------------
/CONTRIBUTORS.md:
--------------------------------------------------------------------------------
# Project Lead:

- Raphael Townshend <@raphtown>

# Contributors:

- Vanessa Sochat <@vsoch>

--------------------------------------------------------------------------------
/sbatches/sherlock/jupyter.sbatch:
--------------------------------------------------------------------------------
#!/bin/bash

PORT=$1
NOTEBOOK_DIR=$2
cd $NOTEBOOK_DIR

module load py-jupyter/1.0.0_py27
jupyter notebook --no-browser --port=$PORT

--------------------------------------------------------------------------------
/sbatches/sherlock/py2-jupyter.sbatch:
--------------------------------------------------------------------------------
#!/bin/bash

PORT=$1
NOTEBOOK_DIR=$2
cd $NOTEBOOK_DIR

module load py-jupyter/1.0.0_py27
jupyter notebook --no-browser --port=$PORT

--------------------------------------------------------------------------------
/sbatches/sherlock/tensorboard.sbatch:
--------------------------------------------------------------------------------
#!/bin/bash
PORT=$1
TENSORBOARD_DIR=$2

module load py-tensorflow/1.8.0_py27
python -m tensorboard.main --logdir $TENSORBOARD_DIR --debug --port=$PORT

--------------------------------------------------------------------------------
/sbatches/sherlock/py3-jupyter.sbatch:
--------------------------------------------------------------------------------
#!/bin/bash

PORT=$1
NOTEBOOK_DIR=${2:-${SCRATCH}}
cd $NOTEBOOK_DIR

module load py-jupyter/1.0.0_py36
jupyter notebook --no-browser --port=$PORT

--------------------------------------------------------------------------------
/sbatches/sherlock/py3-tensorflow.sbatch:
--------------------------------------------------------------------------------
#!/bin/bash

PORT=$1
NOTEBOOK_DIR=$2
cd $NOTEBOOK_DIR

module load py-jupyter/1.0.0_py36
module load py-tensorflow/1.9.0_py36

jupyter notebook --no-browser --port=$PORT

--------------------------------------------------------------------------------
/sbatches/sherlock/py2-tensorflow.sbatch:
--------------------------------------------------------------------------------
#!/bin/bash
PORT=$1
NOTEBOOK_DIR=$2
cd $NOTEBOOK_DIR

module load protobuf/3.4.0
module load py-jupyter/1.0.0_py27
module load py-tensorflow/1.8.0_py27

jupyter notebook --no-browser --port=$PORT

--------------------------------------------------------------------------------
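A note on the templates above (editorial, not from the repo): each sherlock sbatch script takes the listening port as its first argument and the working directory as its second. They are normally submitted on your behalf by start.sh, but a direct submission from a login node would look roughly like this sketch, where the job name, output file, and port are illustrative:

    # On a Sherlock login node; 56143 is an arbitrary high port
    sbatch --job-name=py3-jupyter --output=py3-jupyter.out \
        sbatches/sherlock/py3-jupyter.sbatch 56143 "$SCRATCH"

    # Find the compute node the notebook landed on
    squeue --name=py3-jupyter --user=$USER -o "%N" -h
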
/hosts/sherlock_ssh.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Sherlock cluster at Stanford
# Prints an ssh configuration for the user, selecting a login node at random
# Sample usage: bash sherlock_ssh.sh
echo
read -p "Sherlock username > " FORWARD_USERNAME

echo "Host sherlock
    User ${FORWARD_USERNAME}
    Hostname login.sherlock.stanford.edu
    GSSAPIDelegateCredentials yes
    GSSAPIAuthentication yes
    ControlMaster auto
    ControlPersist yes
    ControlPath ~/.ssh/%C"

--------------------------------------------------------------------------------
/sbatches/singularity-run.sbatch:
--------------------------------------------------------------------------------
#!/bin/bash

# The singularity-run script will run a container.
# To execute a command in a container, use singularity-exec

CONTAINER=${1}
shift

if [ "$#" -gt 0 ]; then
    NOTEBOOK_DIR=${1}
    shift 1
else
    if [ -d "${SCRATCH}" ]
    then
        NOTEBOOK_DIR=${SCRATCH}
    else
        NOTEBOOK_DIR=$HOME
    fi
fi

cd $NOTEBOOK_DIR

# Script assumes Singularity is already available

singularity run ${CONTAINER} "${@}"

--------------------------------------------------------------------------------
/sbatches/sherlock/singularity-exec.sbatch:
--------------------------------------------------------------------------------
#!/bin/bash

# The singularity-exec script will execute a command in a container.
# To run a container using its runscript as the entrypoint,
# use singularity-run

CONTAINER=${1}
shift

if [ "$#" -gt 0 ]; then
    NOTEBOOK_DIR=${1}
    shift 1
else
    NOTEBOOK_DIR=${SCRATCH}
fi

cd $NOTEBOOK_DIR

module use system
module load singularity
export SINGULARITY_CACHEDIR=$SCRATCH/.singularity
singularity exec ${CONTAINER} "${@}"

--------------------------------------------------------------------------------
/sbatches/sherlock/singularity-run.sbatch:
--------------------------------------------------------------------------------
#!/bin/bash

# The singularity-run script will run a container, using the
# container's runscript as the entrypoint. To execute a command,
# use singularity-exec

CONTAINER=${1}
shift

if [ "$#" -gt 0 ]; then
    NOTEBOOK_DIR=${1}
    shift 1
else
    NOTEBOOK_DIR=${SCRATCH}
fi

cd $NOTEBOOK_DIR

module use system
module load singularity
export SINGULARITY_CACHEDIR=$SCRATCH/.singularity
singularity run ${CONTAINER} "${@}"

--------------------------------------------------------------------------------
/sbatches/singularity-exec.sbatch:
--------------------------------------------------------------------------------
#!/bin/bash

# The singularity-exec script will execute a command in a container.
# To run a container, use singularity-run

CONTAINER=${1}
shift

if [ "$#" -gt 0 ]; then
    NOTEBOOK_DIR=${1}
    shift 1
else
    if [ -d "${SCRATCH}" ]
    then
        NOTEBOOK_DIR=${SCRATCH}
    else
        NOTEBOOK_DIR=$HOME
    fi
fi

cd $NOTEBOOK_DIR

# Script assumes Singularity is already available

singularity exec ${CONTAINER} "${@}"

--------------------------------------------------------------------------------
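One subtlety in the argument handling of the four scripts above: the container is always the first argument, and the working directory, when given, must come before any command, since the second positional argument is consumed as the directory. A direct-submission sketch (hedged; the image and paths are illustrative):

    # exec form: container, working directory, then the command
    sbatch sbatches/sherlock/singularity-exec.sbatch docker://ubuntu "$SCRATCH" cat /etc/os-release

    # run form: container plus optional working directory
    sbatch sbatches/singularity-run.sbatch docker://ubuntu "$SCRATCH"
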
/sbatches/sherlock/jupyter-gpu.sbatch:
--------------------------------------------------------------------------------
#!/bin/bash

PORT=$1
NOTEBOOK_DIR=$2
if [ -z "$NOTEBOOK_DIR" ]; then
    cd $SCRATCH
else
    cd $NOTEBOOK_DIR
fi

## to compile libtorch C++ code, load these modules
# module load gcc/7.3.0
# module load gdb
# module load cmake
# export CC=$(which gcc)
# export CXX=$(which g++)

# Select the CUDA version that matches your PyTorch/TensorFlow build
module load cuda/10.1.168
module load cudnn/7.6.4
module load nccl

source activate base
jupyter lab --no-browser --port=$PORT

--------------------------------------------------------------------------------
/sbatches/sherlock/r-jupyter.sbatch:
--------------------------------------------------------------------------------
#!/bin/bash

PORT=$1
NOTEBOOK_DIR=$2
cd $NOTEBOOK_DIR

module load py-jupyter/1.0.0_py36
module load R/3.5.1

# Note to user: it's helpful to install these packages before running the
# script, so that the script only needs to load them (and not compile and
# install them!)

# Install devtools and IRkernel
Rscript -e "install.packages('devtools', repos='http://cran.us.r-project.org');"
Rscript -e "library('devtools'); devtools::install_github('IRkernel/IRkernel')"

# register the kernel in the current R installation
Rscript -e "IRkernel::installspec()"

jupyter notebook --no-browser --port=$PORT

--------------------------------------------------------------------------------
/hosts/farmshare_ssh.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Farmshare cluster at Stanford
# Prints an ssh configuration for the user, selecting a login node at random
# Sample usage: bash farmshare_ssh.sh
echo
read -p "Farmshare username > " FORWARD_USERNAME

# The FarmShare login node is (as of 2018) rice.stanford.edu. That is a
# load-balanced DNS name. The use of ControlMaster ensures that multiple
# connections to rice.stanford.edu all go to the same host.

echo "Host farmshare
    User ${FORWARD_USERNAME}
    Hostname rice.stanford.edu
    GSSAPIDelegateCredentials yes
    GSSAPIAuthentication yes
    ControlMaster auto
    ControlPersist yes
    ControlPath ~/.ssh/%C"

--------------------------------------------------------------------------------
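Both host scripts only print the configuration, so the usual way to consume them (an assumption on my part, not stated in the repo) is to append the output to your ssh configuration and connect through the new alias; the username prompt from read -p goes to stderr, so it does not end up in the file:

    bash hosts/farmshare_ssh.sh >> ~/.ssh/config
    ssh farmshare
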
/end.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Ends a running remote sbatch job and kills its port forwarding.
# Sample usage: bash end.sh jupyter
#               bash end.sh tensorboard

if [ ! -f params.sh ]
then
    echo "Need to configure params before first run, run setup.sh!"
    exit
fi
source params.sh

if [ "$#" -eq 0 ]
then
    echo "Need to give name of sbatch job to kill!"
    exit
fi

NAME=$1

echo "Killing $NAME slurm job on ${RESOURCE}"
ssh ${RESOURCE} "squeue --name=$NAME --user=$FORWARD_USERNAME -o '%A' -h | xargs --no-run-if-empty /usr/bin/scancel"

echo "Killing listeners on ${RESOURCE}"
ssh ${RESOURCE} "${USE_LSOF} -i :$PORT -t | xargs --no-run-if-empty kill"

--------------------------------------------------------------------------------
/sbatches/sherlock/r3.4-jupyter.sbatch:
--------------------------------------------------------------------------------
#!/bin/bash

PORT=$1
NOTEBOOK_DIR=$2
cd $NOTEBOOK_DIR

module load py-jupyter/1.0.0_py36
module load R/3.4.0

# You will need to set up a jupyter notebook password first, see the mini
# tutorial at https://vsoch.github.io/lessons/sherlock-juputer-r/
# Note to user: it's helpful to install these packages before running the
# script, so that the script only needs to load them (and not compile and
# install them!)

# Install devtools and IRkernel
Rscript -e "install.packages('devtools', repos='http://cran.us.r-project.org');"
Rscript -e "library('devtools'); devtools::install_github('IRkernel/IRkernel')"

# register the kernel in the current R installation
Rscript -e "IRkernel::installspec(name = 'ir34', displayname = 'R 3.4')"

jupyter notebook --no-browser --port=$PORT

--------------------------------------------------------------------------------
/resume.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Resumes an already running remote sbatch job.
# Sample usage: bash resume.sh jupyter

if [ ! -f params.sh ]
then
    echo "Need to configure params before first run, run setup.sh!"
    exit
fi
source params.sh

NAME="${1}"

# The port is read from params.sh

echo "ssh ${RESOURCE} squeue --name=$NAME --user=$FORWARD_USERNAME -o "%N" -h"
MACHINE=$(ssh ${RESOURCE} squeue --name=$NAME --user=$FORWARD_USERNAME -o "%N" -h)

if $ISOLATEDCOMPUTENODE
then
    echo "ssh -L $PORT:localhost:$PORT ${RESOURCE} ssh -L $PORT:localhost:$PORT -N $MACHINE &"
    ssh -L $PORT:localhost:$PORT ${RESOURCE} ssh -L $PORT:localhost:$PORT -N $MACHINE &
else
    echo "ssh $DOMAINNAME -l $FORWARD_USERNAME -K -L $PORT:$MACHINE:$PORT -N &"
    ssh "$DOMAINNAME" -l $FORWARD_USERNAME -K -L $PORT:$MACHINE:$PORT -N &
fi

--------------------------------------------------------------------------------
/sbatches/farmshare/README.md:
--------------------------------------------------------------------------------
# Farmshare

1. Go to your home directory on rice. Type `module load singularity`.
2. On rice, while still in your home directory, type `singularity exec library://sohams/default/farmsharejupyter:latest jupyter notebook --generate-config`
3. Next, type `singularity exec library://sohams/default/farmsharejupyter:latest jupyter notebook password`. Choose a password and verify it. This will serve as the login password for the notebooks.
4. Follow the original tutorial to set up ssh, and fill out the params.sh file by running `bash setup.sh` (a sample params.sh is sketched after this list). Choose a port higher than 32768 for the tunnel to work.
5. To start, type `bash start_farmshare.sh singularity-jupyter` for the classic notebook or `bash start_farmshare.sh singularity-jupyterlab` for JupyterLab.
6. While the tunnel to the compute node is being established, you will be prompted for your password and Duo two-factor authentication.
7. The notebook address is printed at the end of the output; open http://localhost:(your chosen port number) in a browser. The default location of the notebook will be your scratch space - /farmshare/scratch/users/yourusername

--------------------------------------------------------------------------------
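The README refers to params.sh without showing it. The real file is generated by setup.sh; the sketch below is inferred only from the variables that end.sh and resume.sh read, so the values are illustrative and the generated file may contain additional settings:

    # params.sh -- illustrative values; setup.sh writes the real file
    FORWARD_USERNAME="yourusername"
    RESOURCE="farmshare"             # ssh alias from hosts/farmshare_ssh.sh
    PORT="56143"                     # above 32768, per step 4
    ISOLATEDCOMPUTENODE="false"      # resume.sh tunnels through the login node when true
    DOMAINNAME="rice.stanford.edu"   # resume.sh forwards via this host when false
    USE_LSOF="lsof"                  # remote lsof path, used by end.sh to kill listeners
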
/LICENSE:
--------------------------------------------------------------------------------
The MIT License

Copyright (c) 2018-2021 Vanessa Sochat
Copyright (c) 2018 Raphael Townshend

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
# CHANGELOG

This is a manually generated log to track changes to the repository.
Each section should include general headers such as **Implemented enhancements**
and **Merged pull requests**. All closed issues and bug fixes should be
represented by the pull requests that fixed them. Critical items to know are:

- renamed commands
- deprecated / removed commands
- changed defaults
- backward incompatible changes (recipe file format? image file format?)
- migration guidance (how to convert images?)
- changed behaviour (recipe sections work differently)

## [master](https://github.com/drorlab/forward/tree/master) (master)
- R kernel in jupyter notebooks (sherlock) script [@vsoch 10-29-2018]
- containershare notebooks script [@vsoch 8-6-2018]
- adding support for multiple hosts [@vsoch 7-31-2018]
- adding Singularity, tensorflow jupyter examples [@vsoch 7-22-2018]
- adding CONTRIBUTORS, CONTRIBUTING, LICENSE, CHANGELOG [@vsoch 7-9-2018]
- using exponential backoff to get node, to not spam login servers
- adding initial check if session exists, and advising user to end or resume
- Initial release and announcement to sherlock users list [@raphtown 7-8-2018]

--------------------------------------------------------------------------------
/sbatches/farmshare/singularity-jupyterlab.sbatch:
--------------------------------------------------------------------------------
#!/bin/bash

# Usage

# 1. Default JupyterLab (with your scratch to work in)
# $ bash start.sh singularity-jupyterlab

# 2. Default JupyterLab with custom working directory
# $ bash start.sh singularity-jupyterlab /farmshare/scratch/users/

# 3. Select your own jupyter container on FarmShare!
# $ bash start.sh singularity-jupyterlab /farmshare/scratch/users/ /path/to/container

# 4. Or any singularity container...
# $ bash start.sh singularity /path/to/container

PORT=$1
NOTEBOOK_DIR=${2:-/farmshare/scratch/users/$USER}
CONTAINER=${3:-library://sohams/default/farmsharejupyter:latest}

export SINGULARITY_CACHEDIR=/farmshare/user_data/${USER}/.singularity
echo "Container is ${CONTAINER}"
echo "Notebook directory is ${NOTEBOOK_DIR}"
cd ${NOTEBOOK_DIR}

# Create .local folder for default modules, if it doesn't exist
if [ ! -d "${HOME}/.local" ];
then
    echo "Creating local python modules folder to map at ${HOME}/.local";
    mkdir -p "${HOME}/.local";
fi

. /etc/profile
module load singularity/3.4.0
singularity exec --home ${HOME} --bind ${HOME}/.local:/home/username/.local ${CONTAINER} jupyter-lab --no-browser --port=$PORT --ip 0.0.0.0

--------------------------------------------------------------------------------
/sbatches/farmshare/singularity-jupyter.sbatch:
--------------------------------------------------------------------------------
#!/bin/bash

# Usage

# 1. Default Jupyter notebook (with your scratch to work in)
# $ bash start.sh singularity-jupyter

# 2. Default Jupyter notebook with custom working directory
# $ bash start.sh singularity-jupyter /farmshare/scratch/users/

# 3. Select your own jupyter container on FarmShare!
# $ bash start.sh singularity-jupyter /farmshare/scratch/users/ /path/to/container

# 4. Or any singularity container...
# $ bash start.sh singularity /path/to/container

PORT=$1
NOTEBOOK_DIR=${2:-/farmshare/scratch/users/$USER}
CONTAINER=${3:-library://sohams/default/farmsharejupyter:latest}

export SINGULARITY_CACHEDIR=/farmshare/user_data/${USER}/.singularity
echo "Container is ${CONTAINER}"
echo "Notebook directory is ${NOTEBOOK_DIR}"
cd ${NOTEBOOK_DIR}

# Create .local folder for default modules, if it doesn't exist
if [ ! -d "${HOME}/.local" ];
then
    echo "Creating local python modules folder to map at ${HOME}/.local";
    mkdir -p "${HOME}/.local";
fi

. /etc/profile
module load singularity/3.4.0
singularity exec --home ${HOME} --bind ${HOME}/.local:/home/username/.local ${CONTAINER} jupyter notebook --no-browser --port=$PORT --ip 0.0.0.0

--------------------------------------------------------------------------------
/sbatches/sherlock/singularity-jupyter.sbatch:
--------------------------------------------------------------------------------
#!/bin/bash

# Usage

# 1. Default Jupyter notebook (with your scratch to work in)
# $ bash start.sh singularity-jupyter

# 2. Default Jupyter notebook with custom working directory
# $ bash start.sh singularity-jupyter /scratch/users/

# 3. Select your own jupyter container on Sherlock!
# $ bash start.sh singularity-jupyter /scratch/users/ /path/to/container

# 4. Or any singularity container...
# $ bash start.sh singularity /path/to/container

PORT=$1
NOTEBOOK_DIR=${2:-${SCRATCH}}
CONTAINER=${3:-/scratch/users/vsochat/share/repo2docker.simg}

export SINGULARITY_CACHEDIR="${SCRATCH}/.singularity"
echo "Container is ${CONTAINER}"
echo "Notebook directory is ${NOTEBOOK_DIR}"
cd ${NOTEBOOK_DIR}

# Create .local folder for default modules, if it doesn't exist
if [ ! -d "${HOME}/.local" ]; then
    echo "Creating local python modules folder to map at ${HOME}/.local";
    mkdir -p "${HOME}/.local";
fi

# If the container is from Docker Hub, pull it first
if [[ "${CONTAINER}" == docker* ]]; then
    singularity pull --name $(basename ${CONTAINER}).sif "${CONTAINER}"
    CONTAINER=$(basename ${CONTAINER}).sif
fi

singularity exec --home ${HOME} --bind ${HOME}/.local:/home/username/.local ${CONTAINER} jupyter notebook --no-browser --port=$PORT --ip 0.0.0.0

--------------------------------------------------------------------------------
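Because the script above pulls docker:// URIs before running them, the third usage form extends to Docker Hub images. A hedged example, assuming start.sh passes the directory and container arguments through as the usage header shows (the image is an arbitrary public one):

    bash start.sh singularity-jupyter /scratch/users/ docker://jupyter/minimal-notebook
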
/start-node.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Starts a remote sbatch job without port forwarding.
# Sample usage: bash start-node.sh singularity docker://ubuntu

if [ ! -f params.sh ]
then
    echo "Need to configure params before first run, run setup.sh!"
    exit
fi
. params.sh

if [ ! -f helpers.sh ]
then
    echo "Cannot find helpers.sh script!"
    exit
fi
. helpers.sh

if [ "$#" -eq 0 ]
then
    echo "Need to give name of sbatch job to run!"
    exit
fi

NAME="${1:-}"

# The user could request either /