This function builds a template for a Simple Linux Utility for Resource Management (SLURM) job script, including array jobs. To learn more about array jobs, check this blog post by John Muschelli: https://hopstat.wordpress.com/2013/11/05/array-and-sequential-cluster-jobs/.

job_single(
  name,
  create_shell = FALSE,
  partition = "shared",
  memory = "10G",
  cores = 1L,
  time_limit = "1-00:00:00",
  email = "ALL",
  logdir = "logs",
  task_num = NULL,
  tc = 20,
  command = "Rscript -e \"options(width = 120); sessioninfo::session_info()\"",
  create_logdir = TRUE
)

Arguments

name

A character(1) vector giving either the name or path (relative or absolute) to the shell script to create.

create_shell

A logical(1) vector specifying whether to create a shell file for the script.

partition

A character(1) vector with the name of the SLURM partition. Check how busy a given partition is by running squeue -p [partition].

memory

A character(1) vector with the total amount of memory to request, as a number and unit accepted by the '--mem' SLURM parameter (e.g. '10G').

cores

The number of cores to request, passed to the -c SLURM parameter. Note that memory is the total amount for the job (passed to --mem), not an amount per core.

time_limit

A character(1) vector with the time limit, specified in a format accepted by the --time parameter of sbatch (e.g. '4:00:00'). Defaults to 1 day, following the JHPCE default (https://jhpce.jhu.edu/slurm/time-limits/).

email

The email reporting option, passed to the --mail-type SLURM parameter ("BEGIN", "END", "FAIL", or "ALL").

logdir

The directory to contain the log, as an absolute or relative path.

task_num

The number of tasks for your job, which will turn it into an array job. If NULL, this is ignored and a regular (non-array) job is created.

tc

If task_num is specified, this option controls the number of concurrent tasks (the % throttle in the --array directive); see the sketch right after these arguments.

command

An example command to start your script.

create_logdir

A logical(1) vector specifying whether to create the logdir directory. Note that if logdir doesn't exist and you submit your job with sbatch, the job will immediately fail.
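
As a sketch of how task_num and tc interact (the name 'my_array_job' and the other values below are just placeholders), a call along these lines should generate a script whose array directive throttles concurrency, i.e. something like #SBATCH --array=1-100%10:

job_single(
    "my_array_job",
    memory = "5G",
    cores = 2,
    task_num = 100,
    tc = 10,
    create_logdir = FALSE
)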

Value

A character vector with the script contents. If create_shell = TRUE, the shell script itself is also created.
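
For instance, in this hypothetical sketch (the file name 'my_job.sh' is a placeholder) the script contents are both captured in an R object and, because create_shell = TRUE, written to disk so the job can then be submitted with sbatch:

script <- job_single(
    "my_job.sh",
    create_shell = TRUE,
    memory = "20G",
    cores = 4
)
## create_logdir defaults to TRUE, so the logs/ directory is created too
## Then, from the command line: sbatch my_job.sh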

Details

For a given SLURM job that is currently running, you can alter its options using scontrol update.
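
For example, a hypothetical sketch (the job id 12345 is a placeholder) for extending the time limit of a running job from within an R session:

## Equivalent to running the same scontrol command in a shell
system("scontrol update JobId=12345 TimeLimit=2-00:00:00")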

See also

Other shell-script creation and submission functions: array_submit(), job_loop()

Author

Leonardo Collado-Torres

Nicholas J. Eagles

Examples


## A regular job, specified by name or path
job_single("jhpce_job", create_logdir = FALSE)
#> #!/bin/bash
#> #SBATCH -p shared
#> #SBATCH --mem=10G
#> #SBATCH --job-name=jhpce_job
#> #SBATCH -c 1
#> #SBATCH -t 1-00:00:00
#> #SBATCH -o logs/jhpce_job.txt
#> #SBATCH -e logs/jhpce_job.txt
#> #SBATCH --mail-type=ALL
#> 
#> set -e
#> 
#> echo "**** Job starts ****"
#> date
#> 
#> echo "**** JHPCE info ****"
#> echo "User: ${USER}"
#> echo "Job id: ${SLURM_JOB_ID}"
#> echo "Job name: ${SLURM_JOB_NAME}"
#> echo "Node name: ${HOSTNAME}"
#> echo "Task id: ${SLURM_ARRAY_TASK_ID}"
#> 
#> ## Load the R module
#> module load conda_R/4.4
#> 
#> ## List current modules for reproducibility
#> module list
#> 
#> ## Edit with your job command
#> Rscript -e "options(width = 120); sessioninfo::session_info()"
#> 
#> echo "**** Job ends ****"
#> date
#> 
#> ## This script was made using slurmjobs version 1.2.5
#> ## available from http://research.libd.org/slurmjobs/
#> 
job_single("~/jhpce_job.sh", create_logdir = FALSE)
#> #!/bin/bash
#> #SBATCH -p shared
#> #SBATCH --mem=10G
#> #SBATCH --job-name=jhpce_job
#> #SBATCH -c 1
#> #SBATCH -t 1-00:00:00
#> #SBATCH -o logs/jhpce_job.txt
#> #SBATCH -e logs/jhpce_job.txt
#> #SBATCH --mail-type=ALL
#> 
#> set -e
#> 
#> echo "**** Job starts ****"
#> date
#> 
#> echo "**** JHPCE info ****"
#> echo "User: ${USER}"
#> echo "Job id: ${SLURM_JOB_ID}"
#> echo "Job name: ${SLURM_JOB_NAME}"
#> echo "Node name: ${HOSTNAME}"
#> echo "Task id: ${SLURM_ARRAY_TASK_ID}"
#> 
#> ## Load the R module
#> module load conda_R/4.4
#> 
#> ## List current modules for reproducibility
#> module list
#> 
#> ## Edit with your job command
#> Rscript -e "options(width = 120); sessioninfo::session_info()"
#> 
#> echo "**** Job ends ****"
#> date
#> 
#> ## This script was made using slurmjobs version 1.2.5
#> ## available from http://research.libd.org/slurmjobs/
#> 

## A regular job with 10 cores on the 'imaginary' partition
job_single("jhpce_job",
    cores = 10, partition = "imaginary",
    create_logdir = FALSE
)
#> #!/bin/bash
#> #SBATCH -p imaginary
#> #SBATCH --mem=10G
#> #SBATCH --job-name=jhpce_job
#> #SBATCH -c 10
#> #SBATCH -t 1-00:00:00
#> #SBATCH -o logs/jhpce_job.txt
#> #SBATCH -e logs/jhpce_job.txt
#> #SBATCH --mail-type=ALL
#> 
#> set -e
#> 
#> echo "**** Job starts ****"
#> date
#> 
#> echo "**** JHPCE info ****"
#> echo "User: ${USER}"
#> echo "Job id: ${SLURM_JOB_ID}"
#> echo "Job name: ${SLURM_JOB_NAME}"
#> echo "Node name: ${HOSTNAME}"
#> echo "Task id: ${SLURM_ARRAY_TASK_ID}"
#> 
#> ## Load the R module
#> module load conda_R/4.4
#> 
#> ## List current modules for reproducibility
#> module list
#> 
#> ## Edit with your job command
#> Rscript -e "options(width = 120); sessioninfo::session_info()"
#> 
#> echo "**** Job ends ****"
#> date
#> 
#> ## This script was made using slurmjobs version 1.2.5
#> ## available from http://research.libd.org/slurmjobs/
#> 

## An array job
job_single("jhpce_job_array", task_num = 20, create_logdir = FALSE)
#> #!/bin/bash
#> #SBATCH -p shared
#> #SBATCH --mem=10G
#> #SBATCH --job-name=jhpce_job_array
#> #SBATCH -c 1
#> #SBATCH -t 1-00:00:00
#> #SBATCH -o logs/jhpce_job_array.%a.txt
#> #SBATCH -e logs/jhpce_job_array.%a.txt
#> #SBATCH --mail-type=ALL
#> #SBATCH --array=1-20%20
#> 
#> set -e
#> 
#> echo "**** Job starts ****"
#> date
#> 
#> echo "**** JHPCE info ****"
#> echo "User: ${USER}"
#> echo "Job id: ${SLURM_JOB_ID}"
#> echo "Job name: ${SLURM_JOB_NAME}"
#> echo "Node name: ${HOSTNAME}"
#> echo "Task id: ${SLURM_ARRAY_TASK_ID}"
#> 
#> ## Load the R module
#> module load conda_R/4.4
#> 
#> ## List current modules for reproducibility
#> module list
#> 
#> ## Edit with your job command
#> Rscript -e "options(width = 120); sessioninfo::session_info()"
#> 
#> echo "**** Job ends ****"
#> date
#> 
#> ## This script was made using slurmjobs version 1.2.5
#> ## available from http://research.libd.org/slurmjobs/
#>