This function builds a template for a Simple Linux Utility for Resource Management (SLURM) job script, including support for array jobs. Check this blog post by John Muschelli to learn more about array jobs: https://hopstat.wordpress.com/2013/11/05/array-and-sequential-cluster-jobs/.
job_single(
name,
create_shell = FALSE,
partition = "shared",
memory = "10G",
cores = 1L,
time_limit = "1-00:00:00",
email = "ALL",
logdir = "logs",
task_num = NULL,
tc = 20,
command = "Rscript -e \"options(width = 120); sessioninfo::session_info()\"",
create_logdir = TRUE
)
A character(1)
vector giving either the name or path (relative
or absolute) to the shell script to create.
A logical(1)
vector specifying whether to create a
shell file for the script.
A character(1)
vector with the name of the SLURM
partition. Check how busy a given partition is by running
squeue -p [partition]
.
character(1): the amount of memory in total to request, as a number and unit accepted by the '--mem' SLURM parameter (e.g. '10G')
The number of cores to request. Note that because memory
is passed to the --mem
SLURM parameter, it specifies the total memory for the job
regardless of the number of cores requested (unlike --mem-per-cpu
).
character(1): time limit specified in a format accepted by
the --time
parameter to sbatch
(e.g. "4:00:00"). Defaults to 1
day, following the JHPCE default (https://jhpce.jhu.edu/slurm/time-limits/).
The email reporting option for the email address ("BEGIN", "END", "FAIL", or "ALL")
The directory to contain the log, as an absolute or relative path.
The number of tasks for your job, which will make it into an
array job. If NULL
this is ignored.
If task_num
is specified, this option controls the number of
concurrent tasks.
An example command to start your script.
A logical(1)
vector specifying whether to create the
logdir
directory. Note that if logdir
doesn't exist and you submit your
job with sbatch
, it will immediately fail.
A character vector with the script contents. If create_shell = TRUE
was
specified then it also creates the actual script file.
For a given SLURM job that is currently running you can alter
the options using scontrol update
.
Other shell-script creation and submission functions:
array_submit()
,
job_loop()
## A regular job, specified by name or path
job_single("jhpce_job", create_logdir = FALSE)
#> #!/bin/bash
#> #SBATCH -p shared
#> #SBATCH --mem=10G
#> #SBATCH --job-name=jhpce_job
#> #SBATCH -c 1
#> #SBATCH -t 1-00:00:00
#> #SBATCH -o logs/jhpce_job.txt
#> #SBATCH -e logs/jhpce_job.txt
#> #SBATCH --mail-type=ALL
#>
#> set -e
#>
#> echo "**** Job starts ****"
#> date
#>
#> echo "**** JHPCE info ****"
#> echo "User: ${USER}"
#> echo "Job id: ${SLURM_JOB_ID}"
#> echo "Job name: ${SLURM_JOB_NAME}"
#> echo "Node name: ${HOSTNAME}"
#> echo "Task id: ${SLURM_ARRAY_TASK_ID}"
#>
#> ## Load the R module
#> module load conda_R/4.4
#>
#> ## List current modules for reproducibility
#> module list
#>
#> ## Edit with your job command
#> Rscript -e "options(width = 120); sessioninfo::session_info()"
#>
#> echo "**** Job ends ****"
#> date
#>
#> ## This script was made using slurmjobs version 1.2.5
#> ## available from http://research.libd.org/slurmjobs/
#>
job_single("~/jhpce_job.sh", create_logdir = FALSE)
#> #!/bin/bash
#> #SBATCH -p shared
#> #SBATCH --mem=10G
#> #SBATCH --job-name=jhpce_job
#> #SBATCH -c 1
#> #SBATCH -t 1-00:00:00
#> #SBATCH -o logs/jhpce_job.txt
#> #SBATCH -e logs/jhpce_job.txt
#> #SBATCH --mail-type=ALL
#>
#> set -e
#>
#> echo "**** Job starts ****"
#> date
#>
#> echo "**** JHPCE info ****"
#> echo "User: ${USER}"
#> echo "Job id: ${SLURM_JOB_ID}"
#> echo "Job name: ${SLURM_JOB_NAME}"
#> echo "Node name: ${HOSTNAME}"
#> echo "Task id: ${SLURM_ARRAY_TASK_ID}"
#>
#> ## Load the R module
#> module load conda_R/4.4
#>
#> ## List current modules for reproducibility
#> module list
#>
#> ## Edit with your job command
#> Rscript -e "options(width = 120); sessioninfo::session_info()"
#>
#> echo "**** Job ends ****"
#> date
#>
#> ## This script was made using slurmjobs version 1.2.5
#> ## available from http://research.libd.org/slurmjobs/
#>
## A regular job with 10 cores on the 'imaginary' partition
job_single("jhpce_job",
cores = 10, partition = "imaginary",
create_logdir = FALSE
)
#> #!/bin/bash
#> #SBATCH -p imaginary
#> #SBATCH --mem=10G
#> #SBATCH --job-name=jhpce_job
#> #SBATCH -c 10
#> #SBATCH -t 1-00:00:00
#> #SBATCH -o logs/jhpce_job.txt
#> #SBATCH -e logs/jhpce_job.txt
#> #SBATCH --mail-type=ALL
#>
#> set -e
#>
#> echo "**** Job starts ****"
#> date
#>
#> echo "**** JHPCE info ****"
#> echo "User: ${USER}"
#> echo "Job id: ${SLURM_JOB_ID}"
#> echo "Job name: ${SLURM_JOB_NAME}"
#> echo "Node name: ${HOSTNAME}"
#> echo "Task id: ${SLURM_ARRAY_TASK_ID}"
#>
#> ## Load the R module
#> module load conda_R/4.4
#>
#> ## List current modules for reproducibility
#> module list
#>
#> ## Edit with your job command
#> Rscript -e "options(width = 120); sessioninfo::session_info()"
#>
#> echo "**** Job ends ****"
#> date
#>
#> ## This script was made using slurmjobs version 1.2.5
#> ## available from http://research.libd.org/slurmjobs/
#>
## An array job
job_single("jhpce_job_array", task_num = 20, create_logdir = FALSE)
#> #!/bin/bash
#> #SBATCH -p shared
#> #SBATCH --mem=10G
#> #SBATCH --job-name=jhpce_job_array
#> #SBATCH -c 1
#> #SBATCH -t 1-00:00:00
#> #SBATCH -o logs/jhpce_job_array.%a.txt
#> #SBATCH -e logs/jhpce_job_array.%a.txt
#> #SBATCH --mail-type=ALL
#> #SBATCH --array=1-20%20
#>
#> set -e
#>
#> echo "**** Job starts ****"
#> date
#>
#> echo "**** JHPCE info ****"
#> echo "User: ${USER}"
#> echo "Job id: ${SLURM_JOB_ID}"
#> echo "Job name: ${SLURM_JOB_NAME}"
#> echo "Node name: ${HOSTNAME}"
#> echo "Task id: ${SLURM_ARRAY_TASK_ID}"
#>
#> ## Load the R module
#> module load conda_R/4.4
#>
#> ## List current modules for reproducibility
#> module list
#>
#> ## Edit with your job command
#> Rscript -e "options(width = 120); sessioninfo::session_info()"
#>
#> echo "**** Job ends ****"
#> date
#>
#> ## This script was made using slurmjobs version 1.2.5
#> ## available from http://research.libd.org/slurmjobs/
#>