Follow these three steps to create a valid submission script for the targeted cluster.
- Step 1: Fill in the form in the left-most box with all relevant information, specify the parallelization mechanism, and list the needed resources.
- Step 2: Based on that information, the list of clusters in the middle box is automatically restricted to those that can offer the requested resources. Choose the one you intend to use.
- Step 3: Copy-paste the script shown in the right-most box to the intended cluster and adapt it, possibly with additional Slurm options and, more importantly, with the actual program you want to run. Then you are ready to submit the job, as in the example below.
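For example, if the generated script is saved as job.sh, copying it to the cluster front-end and submitting it would look like the following sketch (the login and host names are placeholders; use sbatch on Slurm clusters and qsub on the PBS cluster):

scp job.sh <login>@<cluster-frontend>:    # copy the generated script to the front-end
ssh <login>@<cluster-frontend>
sbatch job.sh                             # submit on a Slurm cluster
qsub job.sh                               # submit on the PBS cluster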
1. Describe your job
Email address:
Job name:
Project:
Output file:
(the '$' character is not allowed in these fields)
Parallelization paradigm(s)
Job resources
Number of jobs in the array:
Duration: days, hours, minutes.
Number of processes:
Number of threads per process:
Memory per thread per process:
Number of GPUs:
Process distribution:
Number of nodes:
Filesystem:
Summary: Total CPUs | Total Memory (MB) | Total CPU.Hours | Total GPUs | Total BUhours
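These totals are derived from the resource fields above. The exact formulas used by the wizard are not shown on the page, so the bash sketch below only illustrates a plausible relation, with made-up example values:

# Hypothetical illustration of how the summary figures relate to the form fields.
NPROC=4               # Number of processes
NTHREADS=6            # Number of threads per process
MEM_PER_THREAD=2625   # Memory per thread per process, in MB
DAYS=0; HOURS=12      # Duration (minutes ignored in this sketch)

TOTAL_CPUS=$(( NPROC * NTHREADS ))
TOTAL_MEM=$(( TOTAL_CPUS * MEM_PER_THREAD ))      # MB
DURATION_H=$(( DAYS * 24 + HOURS ))
TOTAL_CPU_HOURS=$(( TOTAL_CPUS * DURATION_H ))

echo "Total CPUs: $TOTAL_CPUS | Total Memory: ${TOTAL_MEM} MB | Total CPU.Hours: $TOTAL_CPU_HOURS"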
2. Choose a cluster
3. Copy-paste your script
#!/bin/bash
# Submission script for <cluster_name>
#PBS -N <job_name>
#PBS -q <queue_name>                 # e.g. 'main' or 'large', depending on the cluster
#PBS -r y                            # the job is rerunnable
#PBS -W group_list=<project_name>
#PBS -J 1-<number_of_jobs_in_array>  # only for job arrays
#
#PBS -l walltime=<hh:mm:ss>
# Keep the 'select' line that matches your parallelization paradigm:
#PBS -l select=<number_of_chunks>
#PBS -l select=<number_of_chunks>:ncpus=<threads_per_process>:ompthreads=<threads_per_process>:mem=<memory_per_chunk>mb
#PBS -l select=<number_of_chunks>:ncpus=<cpus_per_chunk>:mpiprocs=<processes_per_chunk>:ompthreads=<threads_per_process>:mem=<memory_per_chunk>mb
# Placement policy: 'pack' groups the chunks on as few nodes as possible, 'scatter' spreads them across nodes.
#PBS -l place=pack
#PBS -l place=scatter
#
#PBS -M <email_address>
#PBS -m abe                          # send mail on abort, begin and end
exec > ${PBS_O_WORKDIR}/${PBS_JOBNAME}_${PBS_JOBID}.log
echo "------------------ Work dir --------------------"
cd ${PBS_O_WORKDIR} && echo ${PBS_O_WORKDIR}
echo "------------------ Job Info --------------------"
echo "Task ID: $PBS_ARRAYID"
echo "jobid : $PBS_JOBID"
echo "jobname : $PBS_JOBNAME"
echo "job type : $PBS_ENVIRONMENT"
echo "submit dir : $PBS_O_WORKDIR"
echo "queue : $PBS_O_QUEUE"
echo "user : $PBS_O_LOGNAME"
echo "threads : $OMP_NUM_THREADS"
YOUR CODE HERE
qstat -f $PBS_JOBID
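Assuming the completed template is saved as job.pbs on the front-end of the PBS cluster, submission and monitoring rely on the standard PBS commands (the file name and job id are placeholders):

qsub job.pbs         # submit; prints the job id
qstat -u $USER       # list your jobs and their states
qstat -f <job_id>    # full details for one job
qdel <job_id>        # cancel the job if needed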
#!/bin/bash
# Submission script for <cluster_name>
#SBATCH --job-name=<job_name>
#SBATCH --array=1-<number_of_jobs_in_array>   # only for job arrays
#SBATCH --time=<days>-<hh>:<mm>:00 # days-hh:mm:ss
#
# Keep the process-distribution options that match your choices in the form:
#SBATCH --ntasks=<number_of_processes>
#SBATCH --ntasks-per-node=<processes_per_node>
#SBATCH --nodes=<number_of_nodes>
#SBATCH --cpus-per-task=<threads_per_process>
#SBATCH --gres=gpu:<number_of_gpus>
#SBATCH --mem-per-cpu=<memory_per_thread> # in megabytes
#SBATCH --partition=<partition_name>
#
#SBATCH --mail-user=<email_address>
#SBATCH --mail-type=ALL
#
#SBATCH --account=<project_name>
#
#SBATCH --output=<output_file>
module purge
module load LIST_THE_MODULES_YOU_NEED_HERE
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK   # or set explicitly to <threads_per_process>
export MKL_NUM_THREADS=$SLURM_CPUS_PER_TASK
echo "Task ID: $SLURM_ARRAY_TASK_ID"
# Move to the filesystem chosen in the form (keep only the relevant line):
cd $CECIHOME
cd $CECIPROJ/
mkdir -p "$GLOBALSCRATCH/$SLURM_JOB_ID"
# If the node-local scratch is used: stage the input in, run there, then stage the results out.
mkdir -p "$LOCALSCRATCH/$SLURM_JOB_ID"
cp -r "$SLURM_SUBMIT_DIR"/{your_code,your_input_data} "$LOCALSCRATCH/$SLURM_JOB_ID"
cd "$LOCALSCRATCH/$SLURM_JOB_ID"

mpirun ARGUMENTS_FOR_MPIRUN YOUR_PROGRAM_HERE ARGUMENTS_FOR_YOUR_PROGRAM

cp -r "$LOCALSCRATCH/$SLURM_JOB_ID/your_output_data" "$SLURM_SUBMIT_DIR/" && \
rm -rf "$LOCALSCRATCH/$SLURM_JOB_ID"
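For the Slurm version, assuming the adapted script is saved as job.sh, the usual workflow relies on the standard Slurm commands (file name and job id are placeholders):

sbatch job.sh               # submit; prints "Submitted batch job <jobid>"
squeue -u $USER             # state of your pending and running jobs
scontrol show job <jobid>   # full details while the job exists
sacct -j <jobid>            # accounting information once it has finished
scancel <jobid>             # cancel the job if needed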
# Please note that nodes are allocated exclusively on the 'large' queue, so the number of CPUs should ideally be a multiple of 24.
# Please note that the memory per CPU should preferably be limited to 2625MB on the compute nodes.
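As a worked example combining those two notes, one full 24-core node amounts to at most 24 x 2625 MB = 63000 MB per chunk, so a corresponding request could look like this sketch:

#PBS -l select=1:ncpus=24:ompthreads=24:mem=63000mb   # one exclusive 24-core node, 24 x 2625 MB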