#!/bin/bash
#
# SLURM batch script: stages the submit directory into a per-job scratch
# workdir, records detailed job info, then starts the actual work.
# NOTE: every #SBATCH directive must start its own line — sbatch stops
# scanning for directives at the first non-comment line below.
#
#SBATCH --job-name=MEANINGFUL_NAME   # Common name for the running jobs; filename patterns: https://slurm.schedmd.com/sbatch.html#SECTION_%3CB%3Efilename-pattern%3C/B%3E
#SBATCH --mail-type=FAIL             # Type of email notification: BEGIN,END,FAIL,ALL
#SBATCH --mail-user=EMAIL_ADDRESS
#SBATCH --output=%x_%j.out           # STDOUT file with format: [JOB_NAME]_[JOB_ID].out
#SBATCH --error=%x_%j.err            # STDERR file with format: [JOB_NAME]_[JOB_ID].err
#SBATCH --ntasks=1                   # We are using only 1 task per job
#SBATCH --cpus-per-task=1            # !!! if needed change to 2 !!! ; ensuing job steps will require ncpus processors per task
#SBATCH --hint=nomultithread         # [don't] use extra threads with in-core multi-threading; to be checked if this enabled can work with 1 above
## #SBATCH --nodelist=issaf-0-2.issaf,issaf-0-3.issaf,issaf-0-4.issaf,issaf-0-5.issaf  # select a given list of nodes for allocation
## #SBATCH --exclude=issaf-0-0.issaf,issaf-0-1.issaf                                   # exclude these nodes from allocation

# Fail fast: abort on any error, unset variable, or failed pipeline stage.
# (Must come after the #SBATCH header or sbatch would stop parsing it.)
set -euo pipefail

# Define and create a unique scratch directory for this job.
# SLURM_JOB_ID is exported by SLURM inside the job environment.
export SCRATCH_DIRECTORY="/scratch/workdir_${USER}/${SLURM_JOB_ID}"
mkdir -p "${SCRATCH_DIRECTORY}"
cd "${SCRATCH_DIRECTORY}" || exit 1

# Copy everything needed into the scratch directory.
# ${SLURM_SUBMIT_DIR} points to the path where this script was submitted from.
rsync -azWHAXS4 "${SLURM_SUBMIT_DIR}/" "${SCRATCH_DIRECTORY}/"

# Snapshot detailed job information (nodes, resources, ...) for provenance,
# stripping SLURM's leading indentation from each line.
scontrol show jobid -ddd "${SLURM_JOB_ID}" | sed 's/^[ \t]*//g' > "job_info_${SLURM_JOB_ID}.txt"

# This is where the actual work is done.
echo "Starting job @ $(date +%Y%m%d_%H%M%S)"