Importing scripts#
This tutorial imports all the example batch scripts for Raven and round-trips them through the generator.
[1]:
from slurm_script_generator.slurm_script import SlurmScript
%load_ext autoreload
%autoreload 2
[2]:
# https://docs.mpcdf.mpg.de/doc/computing/raven-user-guide.html#mpi-batch-job-without-hyperthreading
# Example 1: pure-MPI batch job without hyperthreading, copied verbatim
# from the MPCDF Raven user guide (the string content must stay byte-exact
# for the regenerated output below to match).
script = """
#!/bin/bash -l
# Standard output and error:
#SBATCH -o ./job.out.%j
#SBATCH -e ./job.err.%j
# Initial working directory:
#SBATCH -D ./
# Job Name:
#SBATCH -J test_slurm
#
# Number of nodes and MPI tasks per node:
#SBATCH --nodes=16
#SBATCH --ntasks-per-node=72
#
#SBATCH --mail-type=none
#SBATCH --mail-user=userid@example.mpg.de
#
# Wall clock limit (max. is 24 hours):
#SBATCH --time=12:00:00
# Load compiler and MPI modules (must be the same as used for compiling the code)
module purge
module load intel/21.2.0 impi/2021.2
# Run the program:
srun ./myprog > prog.out
"""
# Parse the raw script and print the regenerated, annotated version
# (shown in the output below).
slurm_script = SlurmScript.from_script(script)
print(slurm_script)
#!/bin/bash
########################################################
# This script was generated using #
# slurm-script-generator v0.3.2 #
# https://github.com/max-models/slurm-script-generator #
# `pip install slurm-script-generator==0.3.2` #
########################################################
########################################################
# Pragmas for Job Config #
#SBATCH --job-name=test_slurm # name of job
# #
# Pragmas for Time And Priority #
#SBATCH --time=12:00:00 # time limit
# #
# Pragmas for Io And Directory #
#SBATCH --chdir=./ # change working directory
#SBATCH --stdout=./job.out.%j # File to redirect stdout (%%x=jobname, %%j=jobid)
#SBATCH --stderr=./job.err.%j # File to redirect stderr (%%x=jobname, %%j=jobid)
# #
# Pragmas for Notifications #
#SBATCH --mail-user=userid@example.mpg.de # who to send email notification for job state changes
#SBATCH --mail-type=none # notify on state change
# #
# Pragmas for Core Node And Task Allocation #
#SBATCH --nodes=16 # number of nodes on which to run
#SBATCH --ntasks-per-node=72 # number of tasks to invoke on each node
########################################################
module purge # Purge modules
module load intel/21.2.0 impi/2021.2 # modules
module list # List loaded modules
srun ./myprog > prog.out
[3]:
# https://docs.mpcdf.mpg.de/doc/computing/raven-user-guide.html#hybrid-mpi-openmp-batch-job-without-hyperthreading
# Example 2: hybrid MPI/OpenMP batch job without hyperthreading, copied
# verbatim from the Raven user guide.
script = """
#!/bin/bash -l
# Standard output and error:
#SBATCH -o ./job_hybrid.out.%j
#SBATCH -e ./job_hybrid.err.%j
# Initial working directory:
#SBATCH -D ./
# Job Name:
#SBATCH -J test_slurm
#
# Number of nodes and MPI tasks per node:
#SBATCH --nodes=16
#SBATCH --ntasks-per-node=4
# for OpenMP:
#SBATCH --cpus-per-task=18
#
#SBATCH --mail-type=none
#SBATCH --mail-user=userid@example.mpg.de
#
# Wall clock limit (max. is 24 hours):
#SBATCH --time=12:00:00
# Load compiler and MPI modules (must be the same as used for compiling the code)
module purge
module load intel/21.2.0 impi/2021.2
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
# For pinning threads correctly:
export OMP_PLACES=cores
# Run the program:
srun ./myprog > prog.out
"""
# Parse the raw script and print the regenerated, annotated version
# (shown in the output below).
slurm_script = SlurmScript.from_script(script)
print(slurm_script)
#!/bin/bash
########################################################
# This script was generated using #
# slurm-script-generator v0.3.2 #
# https://github.com/max-models/slurm-script-generator #
# `pip install slurm-script-generator==0.3.2` #
########################################################
########################################################
# Pragmas for Job Config #
#SBATCH --job-name=test_slurm # name of job
# #
# Pragmas for Time And Priority #
#SBATCH --time=12:00:00 # time limit
# #
# Pragmas for Io And Directory #
#SBATCH --chdir=./ # change working directory
#SBATCH --stdout=./job_hybrid.out.%j # File to redirect stdout (%%x=jobname, %%j=jobid)
#SBATCH --stderr=./job_hybrid.err.%j # File to redirect stderr (%%x=jobname, %%j=jobid)
# #
# Pragmas for Notifications #
#SBATCH --mail-user=userid@example.mpg.de # who to send email notification for job state changes
#SBATCH --mail-type=none # notify on state change
# #
# Pragmas for Core Node And Task Allocation #
#SBATCH --nodes=16 # number of nodes on which to run
#SBATCH --ntasks-per-node=4 # number of tasks to invoke on each node
#SBATCH --cpus-per-task=18 # number of cpus required per task
########################################################
module purge # Purge modules
module load intel/21.2.0 impi/2021.2 # modules
module list # List loaded modules
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
export OMP_PLACES=cores
srun ./myprog > prog.out
[4]:
# https://docs.mpcdf.mpg.de/doc/computing/raven-user-guide.html#hybrid-mpi-openmp-batch-job-in-hyperthreading-mode
# Example 3: hybrid MPI/OpenMP batch job in hyperthreading mode
# (--ntasks-per-core=2), copied verbatim from the Raven user guide.
script = """
#!/bin/bash -l
# Standard output and error:
#SBATCH -o ./job_hybrid.out.%j
#SBATCH -e ./job_hybrid.err.%j
# Initial working directory:
#SBATCH -D ./
# Job Name:
#SBATCH -J test_slurm
#
# Number of nodes and MPI tasks per node:
#SBATCH --nodes=32
#SBATCH --ntasks-per-node=4
# Enable Hyperthreading:
#SBATCH --ntasks-per-core=2
# for OpenMP:
#SBATCH --cpus-per-task=36
#
#SBATCH --mail-type=none
#SBATCH --mail-user=userid@example.mpg.de
#
# Wall clock Limit (max. is 24 hours):
#SBATCH --time=12:00:00
# Load compiler and MPI modules (must be the same as used for compiling the code)
module purge
module load intel/21.2.0 impi/2021.2
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
# For pinning threads correctly:
export OMP_PLACES=threads
# Run the program:
srun ./myprog > prog.out
"""
# Parse the raw script and print the regenerated, annotated version
# (shown in the output below).
slurm_script = SlurmScript.from_script(script)
print(slurm_script)
#!/bin/bash
########################################################
# This script was generated using #
# slurm-script-generator v0.3.2 #
# https://github.com/max-models/slurm-script-generator #
# `pip install slurm-script-generator==0.3.2` #
########################################################
########################################################
# Pragmas for Job Config #
#SBATCH --job-name=test_slurm # name of job
# #
# Pragmas for Time And Priority #
#SBATCH --time=12:00:00 # time limit
# #
# Pragmas for Io And Directory #
#SBATCH --chdir=./ # change working directory
#SBATCH --stdout=./job_hybrid.out.%j # File to redirect stdout (%%x=jobname, %%j=jobid)
#SBATCH --stderr=./job_hybrid.err.%j # File to redirect stderr (%%x=jobname, %%j=jobid)
# #
# Pragmas for Notifications #
#SBATCH --mail-user=userid@example.mpg.de # who to send email notification for job state changes
#SBATCH --mail-type=none # notify on state change
# #
# Pragmas for Core Node And Task Allocation #
#SBATCH --nodes=32 # number of nodes on which to run
#SBATCH --ntasks-per-node=4 # number of tasks to invoke on each node
#SBATCH --cpus-per-task=36 # number of cpus required per task
# #
# Pragmas for Cpu Topology And Binding #
#SBATCH --ntasks-per-core=2 # number of tasks to invoke on each core
########################################################
module purge # Purge modules
module load intel/21.2.0 impi/2021.2 # modules
module list # List loaded modules
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
export OMP_PLACES=threads
srun ./myprog > prog.out
[5]:
# https://docs.mpcdf.mpg.de/doc/computing/raven-user-guide.html#small-mpi-batch-job-on-a-shared-node
# Example 4: small MPI batch job on a shared node (--ntasks/--mem instead of
# full-node allocation), copied verbatim from the Raven user guide.
script = """
#!/bin/bash -l
# Standard output and error:
#SBATCH -o ./job.out.%j
#SBATCH -e ./job.err.%j
# Initial working directory:
#SBATCH -D ./
# Job Name:
#SBATCH -J test_slurm
#
# Number of MPI Tasks, e.g. 8:
#SBATCH --ntasks=8
# Memory usage [MB] of the job is required, e.g. 3000 MB per task:
#SBATCH --mem=24000
#
#SBATCH --mail-type=none
#SBATCH --mail-user=userid@example.mpg.de
#
# Wall clock limit (max. is 24 hours):
#SBATCH --time=12:00:00
# Load compiler and MPI modules (must be the same as used for compiling the code)
module purge
module load intel/21.2.0 impi/2021.2
# Run the program:
srun ./myprog > prog.out
"""
# Parse the raw script and print the regenerated, annotated version
# (shown in the output below).
slurm_script = SlurmScript.from_script(script)
print(slurm_script)
#!/bin/bash
########################################################
# This script was generated using #
# slurm-script-generator v0.3.2 #
# https://github.com/max-models/slurm-script-generator #
# `pip install slurm-script-generator==0.3.2` #
########################################################
########################################################
# Pragmas for Job Config #
#SBATCH --job-name=test_slurm # name of job
# #
# Pragmas for Time And Priority #
#SBATCH --time=12:00:00 # time limit
# #
# Pragmas for Io And Directory #
#SBATCH --chdir=./ # change working directory
#SBATCH --stdout=./job.out.%j # File to redirect stdout (%%x=jobname, %%j=jobid)
#SBATCH --stderr=./job.err.%j # File to redirect stderr (%%x=jobname, %%j=jobid)
# #
# Pragmas for Notifications #
#SBATCH --mail-user=userid@example.mpg.de # who to send email notification for job state changes
#SBATCH --mail-type=none # notify on state change
# #
# Pragmas for Core Node And Task Allocation #
#SBATCH --ntasks=8 # number of processors required
# #
# Pragmas for Memory #
#SBATCH --mem=24000 # minimum amount of real memory
########################################################
module purge # Purge modules
module load intel/21.2.0 impi/2021.2 # modules
module list # List loaded modules
srun ./myprog > prog.out
[6]:
# https://docs.mpcdf.mpg.de/doc/computing/raven-user-guide.html#gpu-job-using-1-2-or-4-gpus-on-a-single-node
# Example 5: GPU job using 1, 2 or 4 GPUs on a single node, copied verbatim
# from the Raven user guide. Note the commented-out "# #SBATCH" variants for
# 2 and 4 GPUs are part of the raw string and are (correctly) not emitted as
# pragmas in the regenerated output.
script = """
#!/bin/bash -l
# Standard output and error:
#SBATCH -o ./job.out.%j
#SBATCH -e ./job.err.%j
# Initial working directory:
#SBATCH -D ./
# Job name
#SBATCH -J test_gpu
#
#SBATCH --ntasks=1
#SBATCH --constraint="gpu"
#
# --- default case: use a single GPU on a shared node ---
#SBATCH --gres=gpu:a100:1
#SBATCH --cpus-per-task=18
#SBATCH --mem=125000
#
# --- uncomment to use 2 GPUs on a shared node ---
# #SBATCH --gres=gpu:a100:2
# #SBATCH --cpus-per-task=36
# #SBATCH --mem=250000
#
# --- uncomment to use 4 GPUs on a full node ---
# #SBATCH --gres=gpu:a100:4
# #SBATCH --cpus-per-task=72
# #SBATCH --mem=500000
#
#SBATCH --mail-type=none
#SBATCH --mail-user=userid@example.mpg.de
#SBATCH --time=12:00:00
module purge
module load intel/21.2.0 impi/2021.2 cuda/11.2
export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}
srun ./cuda_executable
"""
# Parse the raw script and print the regenerated, annotated version
# (shown in the output below).
slurm_script = SlurmScript.from_script(script)
print(slurm_script)
#!/bin/bash
########################################################
# This script was generated using #
# slurm-script-generator v0.3.2 #
# https://github.com/max-models/slurm-script-generator #
# `pip install slurm-script-generator==0.3.2` #
########################################################
########################################################
# Pragmas for Job Config #
#SBATCH --job-name=test_gpu # name of job
# #
# Pragmas for Time And Priority #
#SBATCH --time=12:00:00 # time limit
# #
# Pragmas for Io And Directory #
#SBATCH --chdir=./ # change working directory
#SBATCH --stdout=./job.out.%j # File to redirect stdout (%%x=jobname, %%j=jobid)
#SBATCH --stderr=./job.err.%j # File to redirect stderr (%%x=jobname, %%j=jobid)
# #
# Pragmas for Notifications #
#SBATCH --mail-user=userid@example.mpg.de # who to send email notification for job state changes
#SBATCH --mail-type=none # notify on state change
# #
# Pragmas for Core Node And Task Allocation #
#SBATCH --ntasks=1 # number of processors required
#SBATCH --cpus-per-task=18 # number of cpus required per task
# #
# Pragmas for Memory #
#SBATCH --mem=125000 # minimum amount of real memory
# #
# Pragmas for Generic Resources And Licenses #
#SBATCH --gres=gpu:a100:1 # required generic resources
# #
# Pragmas for Node Constraints And Selection #
#SBATCH --constraint="gpu" # specify a list of constraints
########################################################
module purge # Purge modules
module load intel/21.2.0 impi/2021.2 cuda/11.2 # modules
module list # List loaded modules
export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}
srun ./cuda_executable
[7]:
# https://docs.mpcdf.mpg.de/doc/computing/raven-user-guide.html#hybrid-mpi-openmp-job-using-one-or-more-nodes-with-4-gpus-each
# Example 6: hybrid MPI/OpenMP job on full nodes with 4 GPUs each (one task
# per GPU), copied verbatim from the Raven user guide. This example uses
# inline "# ..." comments after the SBATCH options.
script = """
#!/bin/bash -l
# Standard output and error:
#SBATCH -o ./job.out.%j
#SBATCH -e ./job.err.%j
# Initial working directory:
#SBATCH -D ./
# Job name
#SBATCH -J test_gpu
#
#SBATCH --nodes=1 # Request 1 or more full nodes
#SBATCH --constraint="gpu" # providing GPUs.
#SBATCH --gres=gpu:a100:4 # Request 4 GPUs per node.
#SBATCH --ntasks-per-node=4 # Run one task per GPU
#SBATCH --cpus-per-task=18 # using 18 cores each.
#SBATCH --mail-type=none
#SBATCH --mail-user=userid@example.mpg.de
#SBATCH --time=12:00:00
module purge
module load intel/21.2.0 impi/2021.2 cuda/11.2
export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}
srun ./mpi_openmp_cuda_executable
"""
# Parse the raw script and print the regenerated, annotated version
# (shown in the output below).
slurm_script = SlurmScript.from_script(script)
print(slurm_script)
#!/bin/bash
########################################################
# This script was generated using #
# slurm-script-generator v0.3.2 #
# https://github.com/max-models/slurm-script-generator #
# `pip install slurm-script-generator==0.3.2` #
########################################################
########################################################
# Pragmas for Job Config #
#SBATCH --job-name=test_gpu # name of job
# #
# Pragmas for Time And Priority #
#SBATCH --time=12:00:00 # time limit
# #
# Pragmas for Io And Directory #
#SBATCH --chdir=./ # change working directory
#SBATCH --stdout=./job.out.%j # File to redirect stdout (%%x=jobname, %%j=jobid)
#SBATCH --stderr=./job.err.%j # File to redirect stderr (%%x=jobname, %%j=jobid)
# #
# Pragmas for Notifications #
#SBATCH --mail-user=userid@example.mpg.de # who to send email notification for job state changes
#SBATCH --mail-type=none # notify on state change
# #
# Pragmas for Core Node And Task Allocation #
#SBATCH --nodes=1 # number of nodes on which to run
#SBATCH --ntasks-per-node=4 # number of tasks to invoke on each node
#SBATCH --cpus-per-task=18 # number of cpus required per task
# #
# Pragmas for Generic Resources And Licenses #
#SBATCH --gres=gpu:a100:4 # required generic resources
# #
# Pragmas for Node Constraints And Selection #
#SBATCH --constraint="gpu" # specify a list of constraints
########################################################
module purge # Purge modules
module load intel/21.2.0 impi/2021.2 cuda/11.2 # modules
module list # List loaded modules
export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}
srun ./mpi_openmp_cuda_executable
[8]:
# https://docs.mpcdf.mpg.de/doc/computing/raven-user-guide.html#plain-mpi-job-using-gpus
# Example 7: plain MPI job using GPUs with NVIDIA MPS (--nvmps), copied
# verbatim from the Raven user guide.
# NOTE(review): the generator renders the valueless --nvmps flag as
# "#SBATCH --nvmps=" (trailing "=") in the output below — looks like a
# round-trip quirk for flag-style options; confirm upstream.
script = """
#!/bin/bash -l
# Standard output and error:
#SBATCH -o ./job.out.%j
#SBATCH -e ./job.err.%j
# Initial working directory:
#SBATCH -D ./
# Job name
#SBATCH -J test_slurm
#
#SBATCH --nodes=1 # Request 1 (or more) node(s)
#SBATCH --constraint="gpu" # providing GPUs.
#SBATCH --ntasks-per-node=72 # Launch 72 tasks per node
#SBATCH --gres=gpu:a100:4 # Request all 4 GPUs of each node
#SBATCH --nvmps # Launch NVIDIA MPS to enable concurrent access to the GPUs from multiple processes efficiently
#
#SBATCH --mail-type=none
#SBATCH --mail-user=userid@example.mpg.de
#SBATCH --time=12:00:00
module purge
module load intel/21.2.0 impi/2021.2 cuda/11.2
srun ./mpi_cuda_executable
"""
# Parse the raw script and print the regenerated, annotated version
# (shown in the output below).
slurm_script = SlurmScript.from_script(script)
print(slurm_script)
#!/bin/bash
########################################################
# This script was generated using #
# slurm-script-generator v0.3.2 #
# https://github.com/max-models/slurm-script-generator #
# `pip install slurm-script-generator==0.3.2` #
########################################################
########################################################
# Pragmas for Job Config #
#SBATCH --job-name=test_slurm # name of job
# #
# Pragmas for Time And Priority #
#SBATCH --time=12:00:00 # time limit
# #
# Pragmas for Io And Directory #
#SBATCH --chdir=./ # change working directory
#SBATCH --stdout=./job.out.%j # File to redirect stdout (%%x=jobname, %%j=jobid)
#SBATCH --stderr=./job.err.%j # File to redirect stderr (%%x=jobname, %%j=jobid)
# #
# Pragmas for Notifications #
#SBATCH --mail-user=userid@example.mpg.de # who to send email notification for job state changes
#SBATCH --mail-type=none # notify on state change
# #
# Pragmas for Core Node And Task Allocation #
#SBATCH --nodes=1 # number of nodes on which to run
#SBATCH --ntasks-per-node=72 # number of tasks to invoke on each node
# #
# Pragmas for Gpus #
#SBATCH --nvmps= # launching NVIDIA MPS for job
# #
# Pragmas for Generic Resources And Licenses #
#SBATCH --gres=gpu:a100:4 # required generic resources
# #
# Pragmas for Node Constraints And Selection #
#SBATCH --constraint="gpu" # specify a list of constraints
########################################################
module purge # Purge modules
module load intel/21.2.0 impi/2021.2 cuda/11.2 # modules
module list # List loaded modules
srun ./mpi_cuda_executable
[9]:
# https://docs.mpcdf.mpg.de/doc/computing/raven-user-guide.html#batch-jobs-with-dependencies
# Example 8: a plain shell driver that submits a chain of dependent jobs
# via sbatch --dependency. It contains no #SBATCH pragmas, so the
# regenerated output below has an empty pragma section and only the
# shell body survives the round trip.
script = """
#!/bin/bash
# Submit a sequence of batch jobs with dependencies
#
# Number of jobs to submit:
NR_OF_JOBS=6
# Batch job script:
JOB_SCRIPT=./my_batch_script
echo "Submitting job chain of ${NR_OF_JOBS} jobs for batch script ${JOB_SCRIPT}:"
JOBID=$(sbatch ${JOB_SCRIPT} 2>&1 | awk '{print $(NF)}')
echo " " ${JOBID}
I=1
while [ ${I} -lt ${NR_OF_JOBS} ]; do
JOBID=$(sbatch --dependency=afterany:${JOBID} ${JOB_SCRIPT} 2>&1 | awk '{print $(NF)}')
echo " " ${JOBID}
let I=${I}+1
done
"""
# Parse the raw script and print the regenerated version
# (shown in the output below).
slurm_script = SlurmScript.from_script(script)
print(slurm_script)
#!/bin/bash
########################################################
# This script was generated using #
# slurm-script-generator v0.3.2 #
# https://github.com/max-models/slurm-script-generator #
# `pip install slurm-script-generator==0.3.2` #
########################################################
########################################################
########################################################
NR_OF_JOBS=6
JOB_SCRIPT=./my_batch_script
echo "Submitting job chain of ${NR_OF_JOBS} jobs for batch script ${JOB_SCRIPT}:"
JOBID=$(sbatch ${JOB_SCRIPT} 2>&1 | awk '{print $(NF)}')
echo " " ${JOBID}
I=1
while [ ${I} -lt ${NR_OF_JOBS} ]; do
JOBID=$(sbatch --dependency=afterany:${JOBID} ${JOB_SCRIPT} 2>&1 | awk '{print $(NF)}')
echo " " ${JOBID}
let I=${I}+1
done
[10]:
# https://docs.mpcdf.mpg.de/doc/computing/raven-user-guide.html#batch-job-using-a-job-array
# Example 9: batch job using a job array (--array=1-20), copied verbatim
# from the Raven user guide.
# NOTE(review): the -o/-e lines here carry inline "# ..." comments; in the
# regenerated output below the generator emits
# "--stdout=job ID, %a = job array index" (and similarly for --stderr),
# i.e. the inline comment leaks into the option value — confirm upstream.
script = """
#!/bin/bash -l
# specify the indexes (max. 30000) of the job array elements (max. 300 - the default job submit limit per user)
#SBATCH --array=1-20
# Standard output and error:
#SBATCH -o job_%A_%a.out # Standard output, %A = job ID, %a = job array index
#SBATCH -e job_%A_%a.err # Standard error, %A = job ID, %a = job array index
# Initial working directory:
#SBATCH -D ./
# Job Name:
#SBATCH -J test_array
#
# Number of nodes and MPI tasks per node:
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=72
#
#SBATCH --mail-type=none
#SBATCH --mail-user=userid@example.mpg.de
#
# Wall clock limit (max. is 24 hours):
#SBATCH --time=12:00:00
# Load compiler and MPI modules (must be the same as used for compiling the code)
module purge
module load intel/21.2.0 impi/2021.2
# Run the program:
# the environment variable $SLURM_ARRAY_TASK_ID holds the index of the job array and
# can be used to discriminate between individual elements of the job array
srun ./myprog > prog.out
"""
# Parse the raw script and print the regenerated, annotated version
# (shown in the output below).
slurm_script = SlurmScript.from_script(script)
print(slurm_script)
#!/bin/bash
########################################################
# This script was generated using #
# slurm-script-generator v0.3.2 #
# https://github.com/max-models/slurm-script-generator #
# `pip install slurm-script-generator==0.3.2` #
########################################################
########################################################
# Pragmas for Job Config #
#SBATCH --job-name=test_array # name of job
# #
# Pragmas for Time And Priority #
#SBATCH --time=12:00:00 # time limit
# #
# Pragmas for Io And Directory #
#SBATCH --chdir=./ # change working directory
#SBATCH --stdout=job ID, %a = job array index # File to redirect stdout (%%x=jobname, %%j=jobid)
#SBATCH --stderr=job ID, %a = job array index # File to redirect stderr (%%x=jobname, %%j=jobid)
# #
# Pragmas for Notifications #
#SBATCH --mail-user=userid@example.mpg.de # who to send email notification for job state changes
#SBATCH --mail-type=none # notify on state change
# #
# Pragmas for Dependencies And Arrays #
#SBATCH --array=1-20 # submit a job array
# #
# Pragmas for Core Node And Task Allocation #
#SBATCH --nodes=1 # number of nodes on which to run
#SBATCH --ntasks-per-node=72 # number of tasks to invoke on each node
########################################################
module purge # Purge modules
module load intel/21.2.0 impi/2021.2 # modules
module list # List loaded modules
srun ./myprog > prog.out
[11]:
# https://docs.mpcdf.mpg.de/doc/computing/raven-user-guide.html#single-core-job
# Example 10: single-core job (Python/Matlab/Julia etc. on one shared-node
# core), copied verbatim from the Raven user guide.
script = """
#!/bin/bash -l
#
# Single-core example job script for MPCDF Raven.
# In addition to the Python example shown here, the script
# is valid for any single-threaded program, including
# sequential Matlab, Mathematica, Julia, and similar cases.
#
#SBATCH -J PYTHON_SEQ
#SBATCH -o ./out.%j
#SBATCH -e ./err.%j
#SBATCH -D ./
#SBATCH --ntasks=1 # launch job on a single core
#SBATCH --cpus-per-task=1 # on a shared node
#SBATCH --mem=2000MB # memory limit for the job
#SBATCH --time=0:10:00
module purge
module load gcc/10 impi/2021.2
module load anaconda/3/2021.05
# Set number of OMP threads to fit the number of available cpus, if applicable.
export OMP_NUM_THREADS=1
# Run single-core program
srun python3 ./python_sequential.py
"""
# Parse the raw script and print the regenerated, annotated version
# (shown in the output below). Note the two "module load" lines are
# merged into one in the output.
slurm_script = SlurmScript.from_script(script)
print(slurm_script)
#!/bin/bash
########################################################
# This script was generated using #
# slurm-script-generator v0.3.2 #
# https://github.com/max-models/slurm-script-generator #
# `pip install slurm-script-generator==0.3.2` #
########################################################
########################################################
# Pragmas for Job Config #
#SBATCH --job-name=PYTHON_SEQ # name of job
# #
# Pragmas for Time And Priority #
#SBATCH --time=0:10:00 # time limit
# #
# Pragmas for Io And Directory #
#SBATCH --chdir=./ # change working directory
#SBATCH --stdout=./out.%j # File to redirect stdout (%%x=jobname, %%j=jobid)
#SBATCH --stderr=./err.%j # File to redirect stderr (%%x=jobname, %%j=jobid)
# #
# Pragmas for Core Node And Task Allocation #
#SBATCH --ntasks=1 # number of processors required
#SBATCH --cpus-per-task=1 # number of cpus required per task
# #
# Pragmas for Memory #
#SBATCH --mem=2000MB # minimum amount of real memory
########################################################
module purge # Purge modules
module load gcc/10 impi/2021.2 anaconda/3/2021.05 # modules
module list # List loaded modules
export OMP_NUM_THREADS=1
srun python3 ./python_sequential.py
[12]:
# https://docs.mpcdf.mpg.de/doc/computing/raven-user-guide.html#small-job-with-multithreading-applicable-to-python-julia-and-matlab-plain-openmp-or-any-threaded-application
# Example 11: small multithreaded job (8 cores on a shared node), copied
# verbatim from the Raven user guide.
script = """
#!/bin/bash -l
#
# Multithreading example job script for MPCDF Raven.
# In addition to the Python example shown here, the script
# is valid for any multi-threaded program, including
# Matlab, Mathematica, Julia, and similar cases.
#
#SBATCH -J PYTHON_MT
#SBATCH -o ./out.%j
#SBATCH -e ./err.%j
#SBATCH -D ./
#SBATCH --ntasks=1 # launch job on
#SBATCH --cpus-per-task=8 # 8 cores on a shared node
#SBATCH --mem=16000MB # memory limit for the job
#SBATCH --time=0:10:00
module purge
module load gcc/10 impi/2021.2
module load anaconda/3/2021.05
# Set number of OMP threads to fit the number of available cpus, if applicable.
export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}
srun python3 ./python_multithreading.py
"""
# Parse the raw script and print the regenerated, annotated version
# (shown in the output below).
slurm_script = SlurmScript.from_script(script)
print(slurm_script)
#!/bin/bash
########################################################
# This script was generated using #
# slurm-script-generator v0.3.2 #
# https://github.com/max-models/slurm-script-generator #
# `pip install slurm-script-generator==0.3.2` #
########################################################
########################################################
# Pragmas for Job Config #
#SBATCH --job-name=PYTHON_MT # name of job
# #
# Pragmas for Time And Priority #
#SBATCH --time=0:10:00 # time limit
# #
# Pragmas for Io And Directory #
#SBATCH --chdir=./ # change working directory
#SBATCH --stdout=./out.%j # File to redirect stdout (%%x=jobname, %%j=jobid)
#SBATCH --stderr=./err.%j # File to redirect stderr (%%x=jobname, %%j=jobid)
# #
# Pragmas for Core Node And Task Allocation #
#SBATCH --ntasks=1 # number of processors required
#SBATCH --cpus-per-task=8 # number of cpus required per task
# #
# Pragmas for Memory #
#SBATCH --mem=16000MB # minimum amount of real memory
########################################################
module purge # Purge modules
module load gcc/10 impi/2021.2 anaconda/3/2021.05 # modules
module list # List loaded modules
export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}
srun python3 ./python_multithreading.py
[13]:
# https://docs.mpcdf.mpg.de/doc/computing/raven-user-guide.html#python-numpy-multitheading-applicable-to-julia-and-matlab-plain-openmp-or-any-threaded-application
# Example 12: multithreaded job on a full node (1 task, all 72 cores),
# copied verbatim from the Raven user guide.
script = """
#!/bin/bash -l
#
# Multithreading example job script for MPCDF Raven.
# In addition to the Python example shown here, the script
# is valid for any multi-threaded program, including
# parallel Matlab, Julia, and similar cases.
#
#SBATCH -o ./out.%j
#SBATCH -e ./err.%j
#SBATCH -D ./
#SBATCH -J PY_MULTITHREADING
#SBATCH --nodes=1 # request a full node
#SBATCH --ntasks-per-node=1 # only start 1 task via srun because Python multiprocessing starts more tasks internally
#SBATCH --cpus-per-task=72 # assign all the cores to that first task to make room for multithreading
#SBATCH --time=00:10:00
module purge
module load gcc/10 impi/2021.2
module load anaconda/3/2021.05
# set number of OMP threads *per process*
export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}
srun python3 ./python_multithreading.py
"""
# Parse the raw script and print the regenerated, annotated version
# (shown in the output below).
slurm_script = SlurmScript.from_script(script)
print(slurm_script)
#!/bin/bash
########################################################
# This script was generated using #
# slurm-script-generator v0.3.2 #
# https://github.com/max-models/slurm-script-generator #
# `pip install slurm-script-generator==0.3.2` #
########################################################
########################################################
# Pragmas for Job Config #
#SBATCH --job-name=PY_MULTITHREADING # name of job
# #
# Pragmas for Time And Priority #
#SBATCH --time=00:10:00 # time limit
# #
# Pragmas for Io And Directory #
#SBATCH --chdir=./ # change working directory
#SBATCH --stdout=./out.%j # File to redirect stdout (%%x=jobname, %%j=jobid)
#SBATCH --stderr=./err.%j # File to redirect stderr (%%x=jobname, %%j=jobid)
# #
# Pragmas for Core Node And Task Allocation #
#SBATCH --nodes=1 # number of nodes on which to run
#SBATCH --ntasks-per-node=1 # number of tasks to invoke on each node
#SBATCH --cpus-per-task=72 # number of cpus required per task
########################################################
module purge # Purge modules
module load gcc/10 impi/2021.2 anaconda/3/2021.05 # modules
module list # List loaded modules
export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}
srun python3 ./python_multithreading.py
[14]:
# https://docs.mpcdf.mpg.de/doc/computing/raven-user-guide.html#python-multiprocessing
# Example 13: Python multiprocessing job (1 srun task, 72 CPUs handed to
# multiprocessing), copied verbatim from the Raven user guide.
script = """
#!/bin/bash -l
#
# Python multiprocessing example job script for MPCDF Raven.
#
#SBATCH -o ./out.%j
#SBATCH -e ./err.%j
#SBATCH -D ./
#SBATCH -J PYTHON_MP
#SBATCH --nodes=1 # request a full node
#SBATCH --ntasks-per-node=1 # only start 1 task via srun because Python multiprocessing starts more tasks internally
#SBATCH --cpus-per-task=72 # assign all the cores to that first task to make room for Python's multiprocessing tasks
#SBATCH --time=00:10:00
module purge
module load gcc/10 impi/2021.2
module load anaconda/3/2021.05
# Important:
# Set the number of OMP threads *per process* to avoid overloading of the node!
export OMP_NUM_THREADS=1
# Use the environment variable SLURM_CPUS_PER_TASK to have multiprocessing
# spawn exactly as many processes as you have CPUs available.
srun python3 ./python_multiprocessing.py $SLURM_CPUS_PER_TASK
"""
# Parse the raw script and print the regenerated, annotated version
# (shown in the output below).
slurm_script = SlurmScript.from_script(script)
print(slurm_script)
#!/bin/bash
########################################################
# This script was generated using #
# slurm-script-generator v0.3.2 #
# https://github.com/max-models/slurm-script-generator #
# `pip install slurm-script-generator==0.3.2` #
########################################################
########################################################
# Pragmas for Job Config #
#SBATCH --job-name=PYTHON_MP # name of job
# #
# Pragmas for Time And Priority #
#SBATCH --time=00:10:00 # time limit
# #
# Pragmas for Io And Directory #
#SBATCH --chdir=./ # change working directory
#SBATCH --stdout=./out.%j # File to redirect stdout (%%x=jobname, %%j=jobid)
#SBATCH --stderr=./err.%j # File to redirect stderr (%%x=jobname, %%j=jobid)
# #
# Pragmas for Core Node And Task Allocation #
#SBATCH --nodes=1 # number of nodes on which to run
#SBATCH --ntasks-per-node=1 # number of tasks to invoke on each node
#SBATCH --cpus-per-task=72 # number of cpus required per task
########################################################
module purge # Purge modules
module load gcc/10 impi/2021.2 anaconda/3/2021.05 # modules
module list # List loaded modules
export OMP_NUM_THREADS=1
srun python3 ./python_multiprocessing.py $SLURM_CPUS_PER_TASK
[15]:
# https://docs.mpcdf.mpg.de/doc/computing/raven-user-guide.html#python-mpi4py
# Example 14: Python mpi4py job (72 MPI tasks on one node), copied verbatim
# from the Raven user guide.
script = """
#!/bin/bash -l
#
# Python MPI4PY example job script for MPCDF Raven.
# May use more than one node.
#
#SBATCH -o ./out.%j
#SBATCH -e ./err.%j
#SBATCH -D ./
#SBATCH -J MPI4PY
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=72
#SBATCH --time=00:10:00
module purge
module load gcc/10 impi/2021.2
module load anaconda/3/2021.05
module load mpi4py/3.0.3
# Important:
# Set the number of OMP threads *per process* to avoid overloading of the node!
export OMP_NUM_THREADS=1
srun python3 ./python_mpi4py.py
"""
# Parse the raw script and print the regenerated, annotated version
# (shown in the output below).
slurm_script = SlurmScript.from_script(script)
print(slurm_script)
#!/bin/bash
########################################################
# This script was generated using #
# slurm-script-generator v0.3.2 #
# https://github.com/max-models/slurm-script-generator #
# `pip install slurm-script-generator==0.3.2` #
########################################################
########################################################
# Pragmas for Job Config #
#SBATCH --job-name=MPI4PY # name of job
# #
# Pragmas for Time And Priority #
#SBATCH --time=00:10:00 # time limit
# #
# Pragmas for Io And Directory #
#SBATCH --chdir=./ # change working directory
#SBATCH --stdout=./out.%j # File to redirect stdout (%%x=jobname, %%j=jobid)
#SBATCH --stderr=./err.%j # File to redirect stderr (%%x=jobname, %%j=jobid)
# #
# Pragmas for Core Node And Task Allocation #
#SBATCH --nodes=1 # number of nodes on which to run
#SBATCH --ntasks-per-node=72 # number of tasks to invoke on each node
########################################################
module purge # Purge modules
module load gcc/10 impi/2021.2 anaconda/3/2021.05 mpi4py/3.0.3 # modules
module list # List loaded modules
export OMP_NUM_THREADS=1
srun python3 ./python_mpi4py.py