Basics of Python API#

[1]:
from slurm_script_generator.slurm_script import SlurmScript

%load_ext autoreload
%autoreload 2
[2]:
# Minimal example: 2 nodes, 16 tasks per core, plus the shell
# commands to run after the generated #SBATCH preamble.
commands = [
    "# Run simulation",
    "srun ./bin > run.out",
]
slurm_script = SlurmScript(nodes=2, ntasks_per_core=16, custom_commands=commands)

print(slurm_script)
#!/bin/bash
########################################################
#            This script was generated using           #
#             slurm-script-generator v0.3.2            #
# https://github.com/max-models/slurm-script-generator #
#      `pip install slurm-script-generator==0.3.2`     #
########################################################

########################################################
# Pragmas for Core Node And Task Allocation            #
#SBATCH --nodes=2                                      # number of nodes on which to run
#                                                      #
# Pragmas for Cpu Topology And Binding                 #
#SBATCH --ntasks-per-core=16                           # number of tasks to invoke on each core
########################################################
# Run simulation
srun ./bin > run.out

You can also retrieve the string representation

[3]:
# Render the script to a plain string (same text printed above).
script_str = slurm_script.to_string()
print(script_str)
#!/bin/bash
########################################################
#            This script was generated using           #
#             slurm-script-generator v0.3.2            #
# https://github.com/max-models/slurm-script-generator #
#      `pip install slurm-script-generator==0.3.2`     #
########################################################

########################################################
# Pragmas for Core Node And Task Allocation            #
#SBATCH --nodes=2                                      # number of nodes on which to run
#                                                      #
# Pragmas for Cpu Topology And Binding                 #
#SBATCH --ntasks-per-core=16                           # number of tasks to invoke on each core
########################################################
# Run simulation
srun ./bin > run.out

or save the script to file with

[4]:
# Write the rendered script to disk.
slurm_script.save("slurm_script.sh")

MPI batch job without hyperthreading (Raven)#

Here is a slightly more detailed example

[5]:
# Template from Raven user guide:
# https://docs.mpcdf.mpg.de/doc/computing/raven-user-guide.html#mpi-batch-job-without-hyperthreading

# Pure-MPI job: 16 nodes x 72 tasks, no hyperthreading.
run_commands = [
    "# Run the program:",
    "srun ./myprog > prog.out",
]
slurm_script = SlurmScript(
    # I/O and working directory
    stderr="./job.err.%j",
    stdout="./job.out.%j",
    chdir="./",
    # job identity and limits
    job_name="test_slurm",
    time="12:00:00",
    # resources
    nodes=16,
    ntasks_per_node=72,
    # notifications
    mail_type="none",
    mail_user="userid@example.mpg.de",
    # environment modules loaded before the run commands
    modules=["intel/21.2.0", "impi/2021.2"],
    custom_commands=run_commands,
)

print(slurm_script)
#!/bin/bash
########################################################
#            This script was generated using           #
#             slurm-script-generator v0.3.2            #
# https://github.com/max-models/slurm-script-generator #
#      `pip install slurm-script-generator==0.3.2`     #
########################################################

########################################################
# Pragmas for Job Config                               #
#SBATCH --job-name=test_slurm                          # name of job
#                                                      #
# Pragmas for Time And Priority                        #
#SBATCH --time=12:00:00                                # time limit
#                                                      #
# Pragmas for Io And Directory                         #
#SBATCH --chdir=./                                     # change working directory
#SBATCH --stdout=./job.out.%j                          # File to redirect stdout (%%x=jobname, %%j=jobid)
#SBATCH --stderr=./job.err.%j                          # File to redirect stderr (%%x=jobname, %%j=jobid)
#                                                      #
# Pragmas for Notifications                            #
#SBATCH --mail-user=userid@example.mpg.de              # who to send email notification for job state changes
#SBATCH --mail-type=none                               # notify on state change
#                                                      #
# Pragmas for Core Node And Task Allocation            #
#SBATCH --nodes=16                                     # number of nodes on which to run
#SBATCH --ntasks-per-node=72                           # number of tasks to invoke on each node
########################################################
module purge                                           # Purge modules
module load intel/21.2.0 impi/2021.2                   # modules
module list                                            # List loaded modules
# Run the program:
srun ./myprog > prog.out

Hybrid MPI/OpenMP batch job without hyperthreading on Raven#

[6]:
# Template from Raven user guide:
# https://docs.mpcdf.mpg.de/doc/computing/raven-user-guide.html#mpi-batch-job-without-hyperthreading

# Hybrid MPI/OpenMP job: 16 nodes, 4 tasks per node, 18 CPUs per task.
hybrid_commands = [
    "\nexport OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK",
    "\n# For pinning threads correctly:",
    "export OMP_PLACES=cores",
    "\n# Run the program:",
    "srun ./myprog > prog.out",
]
slurm_script = SlurmScript(
    # I/O and working directory
    stderr="./job_hybrid.err.%j",
    stdout="./job_hybrid.out.%j",
    chdir="./",
    # job identity and limits
    job_name="test_slurm",
    time="12:00:00",
    # resources
    nodes=16,
    ntasks_per_node=4,
    cpus_per_task=18,
    # notifications
    mail_type="none",
    mail_user="userid@example.mpg.de",
    # environment modules loaded before the run commands
    modules=["intel/21.2.0", "impi/2021.2"],
    custom_commands=hybrid_commands,
)

print(slurm_script)
#!/bin/bash
########################################################
#            This script was generated using           #
#             slurm-script-generator v0.3.2            #
# https://github.com/max-models/slurm-script-generator #
#      `pip install slurm-script-generator==0.3.2`     #
########################################################

########################################################
# Pragmas for Job Config                               #
#SBATCH --job-name=test_slurm                          # name of job
#                                                      #
# Pragmas for Time And Priority                        #
#SBATCH --time=12:00:00                                # time limit
#                                                      #
# Pragmas for Io And Directory                         #
#SBATCH --chdir=./                                     # change working directory
#SBATCH --stdout=./job_hybrid.out.%j                   # File to redirect stdout (%%x=jobname, %%j=jobid)
#SBATCH --stderr=./job_hybrid.err.%j                   # File to redirect stderr (%%x=jobname, %%j=jobid)
#                                                      #
# Pragmas for Notifications                            #
#SBATCH --mail-user=userid@example.mpg.de              # who to send email notification for job state changes
#SBATCH --mail-type=none                               # notify on state change
#                                                      #
# Pragmas for Core Node And Task Allocation            #
#SBATCH --nodes=16                                     # number of nodes on which to run
#SBATCH --ntasks-per-node=4                            # number of tasks to invoke on each node
#SBATCH --cpus-per-task=18                             # number of cpus required per task
########################################################
module purge                                           # Purge modules
module load intel/21.2.0 impi/2021.2                   # modules
module list                                            # List loaded modules

export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK

# For pinning threads correctly:
export OMP_PLACES=cores

# Run the program:
srun ./myprog > prog.out

[7]:
# The repr of a SlurmScript is valid Python: evaluating it
# re-creates an equivalent object via the Python API.
slurm_script = SlurmScript(
    nodes=2,
    ntasks_per_core=16,
    custom_commands=["# Run simulation", "srun ./bin > run.out"],
)
slurm_script  # last expression -> Jupyter displays the repr
[7]:
SlurmScript(
    nodes=2,
    ntasks_per_core=16,
    custom_commands=['# Run simulation', 'srun ./bin > run.out'],
)
[8]:
# Paste the repr from the previous cell back in: it is valid Python
# and reconstructs an equivalent SlurmScript.
script = SlurmScript(
    nodes=2,
    ntasks_per_core=16,
    custom_commands=["# Run simulation", "srun ./bin > run.out"],
)
# Verify the round trip: the recreated object reprs identically
# to the one it was copied from.
assert repr(script) == repr(slurm_script)