==== Example sbatch Scripts ====
Copying and pasting a script while working under Windows can cause problems with line endings.
You can find more information at [[https://

==== MPI-Job ====
<code bash>
#!/bin/bash

#SBATCH -o /                            # file for the job's standard output
#SBATCH -D /                            # working directory of the job
#SBATCH -J Hello-World_MPI              # job name
#SBATCH --nodes=2                       # number of nodes N
#SBATCH --ntasks-per-node=20            # tasks (processes) n per node
#SBATCH --ntasks-per-core=1             # one task per core
#SBATCH --mem=500M

## Set the max walltime:
#SBATCH --time=72:00:00

# Run on standard nodes:
#SBATCH --partition=standard

# Job status via e-mail:
#SBATCH --mail-type=ALL
#SBATCH --mail-user=vorname.nachname@tu-berlin.de

module load $MODULES

mpirun $myApplication
</code>
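
The script can be submitted and monitored with the standard Slurm commands. A short example, assuming the script above was saved as ''hello_mpi.sbatch'' (the file name is only an illustration):

<code bash>
# submit the job script; Slurm prints the assigned job ID
sbatch hello_mpi.sbatch

# list your own pending and running jobs
squeue -u $USER

# cancel a job if necessary (replace <jobid> with the ID printed by sbatch)
scancel <jobid>
</code>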
+ | |||
+ | ==== Single-Node-Job ==== | ||
+ | < | ||
+ | #!/bin/bash | ||
+ | |||
+ | #SBATCH -o / | ||
+ | #SBATCH -D / | ||
+ | #SBATCH -J Hello-World # Job Name | ||
+ | #SBATCH --ntasks=1 # Anzahl Prozesse (CPU-Cores) | ||
+ | #SBATCH --mem=500M | ||
+ | |||
+ | ##Max Walltime vorgeben: | ||
+ | #SBATCH --time=72: | ||
+ | |||
+ | #Auf Standard-Knoten rechnen: | ||
+ | #SBATCH --partition=standard | ||
+ | |||
+ | #Job-Status per Mail: | ||
+ | #SBATCH --mail-type=ALL | ||
+ | #SBATCH --mail-user=vorname.nachname@tu-berlin.de | ||
+ | |||
+ | # benötigte SW / Bibliotheken laden | ||
+ | module load $MODULES | ||
+ | |||
+ | echo " | ||
+ | $myApplication | ||
+ | </ | ||
+ | |||
+ | ==== GPU-Job ==== | ||
+ | < | ||
+ | #!/bin/bash | ||
+ | |||
+ | #SBATCH -o / | ||
+ | #SBATCH -D / | ||
+ | #SBATCH -J Hello-World_GPU # | ||
+ | #SBATCH --ntasks=2 # Anzahl Prozesse P (CPU-Cores) | ||
+ | #SBATCH --cpus-per-task=1 # | ||
+ | #SBATCH --gres=gpu: | ||
+ | #SBATCH --mem=500G | ||
+ | |||
+ | ##Max Walltime vorgeben: | ||
+ | #SBATCH --time=72: | ||
+ | |||
+ | #Auf GPU-Knoten rechnen: | ||
+ | #SBATCH --partition=gpu | ||
+ | |||
+ | #Job-Status per Mail: | ||
+ | #SBATCH --mail-type=ALL | ||
+ | #SBATCH --mail-user=vorname.nachname@tu-berlin.de | ||
+ | |||
+ | # benötigte SW / Bibliotheken laden (CUDA, etc.) | ||
+ | module load $MODULES | ||
+ | |||
+ | $myCUDA_Application | ||
+ | </ | ||
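
To verify which GPUs were actually allocated, the job can query them before starting the application. A minimal sketch of lines that could be added after ''module load $MODULES'' in the script above (purely illustrative; on most Slurm installations ''CUDA_VISIBLE_DEVICES'' is set automatically when GPUs are requested via ''--gres''):

<code bash>
# GPUs assigned by Slurm to this job
echo "CUDA_VISIBLE_DEVICES: $CUDA_VISIBLE_DEVICES"

# show the allocated GPUs, the driver version and the current utilization
nvidia-smi
</code>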
+ | |||
+ | ==== openMP-Job ==== | ||
+ | < | ||
+ | #!/bin/bash | ||
+ | |||
+ | #SBATCH -o / | ||
+ | #SBATCH -D / | ||
+ | #SBATCH -J Hello-World_OpenMP # | ||
+ | #SBATCH --nodes=1 # Anzahl Knoten N | ||
+ | #SBATCH --ntasks=1 # Anzahl Prozesse P | ||
+ | #SBATCH --cpus-per-task=4 # Anzahl CPU-Cores pro Prozess P | ||
+ | #SBATCH --mem=500M | ||
+ | |||
+ | ##Max Walltime vorgeben: | ||
+ | #SBATCH --time=72: | ||
+ | |||
+ | #Auf Standard-Knoten rechnen: | ||
+ | #SBATCH --partition=standard | ||
+ | |||
+ | #Job-Status per Mail: | ||
+ | #SBATCH --mail-type=ALL | ||
+ | #SBATCH --mail-user=vorname.nachname@tu-berlin.de | ||
+ | |||
+ | module load $MODULES | ||
+ | |||
+ | # z. B. 1 Prozess mit 4 OpenMP-Threads | ||
+ | export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK | ||
+ | |||
+ | $myApplication | ||
+ | |||
+ | </ |
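
After a job has finished, the resources it actually used can be compared with what was requested, provided Slurm accounting is enabled on the cluster. A short example; ''<jobid>'' is a placeholder for the real job ID:

<code bash>
# elapsed time, consumed CPU time and peak memory of a finished job
sacct -j <jobid> --format=JobID,JobName,Elapsed,TotalCPU,MaxRSS,State
</code>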