-
Notifications
You must be signed in to change notification settings - Fork 0
/
slurm_engine.sbatch
executable file
·38 lines (35 loc) · 1.18 KB
/
slurm_engine.sbatch
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
#! /bin/bash -l
# SLURM batch script that starts IPython-parallel engines (ipengine) on a GPU
# partition. The "-l" (login shell) flag is kept so the environment-modules
# `module` command and site profile are initialized — do not drop it.
#
# Standard output and error: [filename pattern]
# NOTE(review): all tasks of the job share these two log files; paths are
# relative to the working directory set by -D below and ./logs must exist
# before submission or the job will fail to write output.
#SBATCH -o ./logs/current.ipengine.log
#SBATCH -e ./logs/current.ipengine-err.log
#
# Initial working directory:
#SBATCH -D ./
#
# Job Name:
#SBATCH -J active_eng
#
# Queue (Partition):
#SBATCH --partition=gpu
#
# Process management (number of parallel executions is specified using the --array option):
# * possible formats: `--array=0-9`, `--array=1,3,5,7`, `--array=1-7:2`
# * reduce maximum number of simultaneously running tasks using a "%" separator (e.g. `--array=0-9%4`)
# * to start only one instance, use --array=0 or (better) leave the --array option away completely
##SBATCH --array=0
#SBATCH --gres=gpu:1 # specify number of GPUs
##SBATCH --nodes=10
##SBATCH --ntasks-per-node=1 # or use directly ntasks
#SBATCH --ntasks=10
#SBATCH --cpus-per-task=10 # specify number of CPU cores (as a rule of thumb, 4 per GPU)
#
# Memory requirement (default is 64GB):
#SBATCH --mem=32GB # 64
#
# Wall clock limit:
#SBATCH --time=120:10:00
# Set up the runtime environment:
# - `load_conda` is a site-local helper script; presumably it activates the
#   conda environment containing ipyparallel — verify on the target cluster.
# - Intel compiler and Intel MPI modules; versions are pinned to the cluster's
#   installed toolchain (21.2.0 / 2021.2).
source load_conda
module load intel/21.2.0
module load impi/2021.2
# Launch one ipengine per SLURM task (--ntasks=10 above). Engines register
# with the controller through the connection files in the shared IPython
# profile directory. NOTE(review): --cluster-id=jupyter must match the id the
# ipcontroller was started with, and the profile dir is a hard-coded absolute
# user path — adjust both when reusing this script for another user/cluster.
srun ipengine --profile-dir=/zeropoint/u/lsarra/.ipython/profile_parallel_jpt --cluster-id=jupyter