diff --git a/conf/vsc_kul_uhasselt.config b/conf/vsc_kul_uhasselt.config index 5c841cc5..91d629f3 100644 --- a/conf/vsc_kul_uhasselt.config +++ b/conf/vsc_kul_uhasselt.config @@ -115,9 +115,10 @@ def determineGeniusGpuQueue = { task -> return task.time >= TIME_THRESHOLD ? 'gpu_v100_long' : 'gpu_v100' } if (task.time >= TIME_THRESHOLD) { - return AVAILABLE_QUEUES.contains('dedicated_rega_gpu') ? 'dedicated_rega_gpu' : 'gpu_p100_long,amd_long' + return AVAILABLE_QUEUES.contains('dedicated_rega_gpu') ? 'dedicated_rega_gpu' : + AVAILABLE_QUEUES.contains('amd') ? 'amd_long' : 'gpu_p100_long' } - return 'gpu_p100,amd' + return AVAILABLE_QUEUES.contains('amd') ? 'amd' : 'gpu_p100' } /* @@ -183,7 +184,7 @@ profiles { process { // 768 - 65 so 65GB for overhead, max is 720000MB resourceLimits = [ memory: 703.GB, cpus: 36, time: 168.h ] - beforeScript = 'module load cluster/genius' + beforeScript = { 'module load cluster/genius/' + determineGeniusQueue(task).toString().split(',')[0] } queue = { determineGeniusQueue(task) } clusterOptions = { determineGeniusQueue(task) =~ /dedicated/ ? 
@@ -193,6 +194,7 @@ profiles { withLabel: '.*gpu.*'{ resourceLimits = [ memory: 703.GB, cpus: 36 , time: 168.h ] + beforeScript = { 'module load cluster/genius/' + determineGeniusGpuQueue(task).toString().split(',')[0] } apptainer.runOptions = '--containall --cleanenv --nv' singularity.runOptions = '--containall --cleanenv --nv' queue = { determineGeniusGpuQueue(task) } @@ -212,8 +214,8 @@ profiles { process { // 768 - 65 so 65GB for overhead, max is 720000MB resourceLimits = [ memory: 703.GB, cpus: 36, time: 168.h] - beforeScript = 'module load cluster/genius' - queue = { determineGeniusGpuQueue(task) } + beforeScript = { 'module load cluster/genius/' + determineGeniusGpuQueue(task).toString().split(',')[0] } + queue = { determineGeniusGpuQueue(task) } clusterOptions = { def gpus = task.accelerator?.request ?: Math.max(1, Math.floor((task.cpus ?:1)/9) as int) "--gres=gpu:${gpus} --clusters=genius --account=$TIER2_PROJECT" @@ -227,7 +229,7 @@ profiles { process { // max is 2016000 resourceLimits = [ memory: 1968.GB, cpus: 72, time: 168.h ] - beforeScript = 'module load cluster/wice' + beforeScript = { 'module load cluster/wice/' + determineWiceQueue(task).toString().split(',')[0] } queue = { determineWiceQueue(task) } clusterOptions = { determineWiceQueue(task) =~ /dedicated/ ? 
@@ -239,6 +241,7 @@ profiles { resourceLimits = [ memory: 703.GB, cpus: 64, time: 168.h ] apptainer.runOptions = '--containall --cleanenv --nv' singularity.runOptions = '--containall --cleanenv --nv' + beforeScript = { 'module load cluster/wice/' + determineWiceGpuQueue(task).toString().split(',')[0] } queue = { determineWiceGpuQueue(task) } clusterOptions = { def gpus = task.accelerator?.request ?: Math.max(1, Math.floor((task.cpus ?:1)/16) as int) @@ -258,10 +261,10 @@ profiles { process { // 768 - 65 so 65GB for overhead, max is 720000MB - beforeScript = 'module load cluster/wice' - resourceLimits = [ memory: 703.GB, cpus: 64, time: 168.h ] - queue = { determineWiceGpuQueue(task) } - clusterOptions = { + beforeScript = { 'module load cluster/wice/' + determineWiceGpuQueue(task).toString().split(',')[0] } + resourceLimits = [ memory: 703.GB, cpus: 64, time: 168.h ] + queue = { determineWiceGpuQueue(task) } + clusterOptions = { def gpus = task.accelerator?.request ?: Math.max(1, Math.floor((task.cpus ?:1)/16) as int) def queueValue = determineWiceGpuQueue(task) queueValue =~ /dedicated_big_gpu_h100/ ? "--clusters=wice --account=lp_big_wice_gpu_h100 --gres=gpu:${gpus}" : diff --git a/docs/vsc_kul_uhasselt.md b/docs/vsc_kul_uhasselt.md index 7e1af924..fd9c676d 100644 --- a/docs/vsc_kul_uhasselt.md +++ b/docs/vsc_kul_uhasselt.md @@ -15,7 +15,7 @@ A nextflow module is available that can be loaded `module load Nextflow` but it 2. Set up the environment variables in `~/.bashrc` or `~/.bash_profile`: :::note -If you have access to dedicated nodes, you can export these as a command separated list. These queues will only be used if specified task requirements are not available in the normal partitions but they are available in dedicated partitions. +If you have access to dedicated nodes, you can export these as a comma-separated list. 
These queues will only be used if specified task requirements are not available in the normal partitions but they are available in dedicated partitions. AMD is considered a dedicated partition. ::: ```bash