include beforeScript logic to accommodate new module load logic
Joon-Klaps committed Jan 9, 2025
1 parent 6d07e08 commit 1240bbb
Showing 2 changed files with 14 additions and 11 deletions.
23 changes: 13 additions & 10 deletions conf/vsc_kul_uhasselt.config
@@ -115,9 +115,10 @@ def determineGeniusGpuQueue = { task ->
         return task.time >= TIME_THRESHOLD ? 'gpu_v100_long' : 'gpu_v100'
     }
     if (task.time >= TIME_THRESHOLD) {
-        return AVAILABLE_QUEUES.contains('dedicated_rega_gpu') ? 'dedicated_rega_gpu' : 'gpu_p100_long,amd_long'
+        return AVAILABLE_QUEUES.contains('dedicated_rega_gpu') ? 'dedicated_rega_gpu' :
+            AVAILABLE_QUEUES.contains('amd') ? 'amd_long' : 'gpu_p100_long'
     }
-    return 'gpu_p100,amd'
+    return AVAILABLE_QUEUES.contains('amd') ? 'amd' : 'gpu_p100'
 }

 /*
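The new fallback chain prefers a dedicated queue, then the AMD partition, and only then the generic P100 queues, instead of submitting to a comma-separated pair of partitions. A minimal sketch of how the ternary chain resolves, with `availableQueues` passed in explicitly as a stand-in for the config's `AVAILABLE_QUEUES`, whose initialisation is not visible in this diff:

```groovy
// Sketch only: availableQueues stands in for the config's AVAILABLE_QUEUES;
// how the real config populates that list is not shown in this diff.
def pickLongGpuQueue = { List availableQueues ->
    availableQueues.contains('dedicated_rega_gpu') ? 'dedicated_rega_gpu' :
        availableQueues.contains('amd') ? 'amd_long' : 'gpu_p100_long'
}

assert pickLongGpuQueue(['dedicated_rega_gpu', 'amd']) == 'dedicated_rega_gpu'
assert pickLongGpuQueue(['amd'])                       == 'amd_long'
assert pickLongGpuQueue([])                            == 'gpu_p100_long'
```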
@@ -183,7 +184,7 @@ profiles {
     process {
         // 768 - 65 so 65GB for overhead, max is 720000MB
         resourceLimits = [ memory: 703.GB, cpus: 36, time: 168.h ]
-        beforeScript = 'module load cluster/genius'
+        beforeScript = { 'module load cluster/genius/' + determineGeniusQueue(task).toString().split(',')[0] }
         queue = { determineGeniusQueue(task) }
         clusterOptions = {
             determineGeniusQueue(task) =~ /dedicated/ ?
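Each `beforeScript` now becomes a closure that loads the cluster module for the specific partition a task is routed to, rather than a static `module load cluster/genius`. Because the queue helpers have historically returned comma-separated lists, `split(',')[0]` pins the module path to the first entry. A hedged sketch of the composition, using a stand-in queue helper in place of the real one:

```groovy
// Stand-in for determineGeniusQueue; the real helper inspects the task's
// resource requests. A comma-separated return value is assumed here purely
// to show why split(',')[0] is needed.
def determineGeniusQueue = { task -> 'gpu_p100_long,amd_long' }

def task = null  // placeholder; the stand-in helper ignores it
def beforeScript = { 'module load cluster/genius/' + determineGeniusQueue(task).toString().split(',')[0] }

// Only the first queue in the list names the module to load.
assert beforeScript() == 'module load cluster/genius/gpu_p100_long'
```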
@@ -193,6 +194,7 @@ profiles {

         withLabel: '.*gpu.*'{
             resourceLimits = [ memory: 703.GB, cpus: 36 , time: 168.h ]
+            beforeScript = { 'module load cluster/genius/' + determineGeniusGpuQueue(task).toString().split(',')[0] }
             apptainer.runOptions = '--containall --cleanenv --nv'
             singularity.runOptions = '--containall --cleanenv --nv'
             queue = { determineGeniusGpuQueue(task) }
@@ -212,8 +214,8 @@ profiles {
     process {
         // 768 - 65 so 65GB for overhead, max is 720000MB
         resourceLimits = [ memory: 703.GB, cpus: 36, time: 168.h]
-        beforeScript = 'module load cluster/genius'
-        queue = { determineGeniusGpuQueue(task) }
+        beforeScript = { 'module load cluster/genius/' + determineGeniusGpuQueue(task).toString().split(',')[0] }
+        queue = { determineGeniusGpuQueue(task) }
         clusterOptions = {
             def gpus = task.accelerator?.request ?: Math.max(1, Math.floor((task.cpus ?:1)/9) as int)
             "--gres=gpu:${gpus} --clusters=genius --account=$TIER2_PROJECT"
@@ -227,7 +229,7 @@ profiles {
     process {
         // max is 2016000
         resourceLimits = [ memory: 1968.GB, cpus: 72, time: 168.h ]
-        beforeScript = 'module load cluster/wice'
+        beforeScript = { 'module load cluster/wice/' + determineWiceQueue(task).toString().split(',')[0] }
         queue = { determineWiceQueue(task) }
         clusterOptions = {
             determineWiceQueue(task) =~ /dedicated/ ?
@@ -239,6 +241,7 @@ profiles {
             resourceLimits = [ memory: 703.GB, cpus: 64, time: 168.h ]
             apptainer.runOptions = '--containall --cleanenv --nv'
             singularity.runOptions = '--containall --cleanenv --nv'
+            beforeScript = { 'module load cluster/wice/' + determineWiceGpuQueue(task).toString().split(',')[0] }
             queue = { determineWiceGpuQueue(task) }
             clusterOptions = {
                 def gpus = task.accelerator?.request ?: Math.max(1, Math.floor((task.cpus ?:1)/16) as int)
@@ -258,10 +261,10 @@ profiles {

     process {
         // 768 - 65 so 65GB for overhead, max is 720000MB
-        beforeScript = 'module load cluster/wice'
-        resourceLimits = [ memory: 703.GB, cpus: 64, time: 168.h ]
-        queue = { determineWiceGpuQueue(task) }
-        clusterOptions = {
+        beforeScript = { 'module load cluster/wice/' + determineWiceGpuQueue(task).toString().split(',')[0] }
+        resourceLimits = [ memory: 703.GB, cpus: 64, time: 168.h ]
+        queue = { determineWiceGpuQueue(task) }
+        clusterOptions = {
             def gpus = task.accelerator?.request ?: Math.max(1, Math.floor((task.cpus ?:1)/16) as int)
             def queueValue = determineWiceGpuQueue(task)
             queueValue =~ /dedicated_big_gpu_h100/ ? "--clusters=wice --account=lp_big_wice_gpu_h100 --gres=gpu:${gpus}" :
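Routing to the dedicated H100 account relies on Groovy's find operator: `queueValue =~ /pattern/` yields a `java.util.regex.Matcher`, which is truthy when the pattern occurs, so it can drive a ternary directly. A minimal sketch: the H100 branch is copied from the visible diff line, while the fallback string is an assumed placeholder because the real alternative branch is truncated in this view:

```groovy
def gpus = 2

def optionsFor = { String queueValue ->
    // =~ returns a Matcher; Groovy truth evaluates it via find().
    queueValue =~ /dedicated_big_gpu_h100/ ?
        "--clusters=wice --account=lp_big_wice_gpu_h100 --gres=gpu:${gpus}" :
        "--clusters=wice --gres=gpu:${gpus}"   // assumed placeholder branch
}

assert optionsFor('dedicated_big_gpu_h100').toString().contains('lp_big_wice_gpu_h100')
assert !optionsFor('gpu_a100').toString().contains('--account')
```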
2 changes: 1 addition & 1 deletion docs/vsc_kul_uhasselt.md
@@ -15,7 +15,7 @@ A nextflow module is available that can be loaded `module load Nextflow` but it
 2. Set up the environment variables in `~/.bashrc` or `~/.bash_profile`:

 :::note
-If you have access to dedicated nodes, you can export these as a comma-separated list. These queues are only used when a task's requirements cannot be met by the normal partitions but can be met by a dedicated partition.
+If you have access to dedicated nodes, you can export these as a comma-separated list. These queues are only used when a task's requirements cannot be met by the normal partitions but can be met by a dedicated partition. AMD is considered a dedicated partition.
 :::

 ```bash
