nf-core/configs: Alliance Canada Configuration

Configuration for running Nextflow on the clusters of the Digital Research Alliance of Canada. Invoke it by specifying -profile alliance_canada.

You also need to supply the name of the group under which you are working, or whose resource allocation you want to use, by running export SLURM_ACCOUNT=<account> (e.g. def-username) before you run any nf-core pipeline. If you run nf-core pipelines frequently and always use the same resource allocation, you may find it more convenient to add the SLURM_ACCOUNT environment variable to your ~/.bashrc file.

For detailed information on running nf-core pipelines on Alliance clusters, please visit the documentation: https://docs.alliancecan.ca/wiki/Nextflow

If you run into issues, please contact Alliance Support: https://docs.alliancecan.ca/wiki/Technical_support

Config file

See config file on GitHub

conf/alliance_canada.config
params {
    // Contact and documentation links surfaced in nf-core pipeline run summaries.
    config_profile_contact      = 'Cian Monnin (github/CMonnin)'
    config_profile_url          = 'https://docs.alliancecan.ca/wiki/Nextflow'
 
    // default to narval settings if no cluster_name defined
    // CC_CLUSTER is expected to be set by the Alliance cluster environment
    // (NOTE(review): confirm it is exported on all listed clusters).
    cluster_name = System.getenv('CC_CLUSTER') ?: 'narval'
 
    // Human-readable profile description; falls back to a generic label for
    // any cluster name not matched below.
    config_profile_description  = params.cluster_name == 'narval' ? 'Alliance Canada (Narval) cluster profile provided by nf-core/configs.' :
        params.cluster_name == 'fir' ? 'Alliance Canada (Fir) cluster profile provided by nf-core/configs.' :
        params.cluster_name == 'nibi' ? 'Alliance Canada (Nibi) cluster profile provided by nf-core/configs.' :
        params.cluster_name == 'rorqual' ? 'Alliance Canada (Rorqual) cluster profile provided by nf-core/configs.' :
        params.cluster_name == 'trillium' ? 'Alliance Canada (Trillium) cluster profile provided by nf-core/configs.' :
        'Alliance Canada HPC config'
 
    // Per-cluster CPU ceiling; unmatched clusters (including narval) fall
    // through to 64. Trillium is deliberately null (no per-task CPU cap here;
    // see process.clusterOptions for its whole-node request).
    // These max_* params mirror process.resourceLimits below — keep both in sync.
    max_cpus                    = params.cluster_name == 'nibi' ? 192 :
        params.cluster_name == 'rorqual' ? 192 :
        params.cluster_name == 'fir' ? 192 :
        params.cluster_name == 'trillium' ? null :
        64
    // Per-cluster memory ceiling; unmatched clusters fall through to 240.GB.
    max_memory                  =
        params.cluster_name == 'narval' ? 249.GB :
        params.cluster_name == 'nibi' ? 750.GB :
        params.cluster_name == 'rorqual' ? 750.GB :
        params.cluster_name == 'fir' ? 750.GB :
        params.cluster_name == 'trillium' ? null :
        240.GB
    // Walltime ceiling (7 days) applied uniformly across clusters.
    max_time                    = 168.h
}
 
// Delete intermediate files in the work directory when a run completes
// successfully (saves scratch quota; disables -resume for completed runs).
cleanup = true
 
singularity {
    // Run containers via Singularity, with host paths bind-mounted
    // automatically into each container.
    autoMounts  = true
    enabled     = true
}
 
apptainer {
    // Bind-mount host paths automatically if Apptainer is used.
    // NOTE(review): 'enabled' is not set here — Singularity is the engine
    // enabled above. Confirm whether Apptainer should also be enabled or
    // whether this block exists only for users overriding the engine.
    autoMounts = true
}
 
// Group name for resource allocation must be supplied as environment variable
// Group name for resource allocation must be supplied as environment variable
process {
    executor        = 'slurm'
    // Charge jobs to the user's SLURM_ACCOUNT allocation. Trillium
    // additionally requests a full node (--nodes=1) — presumably because it
    // schedules by whole node; see Alliance docs to confirm.
    clusterOptions  = params.cluster_name == 'trillium' ? "--account=${System.getenv('SLURM_ACCOUNT')} --nodes=1": "--account=${System.getenv('SLURM_ACCOUNT')}"
    // Retry once on exit codes 125 and 139 (NOTE(review): 139 = 128+SIGSEGV,
    // often an OOM kill in containers — confirm these are the intended
    // transient failures); any other failure stops submitting new tasks.
    maxRetries      = 1
    errorStrategy   = { task.exitStatus in [125,139] ? 'retry' : 'finish' }
    // Conservative per-task defaults; pipelines override per-process.
    cpus            = 1
    time            = '1h'
 
// NOTE:
// these resourceLimits are set to baseline CPU for each cluster
// Currently missing are configs for GPUs
 
    // Hard caps Nextflow applies when clamping per-process requests.
    // Mirrors params.max_cpus/max_memory/max_time above — keep both in sync.
    resourceLimits = [
        cpus: params.cluster_name == 'nibi'? 192 :
            params.cluster_name == 'rorqual' ? 192 :
            params.cluster_name == 'fir' ? 192 :
            params.cluster_name == 'trillium' ? null:
            64,
        memory: params.cluster_name == 'narval' ? 249.GB :
            params.cluster_name == 'nibi' ? 750.GB :
            params.cluster_name == 'rorqual' ? 750.GB :
            params.cluster_name == 'fir' ? 750.GB :
            params.cluster_name == 'trillium' ? null :
            240.GB,
        time: 168.h
    ]
 
}
 
executor {
    // Be gentle on the Slurm controller: cap submissions at 60 jobs per
    // minute and poll job state once a minute.
    submitRateLimit = '60/1min'
    pollInterval    = '60 sec'
    // Most clusters keep at most 100 jobs in flight; Trillium allows 500.
    queueSize       = params.cluster_name != 'trillium' ? 100 : 500
}