nf-core/coproid ${version}
Run Name: $runName

<% if (!success){
    out << """
nf-core/coproid execution completed unsuccessfully!

The exit status of the task that caused the workflow execution to fail was: $exitStatus.

The full error message was:

${errorReport}
"""
} else {
    out << """
nf-core/coproid execution completed successfully!
"""
}
%>

The workflow was completed at $dateComplete (duration: $duration)

The command used to launch the workflow was as follows:

  $commandLine

Pipeline Configuration:

<% out << summary.collect{ k,v -> "| $k | $v |" }.join("\n") %>

nf-core/coproid
https://github.com/nf-core/coproid
--------------------------------------------------------------------------------
/modules/local/sam2lca/updatedb/meta.yml:
--------------------------------------------------------------------------------
name: "sam2lca_updatedb"
description: Build a sam2lca database for calling lowest common ancestors from
  multi-mapped reads in SAM/BAM/CRAM files
keywords:
  - LCA
  - alignment
  - bam
  - metagenomics
  - Ancestor
  - multimapper
  - build
  - database
tools:
  - "sam2lca":
      description: "Lowest Common Ancestor on SAM/BAM/CRAM alignment files"
      homepage: "https://github.com/maxibor/sam2lca"
      documentation: "https://sam2lca.readthedocs.io"
      doi: "10.21105/joss.04360"
      licence: ["GPL v3"]
      identifier: ""

input:
  - - acc2tax_name:
        type: string
        description: Name of the accession2taxid type to use
  - - taxo_db_name:
        type: string
        description: Name of the taxonomy database type to use
  - - taxo_nodes:
        type: file
        description: "NCBI taxonomy nodes file"
        pattern: "*.dmp"
        ontologies:
          - edam: http://edamontology.org/format_2330
  - - taxo_names:
        type: file
        description: NCBI taxonomy names file
        pattern: "*.dmp"
        ontologies:
          - edam: http://edamontology.org/format_2330
  - - taxo_merged:
        type: file
        description: NCBI taxonomy merged file
        pattern: "*.dmp"
        ontologies:
          - edam: http://edamontology.org/format_2330
  - - acc2tax_json:
        type: file
        description: JSON file listing accession2taxid mapping files. Only required if using a custom database
        pattern: "*.json"
        ontologies:
          - edam: "http://edamontology.org/format_3464"
  - - acc2tax:
        type: string
        description: accession2taxid mapping file compressed with gzip. Only required if using a custom database
        pattern: "*.gz"
        ontologies:
          - edam: http://edamontology.org/format_3989
  - - acc2tax_md5:
        type: file
        description: MD5 checksum of the accession2taxid mapping file. Only required if using a custom database
        pattern: "*.md5"
        ontologies:
          - edam: http://edamontology.org/format_2330

output:
  - sam2lca_db:
      - sam2lca_db:
          type: directory
          description: "sam2lca database"
  - versions:
      - "versions.yml":
          type: file
          description: File containing software versions
          pattern: "versions.yml"

authors:
  - "@maxibor"
maintainers:
  - "@maxibor"
--------------------------------------------------------------------------------
/modules/nf-core/multiqc/meta.yml:
--------------------------------------------------------------------------------
name: multiqc
description: Aggregate results from bioinformatics analyses across many samples into
  a single report
keywords:
  - QC
  - bioinformatics tools
  - Beautiful stand-alone HTML report
tools:
  - multiqc:
      description: |
        MultiQC searches a given directory for analysis logs and compiles an HTML report.
        It's a general-use tool, perfect for summarising the output from numerous bioinformatics tools.
      homepage: https://multiqc.info/
      documentation: https://multiqc.info/docs/
      licence: ["GPL-3.0-or-later"]
      identifier: biotools:multiqc
input:
  - - multiqc_files:
        type: file
        description: |
          List of reports / files recognised by MultiQC, for example the html and zip output of FastQC
  - - multiqc_config:
        type: file
        description: Optional config yml for MultiQC
        pattern: "*.{yml,yaml}"
  - - extra_multiqc_config:
        type: file
        description: Second optional config yml for MultiQC. Will override common sections
          in multiqc_config.
        pattern: "*.{yml,yaml}"
  - - multiqc_logo:
        type: file
        description: Optional logo file for MultiQC
        pattern: "*.{png}"
  - - replace_names:
        type: file
        description: |
          Optional two-column sample renaming file. First column a set of
          patterns, second column a set of corresponding replacements. Passed via
          MultiQC's `--replace-names` option.
        pattern: "*.{tsv}"
  - - sample_names:
        type: file
        description: |
          Optional TSV file with headers, passed to the MultiQC --sample-names
          argument.
        pattern: "*.{tsv}"
output:
  - report:
      - "*multiqc_report.html":
          type: file
          description: MultiQC report file
          pattern: "multiqc_report.html"
  - data:
      - "*_data":
          type: directory
          description: MultiQC data directory
          pattern: "multiqc_data"
  - plots:
      - "*_plots":
          type: directory
          description: Plots created by MultiQC
          pattern: "*_plots"
  - versions:
      - versions.yml:
          type: file
          description: File containing software versions
          pattern: "versions.yml"
authors:
  - "@abhi18av"
  - "@bunop"
  - "@drpatelh"
  - "@jfy133"
maintainers:
  - "@abhi18av"
  - "@bunop"
  - "@drpatelh"
  - "@jfy133"
--------------------------------------------------------------------------------
/modules/nf-core/fastqc/main.nf:
--------------------------------------------------------------------------------
process FASTQC {
    tag "${meta.id}"
    label 'process_medium'

    conda "${moduleDir}/environment.yml"
    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
        'https://depot.galaxyproject.org/singularity/fastqc:0.12.1--hdfd78af_0' :
        'biocontainers/fastqc:0.12.1--hdfd78af_0' }"

    input:
    tuple val(meta), path(reads)

    output:
    tuple val(meta), path("*.html"), emit: html
    tuple val(meta), path("*.zip") , emit: zip
    path "versions.yml"            , emit: versions

    when:
    task.ext.when == null || task.ext.when

    script:
    def args = task.ext.args ?: ''
    def prefix = task.ext.prefix ?: "${meta.id}"
    // Make list of old name and new name pairs to use for renaming in the bash while loop
    def old_new_pairs = reads instanceof Path || reads.size() == 1 ? [[ reads, "${prefix}.${reads.extension}" ]] : reads.withIndex().collect { entry, index -> [ entry, "${prefix}_${index + 1}.${entry.extension}" ] }
    def rename_to = old_new_pairs*.join(' ').join(' ')
    def renamed_files = old_new_pairs.collect{ _old_name, new_name -> new_name }.join(' ')
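    // For example (hypothetical inputs): paired-end reads [ 'SRR123_1.fastq', 'SRR123_2.fastq' ] with
    // prefix 'sample1' give rename_to = "SRR123_1.fastq sample1_1.fastq SRR123_2.fastq sample1_2.fastq"
    // and renamed_files = "sample1_1.fastq sample1_2.fastq"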

    // The total amount of RAM allocated by FastQC equals the number of threads defined (--threads) times the amount of RAM defined (--memory)
    // https://github.com/s-andrews/FastQC/blob/1faeea0412093224d7f6a07f777fad60a5650795/fastqc#L211-L222
    // Dividing task.memory by task.cpus keeps the total within the amount of RAM requested in the label
    def memory_in_mb = task.memory ? task.memory.toUnit('MB').toFloat() / task.cpus : null
    // FastQC memory value allowed range (100 - 10000)
    def fastqc_memory = memory_in_mb > 10000 ? 10000 : (memory_in_mb < 100 ? 100 : memory_in_mb)
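    // Worked example (hypothetical resources): 36 GB requested with 6 CPUs gives memory_in_mb = 6144 per thread,
    // which already sits inside FastQC's allowed 100-10000 MB range and is passed through unchanged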

    """
    printf "%s %s\\n" ${rename_to} | while read old_name new_name; do
        [ -f "\${new_name}" ] || ln -s \$old_name \$new_name
    done

    fastqc \\
        ${args} \\
        --threads ${task.cpus} \\
        --memory ${fastqc_memory} \\
        ${renamed_files}

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        fastqc: \$( fastqc --version | sed '/FastQC v/!d; s/.*v//' )
    END_VERSIONS
    """

    stub:
    def prefix = task.ext.prefix ?: "${meta.id}"
    """
    touch ${prefix}.html
    touch ${prefix}.zip

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        fastqc: \$( fastqc --version | sed '/FastQC v/!d; s/.*v//' )
    END_VERSIONS
    """
}
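// Minimal usage sketch (hypothetical include path and channel contents):
// include { FASTQC } from '../modules/nf-core/fastqc/main'
// ch_reads = Channel.of([ [ id:'sample1', single_end:false ], [ file('s1_R1.fastq'), file('s1_R2.fastq') ] ])
// FASTQC( ch_reads )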
--------------------------------------------------------------------------------
/modules/nf-core/sourcepredict/meta.yml:
--------------------------------------------------------------------------------
name: "sourcepredict"
description: Classifies and predicts the origin of metagenomic samples
keywords:
  - source tracking
  - metagenomics
  - machine learning
tools:
  - "sourcepredict":
      description: "Classification and prediction of the origin of metagenomic samples."
      homepage: "https://github.com/maxibor/sourcepredict"
      documentation: "https://sourcepredict.readthedocs.io/en/latest/index.html"
      tool_dev_url: "https://github.com/maxibor/sourcepredict"
      doi: "10.21105/joss.01540"
      licence: ["GPL-3.0-or-later"]
      identifier: biotools:sourcepredict

input:
  - - meta:
        type: map
        description: |
          Groovy Map containing sample information
          e.g. `[ id:'sample1' ]`
    - kraken_parse:
        type: file
        description: Sink TAXID count table in CSV format
        pattern: "*.csv"

  - - sources:
        type: file
        description: Sources TAXID count table in CSV format. The default can be downloaded
          from https://raw.githubusercontent.com/maxibor/sourcepredict/master/data/modern_gut_microbiomes_sources.csv
        pattern: "*.csv"

  - - labels:
        type: file
        description: Labels for the sources table in CSV format. The default can be downloaded
          from https://raw.githubusercontent.com/maxibor/sourcepredict/master/data/modern_gut_microbiomes_labels.csv
        pattern: "*.csv"

  - - taxa_sqlite:
        type: file
        description: taxa.sqlite file downloaded with the ete3 toolkit
        pattern: "taxa.sqlite"

  - - taxa_sqlite_traverse_pkl:
        type: file
        description: taxa.sqlite.traverse.pkl file downloaded with the ete3 toolkit
        pattern: "taxa.sqlite.traverse.pkl"

  - - save_embedding:
        type: string
        description: |
          If true, an option is added to the command to save the sample embedding to a file

output:
  - report:
      - meta:
          type: map
          description: |
            Groovy Map containing sample information
            e.g. `[ id:'sample1', single_end:false ]`
      - "*.sourcepredict.csv":
          type: file
          description: Table containing the predicted proportion of each source in each sample
          pattern: "*.sourcepredict.csv"

  - versions:
      - "versions.yml":
          type: file
          description: File containing software versions
          pattern: "versions.yml"

authors:
  - "@MeriamOs"
maintainers:
  - "@MeriamOs"
--------------------------------------------------------------------------------
/modules/nf-core/sam2lca/analyze/tests/main.nf.test.snap:
--------------------------------------------------------------------------------
{
    "test-sam2lca-analyze": {
        "content": [
            "test.sam2lca.csv",
            [
                [
                    {
                        "id": "test",
                        "single_end": false
                    },
                    "test.sam2lca.json:md5,a49d71ab3aa4d06e3a3094409f71eff0"
                ]
            ],
            [

            ],
            [
                "versions.yml:md5,b6ea36b43b69f73dfa5d2fc7a1ddb1e2"
            ]
        ],
        "meta": {
            "nf-test": "0.8.4",
            "nextflow": "24.04.4"
        },
        "timestamp": "2024-08-27T16:10:41.635602"
    },
    "test-sam2lca-analyze-stub": {
        "content": [
            {
                "0": [
                    [
                        {
                            "id": "test",
                            "single_end": false
                        },
                        "test.csv:md5,d41d8cd98f00b204e9800998ecf8427e"
                    ]
                ],
                "1": [
                    [
                        {
                            "id": "test",
                            "single_end": false
                        },
                        "test.json:md5,d41d8cd98f00b204e9800998ecf8427e"
                    ]
                ],
                "2": [

                ],
                "3": [
                    "versions.yml:md5,b6ea36b43b69f73dfa5d2fc7a1ddb1e2"
                ],
                "bam": [

                ],
                "csv": [
                    [
                        {
                            "id": "test",
                            "single_end": false
                        },
                        "test.csv:md5,d41d8cd98f00b204e9800998ecf8427e"
                    ]
                ],
                "json": [
                    [
                        {
                            "id": "test",
                            "single_end": false
                        },
                        "test.json:md5,d41d8cd98f00b204e9800998ecf8427e"
                    ]
                ],
                "versions": [
                    "versions.yml:md5,b6ea36b43b69f73dfa5d2fc7a1ddb1e2"
                ]
            }
        ],
        "meta": {
            "nf-test": "0.8.4",
            "nextflow": "24.04.4"
        },
        "timestamp": "2024-08-27T16:10:47.050282"
    }
}
--------------------------------------------------------------------------------
/modules/nf-core/samtools/sort/meta.yml:
--------------------------------------------------------------------------------
name: samtools_sort
description: Sort SAM/BAM/CRAM file
keywords:
  - sort
  - bam
  - sam
  - cram
tools:
  - samtools:
      description: |
        SAMtools is a set of utilities for interacting with and post-processing
        short DNA sequence read alignments in the SAM, BAM and CRAM formats, written by Heng Li.
        These files are generated as output by short read aligners like BWA.
      homepage: http://www.htslib.org/
      documentation: http://www.htslib.org/doc/samtools.html
      doi: 10.1093/bioinformatics/btp352
      licence: ["MIT"]
      identifier: biotools:samtools
input:
  - - meta:
        type: map
        description: |
          Groovy Map containing sample information
          e.g. [ id:'test', single_end:false ]
    - bam:
        type: file
        description: BAM/CRAM/SAM file(s)
        pattern: "*.{bam,cram,sam}"
  - - meta2:
        type: map
        description: |
          Groovy Map containing reference information
          e.g. [ id:'genome' ]
    - fasta:
        type: file
        description: Reference genome FASTA file
        pattern: "*.{fa,fasta,fna}"
        optional: true
output:
  - bam:
      - meta:
          type: map
          description: |
            Groovy Map containing sample information
            e.g. [ id:'test', single_end:false ]
      - "*.bam":
          type: file
          description: Sorted BAM file
          pattern: "*.{bam}"
  - cram:
      - meta:
          type: map
          description: |
            Groovy Map containing sample information
            e.g. [ id:'test', single_end:false ]
      - "*.cram":
          type: file
          description: Sorted CRAM file
          pattern: "*.{cram}"
  - crai:
      - meta:
          type: map
          description: |
            Groovy Map containing sample information
            e.g. [ id:'test', single_end:false ]
      - "*.crai":
          type: file
          description: CRAM index file (optional)
          pattern: "*.crai"
  - csi:
      - meta:
          type: map
          description: |
            Groovy Map containing sample information
            e.g. [ id:'test', single_end:false ]
      - "*.csi":
          type: file
          description: BAM index file (optional)
          pattern: "*.csi"
  - versions:
      - versions.yml:
          type: file
          description: File containing software versions
          pattern: "versions.yml"
authors:
  - "@drpatelh"
  - "@ewels"
  - "@matthdsm"
maintainers:
  - "@drpatelh"
  - "@ewels"
  - "@matthdsm"
--------------------------------------------------------------------------------
/.github/workflows/awsfulltest.yml:
--------------------------------------------------------------------------------
name: nf-core AWS full size tests
# This workflow is triggered on PRs opened against the main/master branch.
# It can also be triggered manually via the GitHub Actions workflow dispatch button.
# It runs the pipeline with the test_full profile on AWS Batch.

on:
  pull_request:
    branches:
      - main
      - master
  workflow_dispatch:
  pull_request_review:
    types: [submitted]

jobs:
  run-platform:
    name: Run AWS full tests
    # Run only if the PR is approved by at least 2 reviewers and targets the master branch, or if triggered manually
    if: github.repository == 'nf-core/coproid' && github.event.review.state == 'approved' && github.event.pull_request.base.ref == 'master' || github.event_name == 'workflow_dispatch'
    runs-on: ubuntu-latest
    steps:
      - name: Get PR reviews
        uses: octokit/request-action@v2.x
        if: github.event_name != 'workflow_dispatch'
        id: check_approvals
        continue-on-error: true
        with:
          route: GET /repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}/reviews?per_page=100
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Check for approvals
        if: ${{ failure() && github.event_name != 'workflow_dispatch' }}
        run: |
          echo "No review approvals found. At least 2 approvals are required to run this action automatically."
          exit 1

      - name: Check for enough approvals (>=2)
        id: test_variables
        if: github.event_name != 'workflow_dispatch'
        run: |
          JSON_RESPONSE='${{ steps.check_approvals.outputs.data }}'
          CURRENT_APPROVALS_COUNT=$(echo $JSON_RESPONSE | jq -c '[.[] | select(.state | contains("APPROVED")) ] | length')
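          # e.g. a reviews payload with states ["APPROVED", "CHANGES_REQUESTED", "APPROVED"] yields a count of 2, so the check below passes (hypothetical data)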
          test $CURRENT_APPROVALS_COUNT -ge 2 || exit 1 # At least 2 approvals are required

      - name: Launch workflow via Seqera Platform
        uses: seqeralabs/action-tower-launch@v2
        with:
          workspace_id: ${{ secrets.TOWER_WORKSPACE_ID }}
          access_token: ${{ secrets.TOWER_ACCESS_TOKEN }}
          compute_env: ${{ secrets.TOWER_COMPUTE_ENV }}
          revision: ${{ github.sha }}
          workdir: s3://${{ secrets.AWS_S3_BUCKET }}/work/coproid/work-${{ github.sha }}
          parameters: |
            {
              "hook_url": "${{ secrets.MEGATESTS_ALERTS_SLACK_HOOK_URL }}",
              "outdir": "s3://${{ secrets.AWS_S3_BUCKET }}/coproid/results-${{ github.sha }}"
            }
          profiles: test_full

      - uses: actions/upload-artifact@v4
        with:
          name: Seqera Platform debug log file
          path: |
            seqera_platform_action_*.log
            seqera_platform_action_*.json
--------------------------------------------------------------------------------
/assets/adaptivecard.json:
--------------------------------------------------------------------------------
{
    "type": "message",
    "attachments": [
        {
            "contentType": "application/vnd.microsoft.card.adaptive",
            "contentUrl": null,
            "content": {
                "\$schema": "http://adaptivecards.io/schemas/adaptive-card.json",
                "msteams": {
                    "width": "Full"
                },
                "type": "AdaptiveCard",
                "version": "1.2",
                "body": [
                    {
                        "type": "TextBlock",
                        "size": "Large",
                        "weight": "Bolder",
                        "color": "<% if (success) { %>Good<% } else { %>Attention<%} %>",
                        "text": "nf-core/coproid v${version} - ${runName}",
                        "wrap": true
                    },
                    {
                        "type": "TextBlock",
                        "spacing": "None",
                        "text": "Completed at ${dateComplete} (duration: ${duration})",
                        "isSubtle": true,
                        "wrap": true
                    },
                    {
                        "type": "TextBlock",
                        "text": "<% if (success) { %>Pipeline completed successfully!<% } else { %>Pipeline completed with errors. The full error message was: ${errorReport}.<% } %>",
                        "wrap": true
                    },
                    {
                        "type": "TextBlock",
                        "text": "The command used to launch the workflow was as follows:",
                        "wrap": true
                    },
                    {
                        "type": "TextBlock",
                        "text": "${commandLine}",
                        "isSubtle": true,
                        "wrap": true
                    }
                ],
                "actions": [
                    {
                        "type": "Action.ShowCard",
                        "title": "Pipeline Configuration",
                        "card": {
                            "type": "AdaptiveCard",
                            "\$schema": "http://adaptivecards.io/schemas/adaptive-card.json",
                            "body": [
                                {
                                    "type": "FactSet",
                                    "facts": [<% out << summary.collect{ k,v -> "{\"title\": \"$k\", \"value\" : \"$v\"}"}.join(",\n") %>
                                    ]
                                }
                            ]
                        }
                    }
                ]
            }
        }
    ]
}
--------------------------------------------------------------------------------
/modules/nf-core/untar/main.nf:
--------------------------------------------------------------------------------
process UNTAR {
    tag "${archive}"
    label 'process_single'

    conda "${moduleDir}/environment.yml"
    container "${workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container
        ? 'https://community-cr-prod.seqera.io/docker/registry/v2/blobs/sha256/52/52ccce28d2ab928ab862e25aae26314d69c8e38bd41ca9431c67ef05221348aa/data'
        : 'community.wave.seqera.io/library/coreutils_grep_gzip_lbzip2_pruned:838ba80435a629f8'}"

    input:
    tuple val(meta), path(archive)

    output:
    tuple val(meta), path("${prefix}"), emit: untar
    path "versions.yml", emit: versions

    when:
    task.ext.when == null || task.ext.when

    script:
    def args = task.ext.args ?: ''
    def args2 = task.ext.args2 ?: ''
    prefix = task.ext.prefix ?: (meta.id ? "${meta.id}" : archive.baseName.toString().replaceFirst(/\.tar$/, ""))

    """
    mkdir ${prefix}

    ## Ensure --strip-components is only applied when the top level of the tar contents is a single directory
    ## If the archive contains just files or multiple directories, place everything in prefix as-is
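    ## e.g. an archive listing "db/nodes.dmp" and "db/names.dmp" has a single unique top-level entry ("db/"),
    ## so --strip-components 1 unpacks its contents directly into ${prefix}/ (hypothetical archive contents)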
    if [[ \$(tar -taf ${archive} | grep -o -P "^.*?\\/" | uniq | wc -l) -eq 1 ]]; then
        tar \\
            -C ${prefix} --strip-components 1 \\
            -xavf \\
            ${args} \\
            ${archive} \\
            ${args2}
    else
        tar \\
            -C ${prefix} \\
            -xavf \\
            ${args} \\
            ${archive} \\
            ${args2}
    fi

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        untar: \$(echo \$(tar --version 2>&1) | sed 's/^.*(GNU tar) //; s/ Copyright.*\$//')
    END_VERSIONS
    """

    stub:
    prefix = task.ext.prefix ?: (meta.id ? "${meta.id}" : archive.toString().replaceFirst(/\.[^\.]+(.gz)?$/, ""))
    """
    mkdir ${prefix}
    ## Dry-run untarring the archive to list the files and place them all in prefix
    if [[ \$(tar -taf ${archive} | grep -o -P "^.*?\\/" | uniq | wc -l) -eq 1 ]]; then
        for i in `tar -tf ${archive}`;
        do
            if [[ \$(echo "\${i}" | grep -E "/\$") == "" ]];
            then
                touch \${i}
            else
                mkdir -p \${i}
            fi
        done
    else
        for i in `tar -tf ${archive}`;
        do
            if [[ \$(echo "\${i}" | grep -E "/\$") == "" ]];
            then
                touch ${prefix}/\${i}
            else
                mkdir -p ${prefix}/\${i}
            fi
        done
    fi

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        untar: \$(echo \$(tar --version 2>&1) | sed 's/^.*(GNU tar) //; s/ Copyright.*\$//')
    END_VERSIONS
    """
}
--------------------------------------------------------------------------------
/.github/workflows/linting.yml:
--------------------------------------------------------------------------------
name: nf-core linting
# This workflow is triggered on pushes and PRs to the repository.
# It runs the `nf-core pipelines lint` and markdown lint tests to ensure
# that the code meets the nf-core guidelines.
on:
  push:
    branches:
      - dev
  pull_request:
  release:
    types: [published]

jobs:
  pre-commit:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4

      - name: Set up Python 3.12
        uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5
        with:
          python-version: "3.12"

      - name: Install pre-commit
        run: pip install pre-commit

      - name: Run pre-commit
        run: pre-commit run --all-files

  nf-core:
    runs-on: ubuntu-latest
    steps:
      - name: Check out pipeline code
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4

      - name: Install Nextflow
        uses: nf-core/setup-nextflow@v2

      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5
        with:
          python-version: "3.12"
          architecture: "x64"

      - name: read .nf-core.yml
        uses: pietrobolcato/action-read-yaml@1.1.0
        id: read_yml
        with:
          config: ${{ github.workspace }}/.nf-core.yml

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install nf-core==${{ steps.read_yml.outputs['nf_core_version'] }}

      - name: Run nf-core pipelines lint
        if: ${{ github.base_ref != 'master' }}
        env:
          GITHUB_COMMENTS_URL: ${{ github.event.pull_request.comments_url }}
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GITHUB_PR_COMMIT: ${{ github.event.pull_request.head.sha }}
        run: nf-core -l lint_log.txt pipelines lint --dir ${GITHUB_WORKSPACE} --markdown lint_results.md

      - name: Run nf-core pipelines lint --release
        if: ${{ github.base_ref == 'master' }}
        env:
          GITHUB_COMMENTS_URL: ${{ github.event.pull_request.comments_url }}
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GITHUB_PR_COMMIT: ${{ github.event.pull_request.head.sha }}
        run: nf-core -l lint_log.txt pipelines lint --release --dir ${GITHUB_WORKSPACE} --markdown lint_results.md

      - name: Save PR number
        if: ${{ always() }}
        run: echo ${{ github.event.pull_request.number }} > PR_number.txt

      - name: Upload linting log file artifact
        if: ${{ always() }}
        uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4
        with:
          name: linting-logs
          path: |
            lint_log.txt
            lint_results.md
            PR_number.txt
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
name: nf-core CI
# This workflow runs the pipeline with the minimal test dataset to check that it completes without any syntax errors
on:
  push:
    branches:
      - dev
  pull_request:
  release:
    types: [published]
  workflow_dispatch:

env:
  NXF_ANSI_LOG: false
  NXF_SINGULARITY_CACHEDIR: ${{ github.workspace }}/.singularity
  NXF_SINGULARITY_LIBRARYDIR: ${{ github.workspace }}/.singularity

concurrency:
  group: "${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}"
  cancel-in-progress: true

jobs:
  test:
    name: "Run pipeline with test data (${{ matrix.NXF_VER }} | ${{ matrix.test_name }} | ${{ matrix.profile }})"
    # Only run on push if this is the nf-core dev branch (merged PRs)
    if: "${{ github.event_name != 'push' || (github.event_name == 'push' && github.repository == 'nf-core/coproid') }}"
    runs-on: ubuntu-latest
    strategy:
      matrix:
        NXF_VER:
          - "24.10.0"
        profile:
          - "docker"
          - "singularity"
        test_name:
          - "test"
        isMaster:
          - ${{ github.base_ref == 'master' }}
        # Exclude conda and singularity on dev
        exclude:
          - isMaster: false
            profile: "conda"
          - isMaster: false
            profile: "singularity"
    steps:
      - name: Check out pipeline code
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
        with:
          fetch-depth: 0

      - name: Set up Nextflow
        uses: nf-core/setup-nextflow@v2
        with:
          version: "${{ matrix.NXF_VER }}"

      - name: Set up Apptainer
        if: matrix.profile == 'singularity'
        uses: eWaterCycle/setup-apptainer@main

      - name: Set up Singularity
        if: matrix.profile == 'singularity'
        run: |
          mkdir -p $NXF_SINGULARITY_CACHEDIR
          mkdir -p $NXF_SINGULARITY_LIBRARYDIR

      - name: Set up Miniconda
        if: matrix.profile == 'conda'
        uses: conda-incubator/setup-miniconda@a4260408e20b96e80095f42ff7f1a15b27dd94ca # v3
        with:
          miniconda-version: "latest"
          auto-update-conda: true
          conda-solver: libmamba
          channels: conda-forge,bioconda

      - name: Set up Conda
        if: matrix.profile == 'conda'
        run: |
          echo $(realpath $CONDA)/condabin >> $GITHUB_PATH
          echo $(realpath python) >> $GITHUB_PATH

      - name: Clean up Disk space
        uses: jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be # v1.3.1

      - name: "Run pipeline with test data ${{ matrix.NXF_VER }} | ${{ matrix.test_name }} | ${{ matrix.profile }}"
        run: |
          nextflow run ${GITHUB_WORKSPACE} -profile ${{ matrix.test_name }},${{ matrix.profile }} --outdir ./results
--------------------------------------------------------------------------------
/assets/methods_description_template.yml:
--------------------------------------------------------------------------------
id: "nf-core-coproid-methods-description"
description: "Suggested text and references to use when describing pipeline usage within the methods section of a publication."
section_name: "nf-core/coproid Methods Description"
section_href: "https://github.com/nf-core/coproid"
plot_type: "html"
data: |
  Data was processed using nf-core/coproid v${workflow.manifest.version} ${doi_text} of the nf-core collection of workflows (Ewels et al., 2020), utilising reproducible software environments from the Bioconda (Grüning et al., 2018) and Biocontainers (da Veiga Leprevost et al., 2017) projects.