├── .devcontainer └── devcontainer.json ├── .editorconfig ├── .gitattributes ├── .github ├── .dockstore.yml ├── CONTRIBUTING.md ├── ISSUE_TEMPLATE │ ├── bug_report.yml │ ├── config.yml │ └── feature_request.yml ├── PULL_REQUEST_TEMPLATE.md └── workflows │ ├── awsfulltest.yml │ ├── awstest.yml │ ├── branch.yml │ ├── ci.yml │ ├── clean-up.yml │ ├── download_pipeline.yml │ ├── fix-linting.yml │ ├── linting.yml │ ├── linting_comment.yml │ └── release-announcements.yml ├── .gitignore ├── .gitpod.yml ├── .nf-core.yml ├── .pre-commit-config.yaml ├── .prettierignore ├── .prettierrc.yml ├── CHANGELOG.md ├── CITATIONS.md ├── CODE_OF_CONDUCT.md ├── LICENSE ├── README.md ├── adapter_fasta_test ├── assets ├── adaptivecard.json ├── dummy_file_arriba.txt ├── dummy_file_fusioncatcher.txt ├── dummy_file_pizzly.txt ├── dummy_file_squid.txt ├── dummy_file_starfusion.txt ├── email_template.html ├── email_template.txt ├── methods_description_template.yml ├── multiqc_config.yml ├── nf-core-rnafusion_logo_light.png ├── schema_input.json ├── sendmail_template.txt └── slackreport.json ├── bin ├── get_rrna_transcripts.py └── vcf_collect.py ├── conf ├── base.config ├── modules.config ├── test.config └── test_full.config ├── docs ├── README.md ├── images │ ├── BTB_logo.png │ ├── BTB_logo.svg │ ├── NGI_logo.png │ ├── NGI_logo.svg │ ├── SDU_logo.png │ ├── SciLifeLab_logo.png │ ├── SciLifeLab_logo.svg │ ├── mqc_fastqc_adapter.png │ ├── mqc_fastqc_counts.png │ ├── mqc_fastqc_quality.png │ ├── nf-core-rnafusion_logo_dark.png │ ├── nf-core-rnafusion_logo_light.png │ ├── nf-core-rnafusion_metro_map.png │ ├── nf-core-rnafusion_metro_map.svg │ ├── rnafusion_logo.png │ ├── rnafusion_logo.svg │ ├── summary_graph_1.png │ ├── summary_graph_2.png │ └── summary_graph_3.png ├── output.md └── usage.md ├── main.nf ├── modules.json ├── modules ├── local │ ├── arriba │ │ ├── download │ │ │ ├── main.nf │ │ │ └── meta.yml │ │ └── visualisation │ │ │ ├── main.nf │ │ │ └── meta.yml │ ├── convert2bed │ │ ├── main.nf │ │ └── meta.yml │ ├── ensembl │ │ └── main.nf │ ├── fusioncatcher │ │ ├── detect │ │ │ ├── main.nf │ │ │ └── meta.yml │ │ └── download │ │ │ ├── main.nf │ │ │ └── meta.yml │ ├── fusioninspector │ │ ├── main.nf │ │ └── meta.yml │ ├── fusionreport │ │ ├── detect │ │ │ ├── main.nf │ │ │ └── meta.yml │ │ └── download │ │ │ ├── main.nf │ │ │ └── meta.yml │ ├── hgnc │ │ └── main.nf │ ├── picard │ │ └── collectrnaseqmetrics │ │ │ ├── main.nf │ │ │ └── meta.yml │ ├── rrnatranscripts │ │ └── main.nf │ ├── starfusion │ │ ├── build │ │ │ ├── main.nf │ │ │ └── meta.yml │ │ ├── detect │ │ │ ├── main.nf │ │ │ └── meta.yml │ │ └── download │ │ │ ├── main.nf │ │ │ └── meta.yml │ ├── uscs │ │ └── custom_gtftogenepred │ │ │ ├── main.nf │ │ │ └── meta.yml │ └── vcf_collect │ │ ├── main.nf │ │ └── meta.yml └── nf-core │ ├── agat │ └── convertspgff2tsv │ │ ├── environment.yml │ │ ├── main.nf │ │ └── meta.yml │ ├── arriba │ ├── main.nf │ └── meta.yml │ ├── cat │ ├── cat │ │ ├── environment.yml │ │ ├── main.nf │ │ ├── meta.yml │ │ └── tests │ │ │ ├── main.nf.test │ │ │ ├── main.nf.test.snap │ │ │ ├── nextflow_unzipped_zipped.config │ │ │ ├── nextflow_zipped_unzipped.config │ │ │ └── tags.yml │ └── fastq │ │ ├── environment.yml │ │ ├── main.nf │ │ ├── meta.yml │ │ └── tests │ │ ├── main.nf.test │ │ ├── main.nf.test.snap │ │ └── tags.yml │ ├── fastp │ ├── environment.yml │ ├── main.nf │ ├── meta.yml │ └── tests │ │ ├── main.nf.test │ │ ├── main.nf.test.snap │ │ ├── nextflow.config │ │ └── tags.yml │ ├── fastqc │ ├── environment.yml │ ├── main.nf │ ├── 
meta.yml │ └── tests │ │ ├── main.nf.test │ │ ├── main.nf.test.snap │ │ └── tags.yml │ ├── gatk4 │ ├── bedtointervallist │ │ ├── environment.yml │ │ ├── main.nf │ │ └── meta.yml │ ├── createsequencedictionary │ │ ├── environment.yml │ │ ├── main.nf │ │ └── meta.yml │ └── markduplicates │ │ ├── environment.yml │ │ ├── main.nf │ │ └── meta.yml │ ├── multiqc │ ├── environment.yml │ ├── main.nf │ ├── meta.yml │ └── tests │ │ ├── main.nf.test │ │ ├── main.nf.test.snap │ │ └── tags.yml │ ├── picard │ ├── collectinsertsizemetrics │ │ ├── environment.yml │ │ ├── main.nf │ │ └── meta.yml │ └── collectwgsmetrics │ │ ├── environment.yml │ │ ├── main.nf │ │ └── meta.yml │ ├── samtools │ ├── faidx │ │ ├── environment.yml │ │ ├── main.nf │ │ └── meta.yml │ ├── index │ │ ├── environment.yml │ │ ├── main.nf │ │ ├── meta.yml │ │ └── tests │ │ │ ├── csi.nextflow.config │ │ │ ├── main.nf.test │ │ │ ├── main.nf.test.snap │ │ │ └── tags.yml │ ├── sort │ │ ├── environment.yml │ │ ├── main.nf │ │ ├── meta.yml │ │ └── tests │ │ │ ├── main.nf.test │ │ │ ├── main.nf.test.snap │ │ │ ├── nextflow.config │ │ │ └── tags.yml │ └── view │ │ ├── environment.yml │ │ ├── main.nf │ │ └── meta.yml │ ├── star │ ├── align │ │ ├── environment.yml │ │ ├── main.nf │ │ ├── meta.yml │ │ └── tests │ │ │ ├── main.nf.test │ │ │ ├── main.nf.test.snap │ │ │ ├── nextflow.arriba.config │ │ │ ├── nextflow.config │ │ │ ├── nextflow.starfusion.config │ │ │ └── tags.yml │ └── genomegenerate │ │ ├── environment.yml │ │ ├── main.nf │ │ ├── meta.yml │ │ └── tests │ │ ├── main.nf.test │ │ ├── main.nf.test.snap │ │ └── tags.yml │ └── stringtie │ ├── merge │ ├── environment.yml │ ├── main.nf │ ├── meta.yml │ └── tests │ │ ├── main.nf.test │ │ ├── main.nf.test.snap │ │ └── tags.yml │ └── stringtie │ ├── environment.yml │ ├── main.nf │ ├── meta.yml │ └── tests │ ├── main.nf.test │ ├── main.nf.test.snap │ └── tags.yml ├── nextflow.config ├── nextflow_schema.json ├── nf-test.config ├── pyproject.toml ├── subworkflows ├── local │ ├── arriba_workflow.nf │ ├── fusioncatcher_workflow.nf │ ├── fusioninspector_workflow.nf │ ├── fusionreport_workflow.nf │ ├── qc_workflow.nf │ ├── starfusion_workflow.nf │ ├── stringtie_workflow.nf │ ├── trim_workflow.nf │ └── utils_nfcore_rnafusion_pipeline │ │ └── main.nf └── nf-core │ ├── utils_nextflow_pipeline │ ├── main.nf │ ├── meta.yml │ └── tests │ │ ├── main.function.nf.test │ │ ├── main.function.nf.test.snap │ │ ├── main.workflow.nf.test │ │ ├── nextflow.config │ │ └── tags.yml │ ├── utils_nfcore_pipeline │ ├── main.nf │ ├── meta.yml │ └── tests │ │ ├── main.function.nf.test │ │ ├── main.function.nf.test.snap │ │ ├── main.workflow.nf.test │ │ ├── main.workflow.nf.test.snap │ │ ├── nextflow.config │ │ └── tags.yml │ └── utils_nfvalidation_plugin │ ├── main.nf │ ├── meta.yml │ └── tests │ ├── main.nf.test │ ├── nextflow_schema.json │ └── tags.yml ├── tests ├── main.nf.test └── nextflow.config ├── tower.yml └── workflows ├── build_references.nf └── rnafusion.nf /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "nfcore", 3 | "image": "nfcore/gitpod:latest", 4 | "remoteUser": "gitpod", 5 | "runArgs": ["--privileged"], 6 | 7 | // Configure tool-specific properties. 8 | "customizations": { 9 | // Configure properties specific to VS Code. 10 | "vscode": { 11 | // Set *default* container specific settings.json values on container create. 
12 | "settings": { 13 | "python.defaultInterpreterPath": "/opt/conda/bin/python" 14 | }, 15 | 16 | // Add the IDs of extensions you want installed when the container is created. 17 | "extensions": ["ms-python.python", "ms-python.vscode-pylance", "nf-core.nf-core-extensionpack"] 18 | } 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | charset = utf-8 5 | end_of_line = lf 6 | insert_final_newline = true 7 | trim_trailing_whitespace = true 8 | indent_size = 4 9 | indent_style = space 10 | 11 | [*.{md,yml,yaml,html,css,scss,js}] 12 | indent_size = 2 13 | 14 | # These files are edited and tested upstream in nf-core/modules 15 | [/modules/nf-core/**] 16 | charset = unset 17 | end_of_line = unset 18 | insert_final_newline = unset 19 | trim_trailing_whitespace = unset 20 | indent_style = unset 21 | [/subworkflows/nf-core/**] 22 | charset = unset 23 | end_of_line = unset 24 | insert_final_newline = unset 25 | trim_trailing_whitespace = unset 26 | indent_style = unset 27 | 28 | [/assets/email*] 29 | indent_size = unset 30 | 31 | # ignore Readme 32 | [README.md] 33 | indent_style = unset 34 | 35 | # ignore python 36 | [*.{py,md}] 37 | indent_style = unset 38 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | *.config linguist-language=nextflow 2 | *.nf.test linguist-language=nextflow 3 | modules/nf-core/** linguist-generated 4 | subworkflows/nf-core/** linguist-generated 5 | -------------------------------------------------------------------------------- /.github/.dockstore.yml: -------------------------------------------------------------------------------- 1 | # Dockstore config version, not pipeline version 2 | version: 1.2 3 | workflows: 4 | - subclass: nfl 5 | primaryDescriptorPath: /nextflow.config 6 | publish: True 7 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.yml: -------------------------------------------------------------------------------- 1 | name: Bug report 2 | description: Report something that is broken or incorrect 3 | labels: bug 4 | body: 5 | - type: markdown 6 | attributes: 7 | value: | 8 | Before you post this issue, please check the documentation: 9 | 10 | - [nf-core website: troubleshooting](https://nf-co.re/usage/troubleshooting) 11 | - [nf-core/rnafusion pipeline documentation](https://nf-co.re/rnafusion/usage) 12 | 13 | - type: textarea 14 | id: description 15 | attributes: 16 | label: Description of the bug 17 | description: A clear and concise description of what the bug is. 18 | validations: 19 | required: true 20 | 21 | - type: textarea 22 | id: command_used 23 | attributes: 24 | label: Command used and terminal output 25 | description: Steps to reproduce the behaviour. Please paste the command you used to launch the pipeline and the output from your terminal. 26 | render: console 27 | placeholder: | 28 | $ nextflow run ... 29 | 30 | Some output where something broke 31 | 32 | - type: textarea 33 | id: files 34 | attributes: 35 | label: Relevant files 36 | description: | 37 | Please drag and drop the relevant files here. Create a `.zip` archive if the extension is not allowed. 
38 | Your verbose log file `.nextflow.log` is often useful _(this is a hidden file in the directory where you launched the pipeline)_ as well as custom Nextflow configuration files. 39 | 40 | - type: textarea 41 | id: system 42 | attributes: 43 | label: System information 44 | description: | 45 | * Nextflow version _(eg. 23.04.0)_ 46 | * Hardware _(eg. HPC, Desktop, Cloud)_ 47 | * Executor _(eg. slurm, local, awsbatch)_ 48 | * Container engine: _(e.g. Docker, Singularity, Conda, Podman, Shifter, Charliecloud, or Apptainer)_ 49 | * OS _(eg. CentOS Linux, macOS, Linux Mint)_ 50 | * Version of nf-core/rnafusion _(eg. 1.1, 1.5, 1.8.2)_ 51 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | contact_links: 2 | - name: Join nf-core 3 | url: https://nf-co.re/join 4 | about: Please join the nf-core community here 5 | - name: "Slack #rnafusion channel" 6 | url: https://nfcore.slack.com/channels/rnafusion 7 | about: Discussion about the nf-core/rnafusion pipeline 8 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.yml: -------------------------------------------------------------------------------- 1 | name: Feature request 2 | description: Suggest an idea for the nf-core/rnafusion pipeline 3 | labels: enhancement 4 | body: 5 | - type: textarea 6 | id: description 7 | attributes: 8 | label: Description of feature 9 | description: Please describe your suggestion for a new feature. It might help to describe a problem or use case, plus any alternatives that you have considered. 10 | validations: 11 | required: true 12 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 13 | 14 | ## PR checklist 15 | 16 | - [ ] This comment contains a description of changes (with reason). 17 | - [ ] If you've fixed a bug or added code that should be tested, add tests! 18 | - [ ] If you've added a new tool, have you followed the pipeline conventions in the [contribution docs](https://github.com/nf-core/rnafusion/tree/master/.github/CONTRIBUTING.md)? 19 | - [ ] If necessary, also make a PR on the nf-core/rnafusion _branch_ on the [nf-core/test-datasets](https://github.com/nf-core/test-datasets) repository. 20 | - [ ] Make sure your code lints (`nf-core lint`). 21 | 22 | - [ ] Check for unexpected warnings in debug mode (`nextflow run . -profile debug,test,docker --outdir <OUTDIR>`). 23 | - [ ] Usage Documentation in `docs/usage.md` is updated. 24 | - [ ] Output Documentation in `docs/output.md` is updated. 25 | - [ ] `CHANGELOG.md` is updated. 26 | - [ ] `README.md` is updated (including new tool citations and authors/contributors). 27 | -------------------------------------------------------------------------------- /.github/workflows/awsfulltest.yml: -------------------------------------------------------------------------------- 1 | name: nf-core AWS full size tests 2 | # This workflow is triggered on published releases. 3 | # It can be additionally triggered manually with GitHub actions workflow dispatch button.
4 | # It runs the -profile 'test_full' on AWS batch 5 | 6 | on: 7 | release: 8 | types: [published] 9 | workflow_dispatch: 10 | jobs: 11 | run-tower: 12 | name: Run AWS full tests 13 | if: github.repository == 'nf-core/rnafusion' 14 | runs-on: ubuntu-latest 15 | steps: 16 | - name: Launch build references workflow via tower 17 | uses: seqeralabs/action-tower-launch@v2 18 | with: 19 | workspace_id: ${{ secrets.TOWER_WORKSPACE_ID }} 20 | access_token: ${{ secrets.TOWER_ACCESS_TOKEN }} 21 | compute_env: ${{ secrets.TOWER_COMPUTE_ENV }} 22 | revision: ${{ github.sha }} 23 | workdir: s3://${{ secrets.AWS_S3_BUCKET }}/work/rnafusion/work-${{ github.sha }} 24 | parameters: | 25 | { 26 | "hook_url": "${{ secrets.MEGATESTS_ALERTS_SLACK_HOOK_URL }}", 27 | "outdir": "s3://${{ secrets.AWS_S3_BUCKET }}/rnafusion/results-${{ github.sha }}", 28 | "genomes_base": "s3://${{ secrets.AWS_S3_BUCKET }}/rnafusion/results-${{ github.sha }}/references", 29 | "cosmic_username": "${{ secrets.cosmic_username }}", 30 | "cosmic_passwd": "${{ secrets.cosmic_passwd }}", 31 | "all": true, 32 | "build_references": true 33 | } 34 | profiles: test_full,aws_tower 35 | - uses: actions/upload-artifact@v4 36 | with: 37 | name: Tower debug log file 38 | path: | 39 | tower_action_*.log 40 | tower_action_*.json 41 | 42 | - name: Launch run workflow via tower 43 | uses: seqeralabs/action-tower-launch@v2 44 | with: 45 | workspace_id: ${{ secrets.TOWER_WORKSPACE_ID }} 46 | access_token: ${{ secrets.TOWER_ACCESS_TOKEN }} 47 | compute_env: ${{ secrets.TOWER_COMPUTE_ENV }} 48 | workdir: s3://${{ secrets.AWS_S3_BUCKET }}/work/rnafusion/work-${{ github.sha }} 49 | parameters: | 50 | { 51 | "outdir": "s3://${{ secrets.AWS_S3_BUCKET }}/rnafusion/results-${{ github.sha }}", 52 | "genomes_base": "s3://${{ secrets.AWS_S3_BUCKET }}/rnafusion/results-${{ github.sha }}/references", 53 | "cosmic_username": "${{ secrets.cosmic_username }}", 54 | "cosmic_passwd": "${{ secrets.cosmic_passwd }}", 55 | "all": true 56 | } 57 | profiles: test_full,aws_tower 58 | -------------------------------------------------------------------------------- /.github/workflows/awstest.yml: -------------------------------------------------------------------------------- 1 | name: nf-core AWS test 2 | # This workflow can be triggered manually with the GitHub actions workflow dispatch button.
3 | # It runs the -profile 'test' on AWS batch 4 | 5 | on: 6 | workflow_dispatch: 7 | jobs: 8 | run-tower: 9 | name: Run AWS tests 10 | if: github.repository == 'nf-core/rnafusion' 11 | runs-on: ubuntu-latest 12 | steps: 13 | # Launch workflow using Tower CLI tool action 14 | - name: Launch build references workflow via tower 15 | uses: seqeralabs/action-tower-launch@v2 16 | with: 17 | workspace_id: ${{ secrets.TOWER_WORKSPACE_ID }} 18 | access_token: ${{ secrets.TOWER_ACCESS_TOKEN }} 19 | compute_env: ${{ secrets.TOWER_COMPUTE_ENV }} 20 | revision: ${{ github.sha }} 21 | workdir: s3://${{ secrets.AWS_S3_BUCKET }}/work/rnafusion/work-${{ github.sha }} 22 | parameters: | 23 | { 24 | "outdir": "s3://${{ secrets.AWS_S3_BUCKET }}/rnafusion/results-${{ github.sha }}", 25 | "genomes_base": "s3://${{ secrets.AWS_S3_BUCKET }}/rnafusion/results-${{ github.sha }}/references", 26 | "cosmic_username": "${{ secrets.cosmic_username }}", 27 | "cosmic_passwd": "${{ secrets.cosmic_passwd }}", 28 | "all": true, 29 | "stub": true, 30 | "build_references": true 31 | } 32 | profiles: test,aws_tower 33 | - uses: actions/upload-artifact@v4 34 | with: 35 | name: Tower debug log file 36 | path: | 37 | tower_action_*.log 38 | tower_action_*.json 39 | 40 | - name: Launch workflow via tower 41 | uses: seqeralabs/action-tower-launch@v2 42 | with: 43 | workspace_id: ${{ secrets.TOWER_WORKSPACE_ID }} 44 | access_token: ${{ secrets.TOWER_ACCESS_TOKEN }} 45 | compute_env: ${{ secrets.TOWER_COMPUTE_ENV }} 46 | workdir: s3://${{ secrets.AWS_S3_BUCKET }}/work/rnafusion/work-${{ github.sha }} 47 | parameters: | 48 | { 49 | "outdir": "s3://${{ secrets.AWS_S3_BUCKET }}/rnafusion/results-${{ github.sha }}", 50 | "genomes_base": "s3://${{ secrets.AWS_S3_BUCKET }}/rnafusion/results-${{ github.sha }}/references", 51 | "cosmic_username": "${{ secrets.cosmic_username }}", 52 | "cosmic_passwd": "${{ secrets.cosmic_passwd }}", 53 | "all": true, 54 | "stub": true 55 | } 56 | profiles: test,aws_tower 57 | - uses: actions/upload-artifact@v4 58 | with: 59 | name: Tower debug log file 60 | path: | 61 | tower_action_*.log 62 | tower_action_*.json 63 | -------------------------------------------------------------------------------- /.github/workflows/branch.yml: -------------------------------------------------------------------------------- 1 | name: nf-core branch protection 2 | # This workflow is triggered on PRs to master branch on the repository 3 | # It fails when someone tries to make a PR against the nf-core `master` branch instead of `dev` 4 | on: 5 | pull_request_target: 6 | branches: [master] 7 | 8 | jobs: 9 | test: 10 | runs-on: ubuntu-latest 11 | steps: 12 | # PRs to the nf-core repo master branch are only ok if coming from the nf-core repo `dev` or any `patch` branches 13 | - name: Check PRs 14 | if: github.repository == 'nf-core/rnafusion' 15 | run: | 16 | { [[ ${{github.event.pull_request.head.repo.full_name }} == nf-core/rnafusion ]] && [[ $GITHUB_HEAD_REF == "dev" ]]; } || [[ $GITHUB_HEAD_REF == "patch" ]] 17 | 18 | # If the above check failed, post a comment on the PR explaining the failure 19 | # NOTE - this doesn't currently work if the PR is coming from a fork, due to limitations in GitHub actions secrets 20 | - name: Post PR comment 21 | if: failure() 22 | uses: mshick/add-pr-comment@b8f338c590a895d50bcbfa6c5859251edc8952fc # v2 23 | with: 24 | message: | 25 | ## This PR is against the `master` branch :x: 26 | 27 | * Do not close this PR 28 | * Click _Edit_ and change the `base` to `dev` 29 | * This CI test will remain 
failed until you push a new commit 30 | 31 | --- 32 | 33 | Hi @${{ github.event.pull_request.user.login }}, 34 | 35 | It looks like this pull-request has been made against the [${{github.event.pull_request.head.repo.full_name }}](https://github.com/${{github.event.pull_request.head.repo.full_name }}) `master` branch. 36 | The `master` branch on nf-core repositories should always contain code from the latest release. 37 | Because of this, PRs to `master` are only allowed if they come from the [${{github.event.pull_request.head.repo.full_name }}](https://github.com/${{github.event.pull_request.head.repo.full_name }}) `dev` branch. 38 | 39 | You do not need to close this PR, you can change the target branch to `dev` by clicking the _"Edit"_ button at the top of this page. 40 | Note that even after this, the test will continue to show as failing until you push a new commit. 41 | 42 | Thanks again for your contribution! 43 | repo-token: ${{ secrets.GITHUB_TOKEN }} 44 | allow-repeats: false 45 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: nf-core CI 2 | # This workflow runs the pipeline with the minimal test dataset to check that it completes without any syntax errors 3 | on: 4 | push: 5 | branches: 6 | - dev 7 | pull_request: 8 | release: 9 | types: [published] 10 | 11 | env: 12 | NXF_ANSI_LOG: false 13 | 14 | concurrency: 15 | group: "${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}" 16 | cancel-in-progress: true 17 | 18 | jobs: 19 | test: 20 | name: Run pipeline with test data 21 | # Only run on push if this is the nf-core dev branch (merged PRs) 22 | if: "${{ github.event_name != 'push' || (github.event_name == 'push' && github.repository == 'nf-core/rnafusion') }}" 23 | runs-on: ubuntu-latest 24 | strategy: 25 | matrix: 26 | NXF_VER: 27 | - "23.04.0" 28 | - "latest-everything" 29 | trim_parameters: 30 | - "--fastp_trim false" 31 | - "--fastp_trim true" 32 | steps: 33 | - name: Check out pipeline code 34 | uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 35 | 36 | - name: Install Nextflow 37 | uses: nf-core/setup-nextflow@v1 38 | with: 39 | version: "${{ matrix.NXF_VER }}" 40 | 41 | - name: Disk space cleanup 42 | uses: jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be # v1.3.1 43 | 44 | - name: Dry test build 45 | run: | 46 | nextflow run ${GITHUB_WORKSPACE} -profile test,docker -stub --build_references \ 47 | --outdir /home/runner/work/rnafusion/rnafusion/results --all \ 48 | --genomes_base /home/runner/work/rnafusion/rnafusion/results/references \ 49 | --cosmic_username ${{ secrets.COSMIC_USERNAME }} --cosmic_passwd ${{ secrets.COSMIC_PASSWD }} 50 | 51 | - name: Dry test run 52 | run: | 53 | nextflow run ${GITHUB_WORKSPACE} -profile test,docker -stub \ 54 | --outdir /home/runner/work/rnafusion/rnafusion/results --all ${{ matrix.trim_parameters }} \ 55 | --genomes_base /home/runner/work/rnafusion/rnafusion/results/references 56 | -------------------------------------------------------------------------------- /.github/workflows/clean-up.yml: -------------------------------------------------------------------------------- 1 | name: "Close user-tagged issues and PRs" 2 | on: 3 | schedule: 4 | - cron: "0 0 * * 0" # Once a week 5 | 6 | jobs: 7 | clean-up: 8 | runs-on: ubuntu-latest 9 | permissions: 10 | issues: write 11 | pull-requests: write 12 | steps: 13 | - uses:
actions/stale@28ca1036281a5e5922ead5184a1bbf96e5fc984e # v9 14 | with: 15 | stale-issue-message: "This issue has been tagged as awaiting-changes or awaiting-feedback by an nf-core contributor. Remove stale label or add a comment otherwise this issue will be closed in 20 days." 16 | stale-pr-message: "This PR has been tagged as awaiting-changes or awaiting-feedback by an nf-core contributor. Remove stale label or add a comment if it is still useful." 17 | close-issue-message: "This issue was closed because it has been tagged as awaiting-changes or awaiting-feedback by an nf-core contributor and then staled for 20 days with no activity." 18 | days-before-stale: 30 19 | days-before-close: 20 20 | days-before-pr-close: -1 21 | any-of-labels: "awaiting-changes,awaiting-feedback" 22 | exempt-issue-labels: "WIP" 23 | exempt-pr-labels: "WIP" 24 | repo-token: "${{ secrets.GITHUB_TOKEN }}" 25 | -------------------------------------------------------------------------------- /.github/workflows/download_pipeline.yml: -------------------------------------------------------------------------------- 1 | name: Test successful pipeline download with 'nf-core download' 2 | 3 | # Run the workflow when: 4 | # - dispatched manually 5 | # - when a PR is opened or reopened to master branch 6 | # - the head branch of the pull request is updated, i.e. if fixes for a release are pushed last minute to dev. 7 | on: 8 | workflow_dispatch: 9 | inputs: 10 | testbranch: 11 | description: "The specific branch you wish to utilize for the test execution of nf-core download." 12 | required: true 13 | default: "dev" 14 | pull_request: 15 | types: 16 | - opened 17 | branches: 18 | - master 19 | pull_request_target: 20 | branches: 21 | - master 22 | 23 | env: 24 | NXF_ANSI_LOG: false 25 | 26 | jobs: 27 | download: 28 | runs-on: ubuntu-latest 29 | steps: 30 | - name: Install Nextflow 31 | uses: nf-core/setup-nextflow@v1 32 | 33 | - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5 34 | with: 35 | python-version: "3.11" 36 | architecture: "x64" 37 | - uses: eWaterCycle/setup-singularity@931d4e31109e875b13309ae1d07c70ca8fbc8537 # v7 38 | with: 39 | singularity-version: 3.8.3 40 | 41 | - name: Install dependencies 42 | run: | 43 | python -m pip install --upgrade pip 44 | pip install git+https://github.com/nf-core/tools.git@dev 45 | 46 | - name: Get the repository name and current branch set as environment variable 47 | run: | 48 | echo "REPO_LOWERCASE=${GITHUB_REPOSITORY,,}" >> ${GITHUB_ENV} 49 | echo "REPOTITLE_LOWERCASE=$(basename ${GITHUB_REPOSITORY,,})" >> ${GITHUB_ENV} 50 | echo "REPO_BRANCH=${{ github.event.inputs.testbranch || 'dev' }}" >> ${GITHUB_ENV} 51 | 52 | - name: Download the pipeline 53 | env: 54 | NXF_SINGULARITY_CACHEDIR: ./ 55 | run: | 56 | nf-core download ${{ env.REPO_LOWERCASE }} \ 57 | --revision ${{ env.REPO_BRANCH }} \ 58 | --outdir ./${{ env.REPOTITLE_LOWERCASE }} \ 59 | --compress "none" \ 60 | --container-system 'singularity' \ 61 | --container-library "quay.io" -l "docker.io" -l "ghcr.io" \ 62 | --container-cache-utilisation 'amend' \ 63 | --download-configuration 64 | 65 | - name: Inspect download 66 | run: tree ./${{ env.REPOTITLE_LOWERCASE }} 67 | 68 | - name: Run the downloaded pipeline 69 | env: 70 | NXF_SINGULARITY_CACHEDIR: ./ 71 | NXF_SINGULARITY_HOME_MOUNT: true 72 | run: nextflow run ./${{ env.REPOTITLE_LOWERCASE }}/$( sed 's/\W/_/g' <<< ${{ env.REPO_BRANCH }}) -stub -profile test,singularity --outdir ./results 73 | 
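For contributors who want to replay this check outside CI, a rough local equivalent is sketched below; the branch name, cache directory and output paths are illustrative only, and the workflow above remains the authoritative version:

# Hypothetical local re-run of the download test; assumes nf-core/tools and Singularity are installed
export NXF_SINGULARITY_CACHEDIR=./
nf-core download nf-core/rnafusion --revision dev --outdir ./rnafusion \
    --compress "none" --container-system 'singularity' \
    --container-cache-utilisation 'amend' --download-configuration
nextflow run ./rnafusion/dev -stub -profile test,singularity --outdir ./results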
-------------------------------------------------------------------------------- /.github/workflows/linting.yml: -------------------------------------------------------------------------------- 1 | name: nf-core linting 2 | # This workflow is triggered on pushes and PRs to the repository. 3 | # It runs the `nf-core lint` and markdown lint tests to ensure 4 | # that the code meets the nf-core guidelines. 5 | on: 6 | push: 7 | branches: 8 | - dev 9 | pull_request: 10 | release: 11 | types: [published] 12 | 13 | jobs: 14 | pre-commit: 15 | runs-on: ubuntu-latest 16 | steps: 17 | - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 18 | 19 | - name: Set up Python 3.11 20 | uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5 21 | with: 22 | python-version: 3.11 23 | cache: "pip" 24 | 25 | - name: Install pre-commit 26 | run: pip install pre-commit 27 | 28 | - name: Run pre-commit 29 | run: pre-commit run --all-files 30 | 31 | nf-core: 32 | runs-on: ubuntu-latest 33 | steps: 34 | - name: Check out pipeline code 35 | uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 36 | 37 | - name: Install Nextflow 38 | uses: nf-core/setup-nextflow@v1 39 | 40 | - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5 41 | with: 42 | python-version: "3.11" 43 | architecture: "x64" 44 | 45 | - name: Install dependencies 46 | run: | 47 | python -m pip install --upgrade pip 48 | pip install nf-core 49 | 50 | - name: Run nf-core lint 51 | env: 52 | GITHUB_COMMENTS_URL: ${{ github.event.pull_request.comments_url }} 53 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 54 | GITHUB_PR_COMMIT: ${{ github.event.pull_request.head.sha }} 55 | run: nf-core -l lint_log.txt lint --dir ${GITHUB_WORKSPACE} --markdown lint_results.md 56 | 57 | - name: Save PR number 58 | if: ${{ always() }} 59 | run: echo ${{ github.event.pull_request.number }} > PR_number.txt 60 | 61 | - name: Upload linting log file artifact 62 | if: ${{ always() }} 63 | uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4 64 | with: 65 | name: linting-logs 66 | path: | 67 | lint_log.txt 68 | lint_results.md 69 | PR_number.txt 70 | -------------------------------------------------------------------------------- /.github/workflows/linting_comment.yml: -------------------------------------------------------------------------------- 1 | name: nf-core linting comment 2 | # This workflow is triggered after the linting action is complete 3 | # It posts an automated comment to the PR, even if the PR is coming from a fork 4 | 5 | on: 6 | workflow_run: 7 | workflows: ["nf-core linting"] 8 | 9 | jobs: 10 | test: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Download lint results 14 | uses: dawidd6/action-download-artifact@f6b0bace624032e30a85a8fd9c1a7f8f611f5737 # v3 15 | with: 16 | workflow: linting.yml 17 | workflow_conclusion: completed 18 | 19 | - name: Get PR number 20 | id: pr_number 21 | run: echo "pr_number=$(cat linting-logs/PR_number.txt)" >> $GITHUB_OUTPUT 22 | 23 | - name: Post PR comment 24 | uses: marocchino/sticky-pull-request-comment@331f8f5b4215f0445d3c07b4967662a32a2d3e31 # v2 25 | with: 26 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 27 | number: ${{ steps.pr_number.outputs.pr_number }} 28 | path: linting-logs/lint_results.md 29 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .nextflow* 2 | work/ 3 | data/ 4 | results/ 5 | 
.DS_Store 6 | testing/ 7 | testing* 8 | *.pyc 9 | -------------------------------------------------------------------------------- /.gitpod.yml: -------------------------------------------------------------------------------- 1 | image: nfcore/gitpod:latest 2 | tasks: 3 | - name: Update Nextflow and setup pre-commit 4 | command: | 5 | pre-commit install --install-hooks 6 | nextflow self-update 7 | - name: unset JAVA_TOOL_OPTIONS 8 | command: | 9 | unset JAVA_TOOL_OPTIONS 10 | 11 | vscode: 12 | extensions: # based on nf-core.nf-core-extensionpack 13 | - esbenp.prettier-vscode # Markdown/CommonMark linting and style checking for Visual Studio Code 14 | - EditorConfig.EditorConfig # override user/workspace settings with settings found in .editorconfig files 15 | - Gruntfuggly.todo-tree # Display TODO and FIXME in a tree view in the activity bar 16 | - mechatroner.rainbow-csv # Highlight columns in csv files in different colors 17 | # - nextflow.nextflow # Nextflow syntax highlighting 18 | - oderwat.indent-rainbow # Highlight indentation level 19 | - streetsidesoftware.code-spell-checker # Spelling checker for source code 20 | - charliermarsh.ruff # Code linter Ruff 21 | -------------------------------------------------------------------------------- /.nf-core.yml: -------------------------------------------------------------------------------- 1 | repository_type: pipeline 2 | lint: 3 | files_unchanged: 4 | - .github/CONTRIBUTING.md 5 | - .github/PULL_REQUEST_TEMPLATE.md 6 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/mirrors-prettier 3 | rev: "v3.1.0" 4 | hooks: 5 | - id: prettier 6 | - repo: https://github.com/editorconfig-checker/editorconfig-checker.python 7 | rev: "2.7.3" 8 | hooks: 9 | - id: editorconfig-checker 10 | alias: ec 11 | -------------------------------------------------------------------------------- /.prettierignore: -------------------------------------------------------------------------------- 1 | email_template.html 2 | adaptivecard.json 3 | slackreport.json 4 | .nextflow* 5 | work/ 6 | data/ 7 | results/ 8 | .DS_Store 9 | testing/ 10 | testing* 11 | *.pyc 12 | bin/ 13 | -------------------------------------------------------------------------------- /.prettierrc.yml: -------------------------------------------------------------------------------- 1 | printWidth: 120 2 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) Martin Proks, Annick Renevey 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /adapter_fasta_test: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nf-core/rnafusion/f27e73205bff5a8ccbc367891768db6d6aaada37/adapter_fasta_test -------------------------------------------------------------------------------- /assets/dummy_file_arriba.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nf-core/rnafusion/f27e73205bff5a8ccbc367891768db6d6aaada37/assets/dummy_file_arriba.txt -------------------------------------------------------------------------------- /assets/dummy_file_fusioncatcher.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nf-core/rnafusion/f27e73205bff5a8ccbc367891768db6d6aaada37/assets/dummy_file_fusioncatcher.txt -------------------------------------------------------------------------------- /assets/dummy_file_pizzly.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nf-core/rnafusion/f27e73205bff5a8ccbc367891768db6d6aaada37/assets/dummy_file_pizzly.txt -------------------------------------------------------------------------------- /assets/dummy_file_squid.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nf-core/rnafusion/f27e73205bff5a8ccbc367891768db6d6aaada37/assets/dummy_file_squid.txt -------------------------------------------------------------------------------- /assets/dummy_file_starfusion.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nf-core/rnafusion/f27e73205bff5a8ccbc367891768db6d6aaada37/assets/dummy_file_starfusion.txt -------------------------------------------------------------------------------- /assets/email_template.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | nf-core/rnafusion Pipeline Report 9 | 10 | 11 |
12 | <body><!-- inline styling lost in extraction; only the structural markup below is reconstructed -->
13 | <div>
14 | <h1>nf-core/rnafusion ${version}</h1>
15 | <h2>Run Name: $runName</h2>
16 | 
17 | 
18 | <% if (!success){
19 | out << """
20 | <div>
21 | <h4>nf-core/rnafusion execution completed unsuccessfully!</h4>
22 | <p>The exit status of the task that caused the workflow execution to fail was: <code>$exitStatus</code>.</p>
23 | <p>The full error message was:</p>
24 | <pre>${errorReport}</pre>
25 | </div>
26 | """
27 | } else {
28 | out << """
29 | <div>
30 | nf-core/rnafusion execution completed successfully!
31 | </div>
32 | """
33 | }
34 | %>
35 | 
36 | <p>The workflow was completed at <strong>$dateComplete</strong> (duration: <strong>$duration</strong>)</p>
37 | <p>The command used to launch the workflow was as follows:</p>
38 | <pre>$commandLine</pre>
39 | 
40 | <h3>Pipeline Configuration:</h3>
41 | <table>
42 | <tbody>
43 | <% out << summary.collect{ k,v -> "<tr><th>$k</th><td>$v</td></tr>" }.join("\n") %>
44 | </tbody>
45 | </table>
46 | 
47 | <p>nf-core/rnafusion</p>
48 | <p><a href="https://github.com/nf-core/rnafusion">https://github.com/nf-core/rnafusion</a></p>
49 | </div>
50 | </body>
51 | 52 | 53 | 54 | -------------------------------------------------------------------------------- /assets/email_template.txt: -------------------------------------------------------------------------------- 1 | ---------------------------------------------------- 2 | ,--./,-. 3 | ___ __ __ __ ___ /,-._.--~\\ 4 | |\\ | |__ __ / ` / \\ |__) |__ } { 5 | | \\| | \\__, \\__/ | \\ |___ \\`-._,-`-, 6 | `._,._,' 7 | nf-core/rnafusion ${version} 8 | ---------------------------------------------------- 9 | Run Name: $runName 10 | 11 | <% if (success){ 12 | out << "## nf-core/rnafusion execution completed successfully! ##" 13 | } else { 14 | out << """#################################################### 15 | ## nf-core/rnafusion execution completed unsuccessfully! ## 16 | #################################################### 17 | The exit status of the task that caused the workflow execution to fail was: $exitStatus. 18 | The full error message was: 19 | 20 | ${errorReport} 21 | """ 22 | } %> 23 | 24 | 25 | The workflow was completed at $dateComplete (duration: $duration) 26 | 27 | The command used to launch the workflow was as follows: 28 | 29 | $commandLine 30 | 31 | 32 | 33 | Pipeline Configuration: 34 | ----------------------- 35 | <% out << summary.collect{ k,v -> " - $k: $v" }.join("\n") %> 36 | 37 | -- 38 | nf-core/rnafusion 39 | https://github.com/nf-core/rnafusion 40 | -------------------------------------------------------------------------------- /assets/multiqc_config.yml: -------------------------------------------------------------------------------- 1 | report_comment: > 2 | This report has been generated by the nf-core/rnafusion 3 | analysis pipeline. For information about how to interpret these results, please see the 4 | documentation. 5 | 6 | report_section_order: 7 | nf-core-rnafusion-methods-description: 8 | order: -1000 9 | software_versions: 10 | order: -1001 11 | nf-core-rnafusion-summary: 12 | order: -1002 13 | 14 | export_plots: true 15 | disable_version_detection: true 16 | 17 | # Run only these modules 18 | run_modules: 19 | - custom_content 20 | - fastqc 21 | - fastp 22 | - star 23 | - samtools 24 | - picard 25 | - arriba 26 | 27 | module_order: 28 | - fastp 29 | - fastqc: 30 | name: "FastQC (raw)" 31 | info: "This section of the report shows FastQC results before adapter trimming." 32 | path_filters: 33 | - "*.zip" 34 | - fastqc: 35 | name: "FastQC (trimmed)" 36 | info: "This section of the report shows FastQC results after adapter trimming." 
37 | path_filters: 38 | - "*_trimmed*.zip" 39 | -------------------------------------------------------------------------------- /assets/nf-core-rnafusion_logo_light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nf-core/rnafusion/f27e73205bff5a8ccbc367891768db6d6aaada37/assets/nf-core-rnafusion_logo_light.png -------------------------------------------------------------------------------- /assets/schema_input.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "http://json-schema.org/draft-07/schema", 3 | "$id": "https://raw.githubusercontent.com/nf-core/rnafusion/master/assets/schema_input.json", 4 | "title": "nf-core/rnafusion pipeline - params.input schema", 5 | "description": "Schema for the file provided with params.input", 6 | "type": "array", 7 | "items": { 8 | "type": "object", 9 | "properties": { 10 | "sample": { 11 | "type": "string", 12 | "pattern": "^\\S+$", 13 | "errorMessage": "Sample name must be provided and cannot contain spaces", 14 | "meta": ["id"] 15 | }, 16 | "fastq_1": { 17 | "type": "string", 18 | "format": "file-path", 19 | "exists": true, 20 | "pattern": "^\\S+\\.f(ast)?q\\.gz$", 21 | "errorMessage": "FastQ file for reads 1 must be provided, cannot contain spaces and must have extension '.fq.gz' or '.fastq.gz'" 22 | }, 23 | "fastq_2": { 24 | "type": "string", 25 | "format": "file-path", 26 | "exists": true, 27 | "pattern": "^\\S+\\.f(ast)?q\\.gz$", 28 | "errorMessage": "FastQ file for reads 2 cannot contain spaces and must have extension '.fq.gz' or '.fastq.gz'" 29 | }, 30 | "strandedness": { 31 | "type": "string", 32 | "format": "string", 33 | "exists": true, 34 | "pattern": "forward|reverse|unstranded|unknown", 35 | "errorMessage": "Strandedness has to be forward, reverse, unstranded or unknown" 36 | } 37 | }, 38 | "required": ["sample", "fastq_1", "fastq_2", "strandedness"] 39 | } 40 | } 41 |
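For orientation, a samplesheet satisfying this schema could look like the sketch below; the sample names and FASTQ paths are purely illustrative, while the required columns and the strandedness vocabulary come from the schema itself:

sample,fastq_1,fastq_2,strandedness
sample1,/data/sample1_R1.fastq.gz,/data/sample1_R2.fastq.gz,forward
sample2,/data/sample2_R1.fastq.gz,/data/sample2_R2.fastq.gz,unknown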
-------------------------------------------------------------------------------- /assets/sendmail_template.txt: -------------------------------------------------------------------------------- 1 | To: $email 2 | Subject: $subject 3 | Mime-Version: 1.0 4 | Content-Type: multipart/related;boundary="nfcoremimeboundary" 5 | 6 | --nfcoremimeboundary 7 | Content-Type: text/html; charset=utf-8 8 | 9 | $email_html 10 | 11 | --nfcoremimeboundary 12 | Content-Type: image/png;name="nf-core-rnafusion_logo.png" 13 | Content-Transfer-Encoding: base64 14 | Content-ID: <nfcorepipelinelogo> 15 | Content-Disposition: inline; filename="nf-core-rnafusion_logo_light.png" 16 | 17 | <% out << new File("$projectDir/assets/nf-core-rnafusion_logo_light.png"). 18 | bytes. 19 | encodeBase64(). 20 | toString(). 21 | tokenize( '\n' )*. 22 | toList()*. 23 | collate( 76 )*. 24 | collect { it.join() }. 25 | flatten(). 26 | join( '\n' ) %> 27 | 28 | <% 29 | if (mqcFile){ 30 | def mqcFileObj = new File("$mqcFile") 31 | if (mqcFileObj.length() < mqcMaxSize){ 32 | out << """ 33 | --nfcoremimeboundary 34 | Content-Type: text/html; name=\"multiqc_report\" 35 | Content-Transfer-Encoding: base64 36 | Content-ID: <mqcreport> 37 | Content-Disposition: attachment; filename=\"${mqcFileObj.getName()}\" 38 | 39 | ${mqcFileObj. 40 | bytes. 41 | encodeBase64(). 42 | toString(). 43 | tokenize( '\n' )*. 44 | toList()*. 45 | collate( 76 )*. 46 | collect { it.join() }. 47 | flatten(). 48 | join( '\n' )} 49 | """ 50 | }} 51 | %> 52 | 53 | --nfcoremimeboundary-- 54 | -------------------------------------------------------------------------------- /assets/slackreport.json: -------------------------------------------------------------------------------- 1 | { 2 | "attachments": [ 3 | { 4 | "fallback": "Plain-text summary of the attachment.", 5 | "color": "<% if (success) { %>good<% } else { %>danger<%} %>", 6 | "author_name": "nf-core/rnafusion ${version} - ${runName}", 7 | "author_icon": "https://www.nextflow.io/docs/latest/_static/favicon.ico", 8 | "text": "<% if (success) { %>Pipeline completed successfully!<% } else { %>Pipeline completed with errors<% } %>", 9 | "fields": [ 10 | { 11 | "title": "Command used to launch the workflow", 12 | "value": "```${commandLine}```", 13 | "short": false 14 | } 15 | <% 16 | if (!success) { %> 17 | , 18 | { 19 | "title": "Full error message", 20 | "value": "```${errorReport}```", 21 | "short": false 22 | }, 23 | { 24 | "title": "Pipeline configuration", 25 | "value": "<% out << summary.collect{ k,v -> k == "hook_url" ? "_${k}_: (_hidden_)" : ( ( v.class.toString().contains('Path') || ( v.class.toString().contains('String') && v.contains('/') ) ) ? "_${k}_: `${v}`" : (v.class.toString().contains('DateTime') ? ("_${k}_: " + v.format(java.time.format.DateTimeFormatter.ofLocalizedDateTime(java.time.format.FormatStyle.MEDIUM))) : "_${k}_: ${v}") ) }.join(",\n") %>", 26 | "short": false 27 | } 28 | <% } 29 | %> 30 | ], 31 | "footer": "Completed at <% out << dateComplete.format(java.time.format.DateTimeFormatter.ofLocalizedDateTime(java.time.format.FormatStyle.MEDIUM)) %> (duration: ${duration})" 32 | } 33 | ] 34 | } 35 |
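The one-line `value` template in the "Pipeline configuration" field above is dense; the standalone Groovy sketch below, with a made-up summary map and without the Path/DateTime class checks of the real template, illustrates the formatting rule it encodes — hide hook_url, render path-like values in backticks, and print everything else as-is:

// Simplified illustration of the slackreport.json "Pipeline configuration" rendering
def summary = [hook_url: 'https://hooks.example/T000', outdir: '/data/results', max_cpus: 16]
def rendered = summary.collect { k, v ->
    k == 'hook_url'
        ? "_${k}_: (_hidden_)"                      // never leak the webhook URL
        : (v instanceof String && v.contains('/'))
            ? "_${k}_: `${v}`"                      // monospace path-like values
            : "_${k}_: ${v}"                        // plain rendering otherwise
}.join(',\n')
println rendered
// _hook_url_: (_hidden_),
// _outdir_: `/data/results`,
// _max_cpus_: 16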
18 | """ 19 | 20 | patterns = { 21 | "#", 22 | 'transcript_biotype "Mt_rRNA"', 23 | 'transcript_biotype "rRNA"', 24 | 'transcript_biotype "rRNA_pseudogene"', 25 | } 26 | line_starts = {"MT", "1", "2", "3", "4", "5", "6", "7", "8", "9"} 27 | out_lines = [] 28 | with file_in.open() as f: 29 | data = f.readlines() 30 | for line in data: 31 | for pattern in patterns: 32 | if pattern in line: 33 | for line_start in line_starts: 34 | if line.startswith(line_start): 35 | out_lines.append(line) 36 | 37 | with file_out.open(mode="w") as out_file: 38 | out_file.writelines(out_lines) 39 | 40 | 41 | def parse_args(argv=None): 42 | """Define and immediately parse command line arguments.""" 43 | parser = argparse.ArgumentParser( 44 | description="Extract ribosomal RNA intervals from a gtf file.", 45 | epilog="Example: python get_rrna_transcripts.py ", 46 | ) 47 | parser.add_argument( 48 | "file_in", 49 | metavar="FILE_IN", 50 | type=Path, 51 | help="Input in GTF format.", 52 | ) 53 | parser.add_argument( 54 | "file_out", 55 | metavar="FILE_OUT", 56 | type=Path, 57 | help="Transformed output intervals in GTF format.", 58 | ) 59 | parser.add_argument( 60 | "-l", 61 | "--log-level", 62 | help="The desired log level (default WARNING).", 63 | choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"), 64 | default="WARNING", 65 | ) 66 | return parser.parse_args(argv) 67 | 68 | 69 | def main(argv=None): 70 | """Coordinate argument parsing and program execution.""" 71 | args = parse_args(argv) 72 | logging.basicConfig(level=args.log_level, format="[%(levelname)s] %(message)s") 73 | if not args.file_in.is_file(): 74 | logger.error(f"The given input file {args.file_in} was not found!") 75 | sys.exit(2) 76 | args.file_out.parent.mkdir(parents=True, exist_ok=True) 77 | get_rrna_intervals(args.file_in, args.file_out) 78 | 79 | 80 | if __name__ == "__main__": 81 | sys.exit(main()) 82 | -------------------------------------------------------------------------------- /conf/base.config: -------------------------------------------------------------------------------- 1 | /* 2 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 3 | nf-core/rnafusion Nextflow base config file 4 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 5 | A 'blank slate' config file, appropriate for general use on most high performance 6 | compute environments. Assumes that all software is installed and available on 7 | the PATH. Runs in `local` mode - all jobs will be run on the logged in environment. 8 | ---------------------------------------------------------------------------------------- 9 | */ 10 | 11 | process { 12 | 13 | cpus = { check_max( 1 * task.attempt, 'cpus' ) } 14 | memory = { check_max( 6.GB * task.attempt, 'memory' ) } 15 | time = { check_max( 4.h * task.attempt, 'time' ) } 16 | shell = ['/bin/bash', '-euo', 'pipefail'] 17 | 18 | errorStrategy = { task.exitStatus in ((130..145) + 104) ? 
-------------------------------------------------------------------------------- /conf/base.config: -------------------------------------------------------------------------------- 1 | /* 2 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 3 | nf-core/rnafusion Nextflow base config file 4 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 5 | A 'blank slate' config file, appropriate for general use on most high performance 6 | compute environments. Assumes that all software is installed and available on 7 | the PATH. Runs in `local` mode - all jobs will be run on the logged in environment. 8 | ---------------------------------------------------------------------------------------- 9 | */ 10 | 11 | process { 12 | 13 | cpus = { check_max( 1 * task.attempt, 'cpus' ) } 14 | memory = { check_max( 6.GB * task.attempt, 'memory' ) } 15 | time = { check_max( 4.h * task.attempt, 'time' ) } 16 | shell = ['/bin/bash', '-euo', 'pipefail'] 17 | 18 | errorStrategy = { task.exitStatus in ((130..145) + 104) ? 'retry' : 'finish' } 19 | maxRetries = 1 20 | maxErrors = '-1' 21 | 22 | withLabel:process_single { 23 | cpus = { check_max( 1 , 'cpus' ) } 24 | memory = { check_max( 6.GB * task.attempt, 'memory' ) } 25 | time = { check_max( 4.h * task.attempt, 'time' ) } 26 | } 27 | withLabel:process_low { 28 | cpus = { check_max( 2 * task.attempt, 'cpus' ) } 29 | memory = { check_max( 12.GB * task.attempt, 'memory' ) } 30 | time = { check_max( 4.h * task.attempt, 'time' ) } 31 | } 32 | withLabel:process_medium { 33 | cpus = { check_max( 6 * task.attempt, 'cpus' ) } 34 | memory = { check_max( 36.GB * task.attempt, 'memory' ) } 35 | time = { check_max( 8.h * task.attempt, 'time' ) } 36 | } 37 | withLabel:process_high { 38 | cpus = { check_max( 12 * task.attempt, 'cpus' ) } 39 | memory = { check_max( 72.GB * task.attempt, 'memory' ) } 40 | time = { check_max( 16.h * task.attempt, 'time' ) } 41 | } 42 | withLabel:process_long { 43 | time = { check_max( 20.h * task.attempt, 'time' ) } 44 | } 45 | withLabel:process_high_memory { 46 | memory = { check_max( 200.GB * task.attempt, 'memory' ) } 47 | } 48 | withLabel:error_ignore { 49 | errorStrategy = 'ignore' 50 | } 51 | withLabel:error_retry { 52 | errorStrategy = 'retry' 53 | maxRetries = 2 54 | } 55 | } 56 |
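Because every resource request above is wrapped in `check_max`, the usual way to adjust resources is a small user-side config passed with `-c` rather than an edit to this file; a hypothetical override (the selector and values here are illustrative only) looks like:

// custom_resources.config — used as: nextflow run nf-core/rnafusion -c custom_resources.config ...
process {
    withLabel:process_high {
        cpus   = 24
        memory = 96.GB
        time   = 24.h
    }
}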
-------------------------------------------------------------------------------- /conf/test.config: -------------------------------------------------------------------------------- 1 | /* 2 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 3 | Nextflow config file for running minimal tests 4 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 5 | Defines input files and everything required to run a fast and simple pipeline test. 6 | 7 | Use as follows: 8 | nextflow run nf-core/rnafusion -profile test,<docker/singularity> --outdir <OUTDIR> -stub 9 | 10 | ---------------------------------------------------------------------------------------- 11 | */ 12 | 13 | params { 14 | config_profile_name = 'Test profile' 15 | config_profile_description = 'Minimal test dataset to check pipeline function' 16 | 17 | // Limit resources so that this can run on GitHub Actions 18 | max_cpus = 2 19 | max_memory = 6.GB 20 | max_time = 6.h 21 | 22 | // Input data 23 | input = 'https://raw.githubusercontent.com/nf-core/test-datasets/rnafusion/testdata/human/samplesheet_valid.csv' 24 | } 25 | -------------------------------------------------------------------------------- /conf/test_full.config: -------------------------------------------------------------------------------- 1 | /* 2 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 3 | Nextflow config file for running full-size tests 4 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 5 | Defines input files and everything required to run a full size pipeline test. 6 | Use as follows: 7 | nextflow run nf-core/rnafusion -profile test_full,<docker/singularity> --outdir <OUTDIR> 8 | ---------------------------------------------------------------------------------------- 9 | */ 10 | 11 | params { 12 | config_profile_name = 'Full test profile' 13 | config_profile_description = 'Full test dataset to check pipeline function' 14 | 15 | // Input data for full size test 16 | input = 'https://raw.githubusercontent.com/nf-core/test-datasets/rnafusion/testdata/human/samplesheet_valid.csv' 17 | 18 | // Other params 19 | 20 | } 21 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # nf-core/rnafusion: Documentation 2 | 3 | The nf-core/rnafusion documentation is split into the following pages: 4 | 5 | - [Usage](usage.md) 6 | - An overview of how the pipeline works, how to run it and a description of all of the different command-line flags. 7 | - [Output](output.md) 8 | - An overview of the different results produced by the pipeline and how to interpret them. 9 | 10 | You can find a lot more documentation about installing, configuring and running nf-core pipelines on the website: [https://nf-co.re](https://nf-co.re) 11 | -------------------------------------------------------------------------------- /docs/images/BTB_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nf-core/rnafusion/f27e73205bff5a8ccbc367891768db6d6aaada37/docs/images/BTB_logo.png -------------------------------------------------------------------------------- /docs/images/NGI_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nf-core/rnafusion/f27e73205bff5a8ccbc367891768db6d6aaada37/docs/images/NGI_logo.png -------------------------------------------------------------------------------- /docs/images/SDU_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nf-core/rnafusion/f27e73205bff5a8ccbc367891768db6d6aaada37/docs/images/SDU_logo.png -------------------------------------------------------------------------------- /docs/images/SciLifeLab_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nf-core/rnafusion/f27e73205bff5a8ccbc367891768db6d6aaada37/docs/images/SciLifeLab_logo.png -------------------------------------------------------------------------------- /docs/images/mqc_fastqc_adapter.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nf-core/rnafusion/f27e73205bff5a8ccbc367891768db6d6aaada37/docs/images/mqc_fastqc_adapter.png -------------------------------------------------------------------------------- /docs/images/mqc_fastqc_counts.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nf-core/rnafusion/f27e73205bff5a8ccbc367891768db6d6aaada37/docs/images/mqc_fastqc_counts.png -------------------------------------------------------------------------------- /docs/images/mqc_fastqc_quality.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nf-core/rnafusion/f27e73205bff5a8ccbc367891768db6d6aaada37/docs/images/mqc_fastqc_quality.png
-------------------------------------------------------------------------------- /docs/images/nf-core-rnafusion_logo_dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nf-core/rnafusion/f27e73205bff5a8ccbc367891768db6d6aaada37/docs/images/nf-core-rnafusion_logo_dark.png -------------------------------------------------------------------------------- /docs/images/nf-core-rnafusion_logo_light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nf-core/rnafusion/f27e73205bff5a8ccbc367891768db6d6aaada37/docs/images/nf-core-rnafusion_logo_light.png -------------------------------------------------------------------------------- /docs/images/nf-core-rnafusion_metro_map.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nf-core/rnafusion/f27e73205bff5a8ccbc367891768db6d6aaada37/docs/images/nf-core-rnafusion_metro_map.png -------------------------------------------------------------------------------- /docs/images/rnafusion_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nf-core/rnafusion/f27e73205bff5a8ccbc367891768db6d6aaada37/docs/images/rnafusion_logo.png -------------------------------------------------------------------------------- /docs/images/summary_graph_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nf-core/rnafusion/f27e73205bff5a8ccbc367891768db6d6aaada37/docs/images/summary_graph_1.png -------------------------------------------------------------------------------- /docs/images/summary_graph_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nf-core/rnafusion/f27e73205bff5a8ccbc367891768db6d6aaada37/docs/images/summary_graph_2.png -------------------------------------------------------------------------------- /docs/images/summary_graph_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nf-core/rnafusion/f27e73205bff5a8ccbc367891768db6d6aaada37/docs/images/summary_graph_3.png -------------------------------------------------------------------------------- /modules/local/arriba/download/main.nf: -------------------------------------------------------------------------------- 1 | process ARRIBA_DOWNLOAD { 2 | tag "arriba" 3 | label 'process_low' 4 | 5 | conda "bioconda::gnu-wget=1.18" 6 | container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 7 | 'https://depot.galaxyproject.org/singularity/gnu-wget:1.18--h5bf99c6_5' : 8 | 'quay.io/biocontainers/gnu-wget:1.18--h5bf99c6_5' }" 9 | 10 | output: 11 | path "versions.yml" , emit: versions 12 | path "*" , emit: reference 13 | 14 | script: 15 | """ 16 | wget https://github.com/suhrig/arriba/releases/download/v2.4.0/arriba_v2.4.0.tar.gz -O arriba_v2.4.0.tar.gz 17 | tar -xzvf arriba_v2.4.0.tar.gz 18 | rm arriba_v2.4.0.tar.gz 19 | mv arriba_v2.4.0/database/* . 
20 | rm -r arriba_v2.4.0 21 | 22 | cat <<-END_VERSIONS > versions.yml 23 | "${task.process}": 24 | wget: \$(wget -V 2>&1 | grep "GNU Wget" | cut -d" " -f3) 25 | END_VERSIONS 26 | """ 27 | 28 | stub: 29 | """ 30 | touch blacklist_hg38_GRCh38_v2.4.0.tsv.gz 31 | touch protein_domains_hg38_GRCh38_v2.4.0.gff3 32 | touch cytobands_hg38_GRCh38_v2.4.0.tsv 33 | touch known_fusions_hg38_GRCh38_v2.4.0.tsv.gz 34 | 35 | cat <<-END_VERSIONS > versions.yml 36 | "${task.process}": 37 | wget: \$(wget -V 2>&1 | grep "GNU Wget" | cut -d" " -f3) 38 | END_VERSIONS 39 | """ 40 | } 41 | -------------------------------------------------------------------------------- /modules/local/arriba/download/meta.yml: -------------------------------------------------------------------------------- 1 | name: arriba_download 2 | description: Arriba is a command-line tool for the detection of gene fusions from RNA-Seq data. 3 | keywords: 4 | - fusion 5 | - arriba 6 | tools: 7 | - arriba: 8 | description: Fast and accurate gene fusion detection from RNA-Seq data 9 | homepage: https://github.com/suhrig/arriba 10 | documentation: https://arriba.readthedocs.io/en/latest/ 11 | tool_dev_url: https://github.com/suhrig/arriba 12 | doi: "10.1101/gr.257246.119" 13 | licence: ["MIT"] 14 | 15 | output: 16 | - versions: 17 | type: file 18 | description: File containing software versions 19 | pattern: "versions.yml" 20 | - reference: 21 | type: directory 22 | description: Folder with arriba references 23 | pattern: "*" 24 | 25 | authors: 26 | - "@praveenraj2018, @rannick" 27 | -------------------------------------------------------------------------------- /modules/local/arriba/visualisation/main.nf: -------------------------------------------------------------------------------- 1 | process ARRIBA_VISUALISATION { 2 | tag "$meta.id" 3 | label 'process_medium' 4 | 5 | conda "bioconda::arriba=2.4.0" 6 | container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 7 | 'https://depot.galaxyproject.org/singularity/arriba:2.4.0--h0033a41_2' : 8 | 'biocontainers/arriba:2.4.0--h0033a41_2' }" 9 | 10 | input: 11 | tuple val(meta), path(bam), path(bai), path(fusions) 12 | tuple val(meta2), path(gtf) 13 | tuple val(meta3), path(protein_domains) 14 | tuple val(meta4), path(cytobands) 15 | 16 | output: 17 | tuple val(meta), path("*.pdf") , emit: pdf 18 | path "versions.yml" , emit: versions 19 | 20 | when: 21 | task.ext.when == null || task.ext.when 22 | 23 | script: 24 | def args = task.ext.args ?: '' 25 | def cytobands = cytobands ? " --cytobands=$cytobands" : "" 26 | def prefix = task.ext.prefix ?: "${meta.id}" 27 | def protein_domains = protein_domains ?
"--proteinDomains=$protein_domains" : "" 28 | """ 29 | draw_fusions.R \\ 30 | --fusions=$fusions \\ 31 | --alignments=$bam \\ 32 | --output=${prefix}.pdf \\ 33 | --annotation=${gtf} \\ 34 | $cytobands \\ 35 | $protein_domains \\ 36 | $args 37 | 38 | cat <<-END_VERSIONS > versions.yml 39 | "${task.process}": 40 | arriba: \$(arriba -h | grep 'Version:' 2>&1 | sed 's/Version:\s//') 41 | END_VERSIONS 42 | """ 43 | 44 | stub: 45 | def prefix = task.ext.prefix ?: "${meta.id}" 46 | """ 47 | touch ${prefix}.pdf 48 | cat <<-END_VERSIONS > versions.yml 49 | "${task.process}": 50 | arriba: \$(arriba -h | grep 'Version:' 2>&1 | sed 's/Version:\s//') 51 | END_VERSIONS 52 | """ 53 | } 54 | -------------------------------------------------------------------------------- /modules/local/arriba/visualisation/meta.yml: -------------------------------------------------------------------------------- 1 | name: arriba_visualisation 2 | description: Arriba is a command-line tool for the detection of gene fusions from RNA-Seq data. 3 | keywords: 4 | - visualisation 5 | - arriba 6 | tools: 7 | - arriba: 8 | description: Fast and accurate gene fusion detection from RNA-Seq data 9 | homepage: https://github.com/suhrig/arriba 10 | documentation: https://arriba.readthedocs.io/en/latest/ 11 | tool_dev_url: https://github.com/suhrig/arriba 12 | doi: "10.1101/gr.257246.119" 13 | licence: ["MIT"] 14 | 15 | input: 16 | - meta: 17 | type: map 18 | description: | 19 | Groovy Map containing sample information 20 | e.g. [ id:'test', single_end:false ] 21 | - bam: 22 | type: file 23 | description: BAM/CRAM/SAM file 24 | pattern: "*.{bam,cram,sam}" 25 | - bai: 26 | type: file 27 | description: BAMindex file 28 | pattern: "*.{bai}" 29 | - fusions: 30 | type: file 31 | description: Arriba fusions file 32 | pattern: "*.{tsv}" 33 | - gtf: 34 | type: file 35 | description: Annotation GTF file 36 | pattern: "*.{gtf}" 37 | 38 | output: 39 | - meta: 40 | type: map 41 | description: | 42 | Groovy Map containing sample information 43 | e.g. [ id:'test', single_end:false ] 44 | - versions: 45 | type: file 46 | description: File containing software versions 47 | pattern: "versions.yml" 48 | - pdf: 49 | type: file 50 | description: File contains fusions visualisation 51 | pattern: "*.{pdf}" 52 | 53 | authors: 54 | - "@rannick" 55 | -------------------------------------------------------------------------------- /modules/local/convert2bed/main.nf: -------------------------------------------------------------------------------- 1 | process CONVERT2BED { 2 | tag "$meta.id" 3 | label 'process_single' 4 | 5 | conda "bioconda::bedops=2.4.41" 6 | container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 
7 | 'https://depot.galaxyproject.org/singularity/bedops:2.4.41--h9f5acd7_0' : 8 | 'quay.io/biocontainers/bedops:2.4.41--h9f5acd7_0' }" 9 | 10 | input: 11 | tuple val(meta), path(gtf) 12 | 13 | output: 14 | tuple val(meta), path("*.bed") , emit: bed 15 | path "versions.yml" , emit: versions 16 | 17 | script: 18 | def prefix = task.ext.prefix ?: "${meta.id}" 19 | """ 20 | convert2bed -i gtf < $gtf > ${prefix}.bed 21 | 22 | cat <<-END_VERSIONS > versions.yml 23 | "${task.process}": 24 | convert2bed: \$(convert2bed --version | grep vers | sed 's/^.*.version: //') 25 | END_VERSIONS 26 | """ 27 | 28 | stub: 29 | def prefix = task.ext.prefix ?: "${meta.id}" 30 | """ 31 | touch ${prefix}.bed 32 | cat <<-END_VERSIONS > versions.yml 33 | "${task.process}": 34 | convert2bed: \$(convert2bed --version | grep vers | sed 's/^.*.version: //') 35 | END_VERSIONS 36 | """ 37 | } 38 | -------------------------------------------------------------------------------- /modules/local/convert2bed/meta.yml: -------------------------------------------------------------------------------- 1 | name: convert2bed 2 | description: convert from GTF to BED format 3 | keywords: 4 | - convert2bed 5 | tools: 6 | - convert2bed: 7 | description: convert from GTF to BED format 8 | homepage: https://github.com/bedops/bedops 9 | documentation: https://bedops.readthedocs.io/en/latest/index.html 10 | tool_dev_url: https://github.com/bedops/bedops 11 | doi: "" 12 | licence: ["GNU GENERAL PUBLIC LICENSE"] 13 | 14 | input: 15 | - gtf: 16 | type: file 17 | description: Path to GTF file 18 | pattern: "*.{gtf*}" 19 | 20 | output: 21 | - versions: 22 | type: file 23 | description: File containing software versions 24 | pattern: "versions.yml" 25 | - bed: 26 | type: file 27 | description: BED file 28 | pattern: "*.bed" 29 | 30 | authors: 31 | - "@rannick" 32 | -------------------------------------------------------------------------------- /modules/local/ensembl/main.nf: -------------------------------------------------------------------------------- 1 | process ENSEMBL_DOWNLOAD { 2 | tag "ensembl" 3 | label 'process_low' 4 | 5 | conda "bioconda::gnu-wget=1.18" 6 | container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
7 | 'https://depot.galaxyproject.org/singularity/gnu-wget:1.18--h5bf99c6_5' : 8 | 'quay.io/biocontainers/gnu-wget:1.18--h5bf99c6_5' }" 9 | 10 | input: 11 | val ensembl_version 12 | val genome 13 | val meta 14 | 15 | output: 16 | tuple val(meta), path("Homo_sapiens.${genome}.${ensembl_version}.all.fa") , emit: fasta 17 | tuple val(meta), path("Homo_sapiens.${genome}.${ensembl_version}.gtf") , emit: gtf 18 | tuple val(meta), path("Homo_sapiens.${genome}.${ensembl_version}.chr.gtf") , emit: chrgtf 19 | tuple val(meta), path("Homo_sapiens.${genome}.${ensembl_version}.cdna.all.fa.gz"), emit: transcript 20 | path "versions.yml" , emit: versions 21 | 22 | 23 | script: 24 | """ 25 | wget ftp://ftp.ensembl.org/pub/release-${ensembl_version}/fasta/homo_sapiens/dna/Homo_sapiens.${genome}.dna.chromosome.{1..22}.fa.gz 26 | wget ftp://ftp.ensembl.org/pub/release-${ensembl_version}/fasta/homo_sapiens/dna/Homo_sapiens.${genome}.dna.chromosome.{MT,X,Y}.fa.gz 27 | 28 | wget ftp://ftp.ensembl.org/pub/release-${ensembl_version}/gtf/homo_sapiens/Homo_sapiens.${genome}.${ensembl_version}.gtf.gz 29 | wget ftp://ftp.ensembl.org/pub/release-${ensembl_version}/gtf/homo_sapiens/Homo_sapiens.${genome}.${ensembl_version}.chr.gtf.gz 30 | wget ftp://ftp.ensembl.org/pub/release-${ensembl_version}/fasta/homo_sapiens/cdna/Homo_sapiens.${genome}.cdna.all.fa.gz -O Homo_sapiens.${genome}.${ensembl_version}.cdna.all.fa.gz 31 | 32 | gunzip -c Homo_sapiens.${genome}.dna.chromosome.* > Homo_sapiens.${genome}.${ensembl_version}.all.fa 33 | gunzip Homo_sapiens.${genome}.${ensembl_version}.gtf.gz 34 | gunzip Homo_sapiens.${genome}.${ensembl_version}.chr.gtf.gz 35 | 36 | 37 | 38 | cat <<-END_VERSIONS > versions.yml 39 | "${task.process}": 40 | wget: \$(wget -V 2>&1 | grep "GNU Wget" | cut -d" " -f3) 41 | END_VERSIONS 42 | """ 43 | 44 | stub: 45 | """ 46 | touch "Homo_sapiens.${genome}.${ensembl_version}.all.fa" 47 | touch "Homo_sapiens.${genome}.${ensembl_version}.gtf" 48 | touch "Homo_sapiens.${genome}.${ensembl_version}.chr.gtf" 49 | touch "Homo_sapiens.${genome}.${ensembl_version}.cdna.all.fa.gz" 50 | 51 | cat <<-END_VERSIONS > versions.yml 52 | "${task.process}": 53 | wget: \$(wget -V 2>&1 | grep "GNU Wget" | cut -d" " -f3) 54 | END_VERSIONS 55 | """ 56 | 57 | } 58 | -------------------------------------------------------------------------------- /modules/local/fusioncatcher/detect/main.nf: -------------------------------------------------------------------------------- 1 | process FUSIONCATCHER { 2 | tag "$meta.id" 3 | label 'process_high' 4 | 5 | conda "bioconda::fusioncatcher=1.33" 6 | container "docker.io/clinicalgenomics/fusioncatcher:1.33" 7 | 8 | input: 9 | tuple val(meta), path(fasta) 10 | path reference 11 | 12 | output: 13 | tuple val(meta), path("*.fusioncatcher.fusion-genes.txt") , optional:true , emit: fusions 14 | tuple val(meta), path("*.fusioncatcher.summary.txt") , optional:true , emit: summary 15 | tuple val(meta), path("*.fusioncatcher.log") , emit: log 16 | path "versions.yml" , emit: versions 17 | 18 | when: 19 | task.ext.when == null || task.ext.when 20 | 21 | script: 22 | def args = task.ext.args ?: '' 23 | def prefix = task.ext.prefix ?: "${meta.id}" 24 | def reads = fasta.toString().replace(" ", ",") 25 | def single_end = meta.single_end ? "--single-end" : "" 26 | """ 27 | fusioncatcher.py \\ 28 | -d $reference \\ 29 | -i $reads \\ 30 | -p $task.cpus \\ 31 | -o .
\\ 32 | --skip-blat \\ 33 | $single_end \\ 34 | $args 35 | 36 | mv final-list_candidate-fusion-genes.txt ${prefix}.fusioncatcher.fusion-genes.txt 37 | mv summary_candidate_fusions.txt ${prefix}.fusioncatcher.summary.txt 38 | mv fusioncatcher.log ${prefix}.fusioncatcher.log 39 | 40 | cat <<-END_VERSIONS > versions.yml 41 | "${task.process}": 42 | fusioncatcher: \$(echo \$(fusioncatcher.py --version 2>&1)| sed 's/fusioncatcher.py //') 43 | END_VERSIONS 44 | """ 45 | 46 | stub: 47 | def prefix = task.ext.prefix ?: "${meta.id}" 48 | 49 | """ 50 | touch ${prefix}.fusioncatcher.fusion-genes.txt 51 | touch ${prefix}.fusioncatcher.summary.txt 52 | touch ${prefix}.fusioncatcher.log 53 | cat <<-END_VERSIONS > versions.yml 54 | "${task.process}": 55 | fusioncatcher: \$(echo \$(fusioncatcher.py --version 2>&1)| sed 's/fusioncatcher.py //') 56 | END_VERSIONS 57 | """ 58 | } 59 | -------------------------------------------------------------------------------- /modules/local/fusioncatcher/detect/meta.yml: -------------------------------------------------------------------------------- 1 | name: fusioncatcher 2 | description: FusionCatcher searches for novel/known somatic fusion genes, translocations, and chimeras in RNA-seq data 3 | keywords: 4 | - fusioncatcher 5 | tools: 6 | - fusioncatcher: 7 | description: FusionCatcher searches for novel/known somatic fusion genes, translocations, and chimeras in RNA-seq data 8 | homepage: https://github.com/ndaniel/fusioncatcher 9 | documentation: https://github.com/ndaniel/fusioncatcher/wiki 10 | tool_dev_url: https://github.com/ndaniel/fusioncatcher 11 | doi: "10.1101/011650" 12 | licence: ["GPL v3"] 13 | 14 | input: 15 | - meta: 16 | type: map 17 | description: | 18 | Groovy Map containing sample information 19 | e.g. [ id:'test', single_end:false ] 20 | - reads: 21 | type: file 22 | description: FASTQ file 23 | pattern: "*.{fastq}" 24 | - reference: 25 | type: directory 26 | description: Path to fusioncatcher references 27 | pattern: "*" 28 | 29 | output: 30 | - meta: 31 | type: map 32 | description: | 33 | Groovy Map containing sample information 34 | e.g. [ id:'test', single_end:false ] 35 | - fusions: 36 | type: file 37 | description: Final list of candidate fusion genes 38 | pattern: "*.fusioncatcher.fusion-genes.txt" 39 | - summary: 40 | type: file 41 | description: Summary of fusion results 42 | pattern: "*.fusioncatcher.summary.txt" 43 | - log: 44 | type: file 45 | description: Log of fusion results 46 | pattern: "*.fusioncatcher.log" 47 | - versions: 48 | type: file 49 | description: File containing software versions 50 | pattern: "versions.yml" 51 | 52 | authors: 53 | - "@praveenraj2018,
@rannick" 54 | -------------------------------------------------------------------------------- /modules/local/fusioncatcher/download/main.nf: -------------------------------------------------------------------------------- 1 | process FUSIONCATCHER_DOWNLOAD { 2 | tag "fusioncatcher_download" 3 | label 'process_medium' 4 | 5 | conda "bioconda::fusioncatcher=1.33" 6 | container "docker.io/clinicalgenomics/fusioncatcher:1.33" 7 | 8 | output: 9 | path "*" , emit: reference 10 | path "versions.yml" , emit: versions 11 | 12 | when: 13 | task.ext.when == null || task.ext.when 14 | 15 | script: 16 | 17 | def args = task.ext.args ?: '' 18 | def args2 = task.ext.args2 ?: '' 19 | def human_version = "v102" 20 | def url = "http://sourceforge.net/projects/fusioncatcher/files/data/human_${human_version}.tar.gz.aa" 21 | """ 22 | if wget --spider "$url" 2>/dev/null; then 23 | wget $args $url 24 | wget $args http://sourceforge.net/projects/fusioncatcher/files/data/human_${human_version}.tar.gz.ab 25 | wget $args http://sourceforge.net/projects/fusioncatcher/files/data/human_${human_version}.tar.gz.ac 26 | wget $args http://sourceforge.net/projects/fusioncatcher/files/data/human_${human_version}.tar.gz.ad 27 | cat human_${human_version}.tar.gz.* | tar xz 28 | rm human_${human_version}.tar* 29 | else 30 | fusioncatcher-build \\ 31 | -g homo_sapiens \\ 32 | -o human_${human_version} \\ 33 | $args2 34 | fi 35 | 36 | cat <<-END_VERSIONS > versions.yml 37 | "${task.process}": 38 | fusioncatcher: \$(echo \$(fusioncatcher --version 2>&1)) 39 | END_VERSIONS 40 | """ 41 | 42 | stub: 43 | def human_version = "v102" 44 | """ 45 | mkdir human_${human_version} 46 | cat <<-END_VERSIONS > versions.yml 47 | "${task.process}": 48 | fusioncatcher: \$(echo \$(fusioncatcher --version 2>&1)) 49 | END_VERSIONS 50 | """ 51 | } 52 | -------------------------------------------------------------------------------- /modules/local/fusioncatcher/download/meta.yml: -------------------------------------------------------------------------------- 1 | name: fusioncatcher_download 2 | description: Build genome for fusioncatcher 3 | keywords: 4 | - sort 5 | tools: 6 | - fusioncatcher: 7 | description: Build genome for fusioncatcher 8 | homepage: https://github.com/ndaniel/fusioncatcher/ 9 | documentation: https://github.com/ndaniel/fusioncatcher/blob/master/doc/manual.md 10 | tool_dev_url: https://github.com/ndaniel/fusioncatcher/ 11 | doi: "10.1101/011650" 12 | licence: ["GPL v3"] 13 | 14 | output: 15 | - versions: 16 | type: file 17 | description: File containing software versions 18 | pattern: "versions.yml" 19 | - reference: 20 | type: directory 21 | description: Path to fusioncatcher references 22 | pattern: "*" 23 | 24 | authors: 25 | - "@praveenraj2018, @rannick" 26 | -------------------------------------------------------------------------------- /modules/local/fusioninspector/main.nf: -------------------------------------------------------------------------------- 1 | process FUSIONINSPECTOR { 2 | tag "$meta.id" 3 | label 'process_high' 4 | 5 | conda "bioconda::dfam=3.3 bioconda::hmmer=3.3.2 bioconda::star-fusion=1.12.0 bioconda::samtools=1.9 bioconda::star=2.7.8a" 6 | container 'docker.io/trinityctat/starfusion:1.12.0' 7 | 8 | input: 9 | tuple val(meta), path(reads), path(fusion_list) 10 | path reference 11 | 12 | output: 13 | tuple val(meta), path("*FusionInspector.fusions.tsv") , emit: tsv 14 | tuple val(meta), path("*.coding_effect") , optional:true, emit: tsv_coding_effect 15 | tuple val(meta), path("*.gtf") , optional:true, 
emit: out_gtf 16 | path "*" , emit: output 17 | path "versions.yml" , emit: versions 18 | 19 | when: 20 | task.ext.when == null || task.ext.when 21 | 22 | script: 23 | def prefix = task.ext.prefix ?: "${meta.id}" 24 | def fasta = meta.single_end ? "--left_fq ${reads[0]}" : "--left_fq ${reads[0]} --right_fq ${reads[1]}" 25 | def args = task.ext.args ?: '' 26 | def args2 = task.ext.args2 ?: '' 27 | """ 28 | FusionInspector \\ 29 | --fusions $fusion_list \\ 30 | --genome_lib ${reference} \\ 31 | $fasta \\ 32 | --CPU ${task.cpus} \\ 33 | -O . \\ 34 | --out_prefix $prefix \\ 35 | --vis $args $args2 36 | 37 | cat <<-END_VERSIONS > versions.yml 38 | "${task.process}": 39 | STAR-Fusion: \$(STAR-Fusion --version 2>&1 | grep -i 'version' | sed 's/STAR-Fusion version: //') 40 | END_VERSIONS 41 | """ 42 | 43 | stub: 44 | def prefix = task.ext.prefix ?: "${meta.id}" 45 | """ 46 | touch ${prefix}.FusionInspector.log 47 | touch ${prefix}.FusionInspector.fusions.tsv 48 | touch ${prefix}.FusionInspector.fusions.tsv.annotated.coding_effect 49 | touch ${prefix}.gtf 50 | 51 | cat <<-END_VERSIONS > versions.yml 52 | "${task.process}": 53 | STAR-Fusion: \$(STAR-Fusion --version 2>&1 | grep -i 'version' | sed 's/STAR-Fusion version: //') 54 | END_VERSIONS 55 | """ 56 | } 57 | -------------------------------------------------------------------------------- /modules/local/fusioninspector/meta.yml: -------------------------------------------------------------------------------- 1 | name: fusioninspector 2 | description: Validation of Fusion Transcript Predictions 3 | keywords: 4 | - fusioninspector 5 | tools: 6 | - fusioninspector: 7 | description: Validation of Fusion Transcript Predictions 8 | homepage: https://github.com/FusionInspector/FusionInspector 9 | documentation: https://github.com/FusionInspector/FusionInspector/wiki 10 | tool_dev_url: https://github.com/FusionInspector/FusionInspector 11 | doi: "10.1101/2021.08.02.454639" 12 | licence: https://github.com/FusionInspector/FusionInspector/blob/master/LICENSE.txt 13 | 14 | input: 15 | - meta: 16 | type: map 17 | description: | 18 | Groovy Map containing sample information 19 | e.g.
[ id:'test', single_end:false ] 20 | - reads: 21 | type: file 22 | description: FASTQ file 23 | pattern: "*.{fastq*}" 24 | - reference: 25 | type: directory 26 | description: Path to ctat references 27 | pattern: "*" 28 | 29 | output: 30 | - versions: 31 | type: file 32 | description: File containing software versions 33 | pattern: "versions.yml" 34 | - tsv: 35 | type: file 36 | description: FusionInspector fusion predictions 37 | pattern: "*FusionInspector.fusions.tsv" 38 | 39 | authors: 40 | - "@rannick" 41 | -------------------------------------------------------------------------------- /modules/local/fusionreport/detect/main.nf: -------------------------------------------------------------------------------- 1 | process FUSIONREPORT { 2 | tag "$meta.id" 3 | label 'process_medium' 4 | 5 | conda "bioconda::star=2.7.9a" 6 | container "docker.io/clinicalgenomics/fusion-report:2.1.8" 7 | 8 | 9 | input: 10 | tuple val(meta), path(reads), path(arriba_fusions), path(starfusion_fusions), path(fusioncatcher_fusions) 11 | tuple val(meta2), path(fusionreport_ref) 12 | val(tools_cutoff) 13 | 14 | output: 15 | path "versions.yml" , emit: versions 16 | tuple val(meta), path("*fusionreport.tsv") , emit: fusion_list 17 | tuple val(meta), path("*fusionreport_filtered.tsv") , emit: fusion_list_filtered 18 | tuple val(meta), path("*index.html") , emit: report 19 | tuple val(meta), path("*_*.html") , optional:true, emit: html 20 | tuple val(meta), path("*.csv") , optional:true, emit: csv 21 | tuple val(meta), path("*.json") , optional:true, emit: json 22 | 23 | when: 24 | task.ext.when == null || task.ext.when 25 | 26 | script: 27 | def args = task.ext.args ?: '' 28 | def args2 = task.ext.args2 ?: '' 29 | def tools = params.arriba || params.all ? "--arriba ${arriba_fusions} " : '' 30 | tools += params.starfusion || params.all ? "--starfusion ${starfusion_fusions} " : '' 31 | tools += params.fusioncatcher || params.all ? "--fusioncatcher ${fusioncatcher_fusions} " : '' 32 | def prefix = task.ext.prefix ?: "${meta.id}" 33 | """ 34 | fusion_report run $meta.id . $fusionreport_ref $tools --allow-multiple-gene-symbols --tool-cutoff $tools_cutoff $args $args2 35 | 36 | mv fusion_list.tsv ${prefix}.fusionreport.tsv 37 | mv fusion_list_filtered.tsv ${prefix}.fusionreport_filtered.tsv 38 | mv index.html ${prefix}_fusionreport_index.html 39 | [ ! -f fusions.csv ] || mv fusions.csv ${prefix}.fusions.csv 40 | [ !
-f fusions.json ] || mv fusions.json ${prefix}.fusions.json 41 | 42 | cat <<-END_VERSIONS > versions.yml 43 | "${task.process}": 44 | fusion_report: \$(fusion_report --version | sed 's/fusion-report //') 45 | fusion_report DB retrieval: \$(cat $fusionreport_ref/DB-timestamp.txt) 46 | END_VERSIONS 47 | """ 48 | 49 | stub: 50 | def prefix = task.ext.prefix ?: "${meta.id}" 51 | """ 52 | touch ${prefix}.fusionreport_filtered.tsv 53 | touch ${prefix}.fusionreport.tsv 54 | touch ${prefix}_fusionreport_index.html 55 | touch AAA_BBB.html 56 | touch ${prefix}.fusions.csv 57 | touch ${prefix}.fusions.json 58 | 59 | cat <<-END_VERSIONS > versions.yml 60 | "${task.process}": 61 | fusion_report: \$(fusion_report --version | sed 's/fusion-report //') 62 | END_VERSIONS 63 | """ 64 | } 65 | -------------------------------------------------------------------------------- /modules/local/fusionreport/detect/meta.yml: -------------------------------------------------------------------------------- 1 | name: fusionreport 2 | description: Summarise fusion calls from multiple fusion detection tools 3 | keywords: 4 | - fusion 5 | - report 6 | tools: 7 | - fusionreport: 8 | description: Tool for parsing outputs from fusion detection tools 9 | homepage: https://github.com/Clinical-Genomics/fusion-report 10 | documentation: https://matq007.github.io/fusion-report/#/ 11 | licence: ["GPL v3"] 12 | 13 | input: 14 | - meta: 15 | type: map 16 | description: | 17 | Groovy Map containing sample information 18 | e.g. [ id:'test', single_end:false ] 19 | - reference: 20 | type: path 21 | description: Path to fusionreport references 22 | pattern: "*" 23 | - arriba_fusions: 24 | type: path 25 | description: File containing fusions from arriba 26 | pattern: "*.fusions.tsv" 27 | - starfusion_fusions: 28 | type: path 29 | description: File containing fusions from STARfusion 30 | pattern: "*.starfusion.fusion_predictions.tsv" 31 | - fusioncatcher_fusions: 32 | type: path 33 | description: File containing fusions from fusioncatcher 34 | pattern: "*.fusions.tsv" 35 | 36 | output: 37 | - versions: 38 | type: file 39 | description: File containing software versions 40 | pattern: "versions.yml" 41 | - fusion_list: 42 | type: file 43 | description: File containing the summary of all fusions fed-in 44 | pattern: "*.tsv" 45 | - report: 46 | type: file 47 | description: HTML files 48 | pattern: "*.html" 49 | 50 | authors: 51 | - "@praveenraj2018, @rannick" 52 | -------------------------------------------------------------------------------- /modules/local/fusionreport/download/main.nf: -------------------------------------------------------------------------------- 1 | process FUSIONREPORT_DOWNLOAD { 2 | tag 'fusionreport' 3 | label 'process_medium' 4 | 5 | conda "bioconda::star=2.7.9a" 6 | container "docker.io/clinicalgenomics/fusion-report:2.1.8" 7 | 8 | input: 9 | val(username) 10 | val(passwd) 11 | 12 | output: 13 | path "*" , emit: reference 14 | path "versions.yml" , emit: versions 15 | 16 | script: 17 | def args = task.ext.args ?: '' 18 | """ 19 | fusion_report download --cosmic_usr "$username" --cosmic_passwd "$passwd" $args ./ 20 | 21 | cat <<-END_VERSIONS > versions.yml 22 | "${task.process}": 23 | fusion_report: \$(fusion_report --version | sed 's/fusion-report //') 24 | END_VERSIONS 25 | """ 26 | 27 | stub: 28 | """ 29 | touch cosmic.db 30 | touch fusiongdb2.db 31 | touch fusiongdb.db 32 | touch mitelman.db 33 | cat <<-END_VERSIONS > versions.yml 34 | "${task.process}": 35 | fusion_report: \$(fusion_report --version | sed 's/fusion-report //') 36 | END_VERSIONS 37 | """ 38 | 39 | } 40 |
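FUSIONREPORT_DOWNLOAD above boils down to a single fusion_report call. A minimal sketch of the equivalent manual invocation, using the same flags as the module's script block; the COSMIC credentials and the output directory are hypothetical placeholders, and $args is assumed empty:

fusion_report download --cosmic_usr "jane.doe" --cosmic_passwd "********" ./fusion_report_db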
-------------------------------------------------------------------------------- /modules/local/fusionreport/download/meta.yml: -------------------------------------------------------------------------------- 1 | name: fusionreport_download 2 | description: Build DB for fusionreport 3 | keywords: 4 | - download 5 | tools: 6 | - fusionreport: 7 | description: Tool for parsing outputs from fusion detection tools 8 | homepage: https://github.com/Clinical-Genomics/fusion-report 9 | documentation: https://matq007.github.io/fusion-report/#/ 10 | tool_dev_url: https://github.com/Clinical-Genomics/fusion-report 11 | licence: ["GPL v3"] 12 | 13 | input: 14 | - username: 15 | type: value 16 | description: COSMIC username used to download the fusion databases 17 | pattern: "*" 18 | - passwd: 19 | type: value 20 | description: COSMIC password used to download the fusion databases 21 | pattern: "*" 22 | 23 | output: 24 | - versions: 25 | type: file 26 | description: File containing software versions 27 | pattern: "versions.yml" 28 | - reference: 29 | type: directory 30 | description: directory containing the databases required by fusion-report 31 | pattern: "*" 32 | 33 | authors: 34 | - "@praveenraj2018" 35 | -------------------------------------------------------------------------------- /modules/local/hgnc/main.nf: -------------------------------------------------------------------------------- 1 | process HGNC_DOWNLOAD { 2 | tag "hgnc" 3 | label 'process_low' 4 | 5 | conda "bioconda::gnu-wget=1.18" 6 | container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 7 | 'https://depot.galaxyproject.org/singularity/gnu-wget:1.18--h5bf99c6_5' : 8 | 'quay.io/biocontainers/gnu-wget:1.18--h5bf99c6_5' }" 9 | 10 | output: 11 | path "hgnc_complete_set.txt" , emit: hgnc_ref 12 | path "HGNC-DB-timestamp.txt" , emit: hgnc_date 13 | 14 | path "versions.yml" , emit: versions 15 | 16 | 17 | script: 18 | """ 19 | wget https://ftp.ebi.ac.uk/pub/databases/genenames/hgnc/tsv/hgnc_complete_set.txt 20 | date +%Y-%m-%d/%H:%M > HGNC-DB-timestamp.txt 21 | 22 | cat <<-END_VERSIONS > versions.yml 23 | "${task.process}": 24 | wget: \$(wget -V 2>&1 | grep "GNU Wget" | cut -d" " -f3) 25 | END_VERSIONS 26 | """ 27 | 28 | stub: 29 | """ 30 | touch "hgnc_complete_set.txt" 31 | touch "HGNC-DB-timestamp.txt" 32 | 33 | cat <<-END_VERSIONS > versions.yml 34 | "${task.process}": 35 | wget: \$(wget -V 2>&1 | grep "GNU Wget" | cut -d" " -f3) 36 | END_VERSIONS 37 | """ 38 | 39 | } 40 | -------------------------------------------------------------------------------- /modules/local/picard/collectrnaseqmetrics/main.nf: -------------------------------------------------------------------------------- 1 | process PICARD_COLLECTRNASEQMETRICS { 2 | tag "$meta.id" 3 | label 'process_medium' 4 | 5 | conda "bioconda::picard=3.1.0" 6 | container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
7 | 'https://depot.galaxyproject.org/singularity/picard:3.1.0--hdfd78af_0' : 8 | 'biocontainers/picard:3.1.0--hdfd78af_0' }" 9 | 10 | input: 11 | tuple val(meta), path(bam), path(bai) 12 | tuple val(meta2), path(refflat) 13 | tuple val(meta3), path(rrna_intervals) 14 | 15 | output: 16 | tuple val(meta), path("*rna_metrics.txt") , emit: metrics 17 | path "versions.yml" , emit: versions 18 | 19 | when: 20 | task.ext.when == null || task.ext.when 21 | 22 | script: 23 | def strandedness = '' 24 | // def strandedness = '--STRAND_SPECIFICITY FIRST_READ_TRANSCRIPTION_STRAND' 25 | if ("${meta.strandedness}" == 'forward') { 26 | strandedness = '--STRAND_SPECIFICITY FIRST_READ_TRANSCRIPTION_STRAND' 27 | } else if ("${meta.strandedness}" == 'reverse') { 28 | strandedness = '--STRAND_SPECIFICITY SECOND_READ_TRANSCRIPTION_STRAND' 29 | } else { 30 | strandedness = '--STRAND_SPECIFICITY NONE' 31 | } 32 | 33 | def rrna = rrna_intervals == [] ? '' : "--RIBOSOMAL_INTERVALS ${rrna_intervals}" 34 | def args = task.ext.args ?: '' 35 | def prefix = task.ext.prefix ?: "${meta.id}" 36 | def avail_mem = 3072 37 | if (!task.memory) { 38 | log.info '[Picard CollectRnaSeqMetrics] Available memory not known - defaulting to 3GB. Specify process memory requirements to change this.' 39 | } else { 40 | avail_mem = (task.memory.mega*0.8).intValue() 41 | } 42 | """ 43 | picard \\ 44 | -Xmx${avail_mem}M \\ 45 | CollectRnaSeqMetrics \\ 46 | --TMP_DIR ./tmp \\ 47 | ${strandedness} \\ 48 | ${rrna} \\ 49 | --REF_FLAT ${refflat} \\ 50 | --INPUT ${bam} \\ 51 | --OUTPUT ${prefix}_rna_metrics.txt 52 | 53 | cat <<-END_VERSIONS > versions.yml 54 | "${task.process}": 55 | picard: \$(picard CollectRnaSeqMetrics --version 2>&1 | grep -o 'Version.*' | cut -f2- -d:) 56 | END_VERSIONS 57 | """ 58 | 59 | stub: 60 | def prefix = task.ext.prefix ?: "${meta.id}" 61 | """ 62 | touch ${prefix}_rna_metrics.txt 63 | cat <<-END_VERSIONS > versions.yml 64 | "${task.process}": 65 | picard: \$(picard CollectRnaSeqMetrics --version 2>&1 | grep -o 'Version.*' | cut -f2- -d:) 66 | END_VERSIONS 67 | """ 68 | } 69 | -------------------------------------------------------------------------------- /modules/local/picard/collectrnaseqmetrics/meta.yml: -------------------------------------------------------------------------------- 1 | name: picard_collectrnaseqmetrics 2 | description: Produces RNA alignment metrics for a SAM or BAM file. 3 | keywords: 4 | - alignment 5 | - metrics 6 | - statistics 7 | - quality 8 | - bam 9 | - RNA 10 | tools: 11 | - picard: 12 | description: | 13 | A set of command line tools (in Java) for manipulating high-throughput sequencing (HTS) 14 | data and formats such as SAM/BAM/CRAM and VCF. 15 | homepage: https://broadinstitute.github.io/picard/ 16 | documentation: https://broadinstitute.github.io/picard/ 17 | licence: ["MIT"] 18 | input: 19 | - meta: 20 | type: map 21 | description: | 22 | Groovy Map containing sample information 23 | e.g. [ id:'test', single_end:false ] 24 | - bam: 25 | type: file 26 | description: BAM file 27 | pattern: "*.{bam}" 28 | - bai: 29 | type: file 30 | description: An optional BAM index file. If desired, --CREATE_INDEX must be passed as a flag 31 | pattern: "*.{bai}" 32 | - refflat: 33 | type: file 34 | description: Gene annotations in refFlat form 35 | - rrna_intervals: 36 | type: file 37 | description: Location of rRNA sequences in genome, in interval_list format 38 | output: 39 | - meta: 40 | type: map 41 | description: | 42 | Groovy Map containing sample information 43 | e.g.
[ id:'test', single_end:false ] 44 | - metrics: 45 | type: file 46 | description: Alignment metrics files generated by picard 47 | pattern: "*_rna_metrics.txt" 48 | - versions: 49 | type: file 50 | description: File containing software versions 51 | pattern: "versions.yml" 52 | authors: 53 | - "@rannick" 54 | -------------------------------------------------------------------------------- /modules/local/rrnatranscripts/main.nf: -------------------------------------------------------------------------------- 1 | process RRNA_TRANSCRIPTS { 2 | tag "$meta.id" 3 | label 'process_single' 4 | 5 | conda "conda-forge::python=3.9" 6 | 7 | container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 8 | 'https://depot.galaxyproject.org/singularity/python:3.9--1' : 9 | 'quay.io/biocontainers/python:3.9--1' }" 10 | 11 | 12 | input: 13 | tuple val(meta), path(gtf) 14 | 15 | output: 16 | tuple val(meta), path("*rrna_intervals.gtf") , emit: rrna_gtf 17 | path "versions.yml" , emit: versions 18 | 19 | 20 | when: 21 | task.ext.when == null || task.ext.when 22 | 23 | script: // This script is bundled with the pipeline, in nf-core/rnafusion/bin/ 24 | def prefix = task.ext.prefix ?: "${meta.id}" 25 | """ 26 | get_rrna_transcripts.py $gtf ${prefix}_rrna_intervals.gtf 27 | cat <<-END_VERSIONS > versions.yml 28 | "${task.process}": 29 | python: \$(python --version | sed 's/Python //g') 30 | END_VERSIONS 31 | """ 32 | 33 | stub: 34 | def prefix = task.ext.prefix ?: "${meta.id}" 35 | """ 36 | touch ${prefix}_rrna_intervals.gtf 37 | cat <<-END_VERSIONS > versions.yml 38 | "${task.process}": 39 | python: \$(python --version | sed 's/Python //g') 40 | END_VERSIONS 41 | """ 42 | } 43 | -------------------------------------------------------------------------------- /modules/local/starfusion/build/main.nf: -------------------------------------------------------------------------------- 1 | process STARFUSION_BUILD { 2 | tag 'star-fusion' 3 | 4 | conda "bioconda::dfam=3.3 bioconda::hmmer=3.3.2 bioconda::star-fusion=1.12.0 bioconda::trinity=2.13.2 bioconda::samtools=1.9 bioconda::star=2.7.8a" 5 | container "docker.io/trinityctat/starfusion:1.12.0" 6 | 7 | input: 8 | tuple val(meta), path(fasta) 9 | tuple val(meta2), path(gtf) 10 | 11 | output: 12 | path "*" , emit: reference 13 | 14 | script: 15 | def binPath = (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) ?
"prep_genome_lib.pl" : "/usr/local/src/STAR-Fusion/ctat-genome-lib-builder/prep_genome_lib.pl" 16 | """ 17 | export TMPDIR=/tmp 18 | wget http://ftp.ebi.ac.uk/pub/databases/Pfam/releases/Pfam34.0/Pfam-A.hmm.gz --no-check-certificate 19 | wget https://github.com/FusionAnnotator/CTAT_HumanFusionLib/releases/download/v0.3.0/fusion_lib.Mar2021.dat.gz -O CTAT_HumanFusionLib_Mar2021.dat.gz --no-check-certificate 20 | wget https://data.broadinstitute.org/Trinity/CTAT_RESOURCE_LIB/AnnotFilterRule.pm -O AnnotFilterRule.pm --no-check-certificate 21 | wget https://www.dfam.org/releases/Dfam_3.4/infrastructure/dfamscan/homo_sapiens_dfam.hmm --no-check-certificate 22 | wget https://www.dfam.org/releases/Dfam_3.4/infrastructure/dfamscan/homo_sapiens_dfam.hmm.h3f --no-check-certificate 23 | wget https://www.dfam.org/releases/Dfam_3.4/infrastructure/dfamscan/homo_sapiens_dfam.hmm.h3i --no-check-certificate 24 | wget https://www.dfam.org/releases/Dfam_3.4/infrastructure/dfamscan/homo_sapiens_dfam.hmm.h3m --no-check-certificate 25 | wget https://www.dfam.org/releases/Dfam_3.4/infrastructure/dfamscan/homo_sapiens_dfam.hmm.h3p --no-check-certificate 26 | gunzip Pfam-A.hmm.gz && hmmpress Pfam-A.hmm 27 | $binPath \\ 28 | --genome_fa $fasta \\ 29 | --gtf $gtf \\ 30 | --annot_filter_rule AnnotFilterRule.pm \\ 31 | --fusion_annot_lib CTAT_HumanFusionLib_Mar2021.dat.gz \\ 32 | --pfam_db Pfam-A.hmm \\ 33 | --dfam_db homo_sapiens_dfam.hmm \\ 34 | --max_readlength $params.read_length \\ 35 | --CPU $task.cpus 36 | 37 | cat <<-END_VERSIONS > versions.yml 38 | "${task.process}": 39 | STAR-Fusion: \$(STAR-Fusion --version 2>&1 | grep -i 'version' | sed 's/STAR-Fusion version: //') 40 | END_VERSIONS 41 | """ 42 | 43 | stub: 44 | """ 45 | mkdir ctat_genome_lib_build_dir 46 | touch ref_annot.cdna.fa 47 | 48 | cat <<-END_VERSIONS > versions.yml 49 | "${task.process}": 50 | STAR-Fusion: \$(STAR-Fusion --version 2>&1 | grep -i 'version' | sed 's/STAR-Fusion version: //') 51 | END_VERSIONS 52 | """ 53 | 54 | } 55 | -------------------------------------------------------------------------------- /modules/local/starfusion/build/meta.yml: -------------------------------------------------------------------------------- 1 | name: starfusion_downloadgenome 2 | description: Download STAR-fusion genome resource required to run STAR-Fusion caller 3 | keywords: 4 | - downoad 5 | tools: 6 | - star-fusion: 7 | description: Fusion calling algorithm for RNAseq data 8 | homepage: https://github.com/STAR-Fusion/ 9 | documentation: https://github.com/STAR-Fusion/STAR-Fusion/wiki/installing-star-fusion 10 | tool_dev_url: https://github.com/STAR-Fusion/STAR-Fusion 11 | doi: "10.1186/s13059-019-1842-9" 12 | licence: ["GPL v3"] 13 | 14 | input: 15 | - fasta: 16 | type: file 17 | description: genome fasta file 18 | pattern: "*.{fasta}" 19 | - gtf: 20 | type: file 21 | description: genome gtf file 22 | pattern: "*.{gtf}" 23 | 24 | output: 25 | - reference: 26 | type: directory 27 | description: Reference dir 28 | pattern: "ctat_genome_lib_build_dir" 29 | 30 | authors: 31 | - "@praveenraj2018" 32 | -------------------------------------------------------------------------------- /modules/local/starfusion/detect/main.nf: -------------------------------------------------------------------------------- 1 | process STARFUSION { 2 | tag "$meta.id" 3 | label 'process_high' 4 | 5 | conda "bioconda::dfam=3.3 bioconda::hmmer=3.3.2 bioconda::star-fusion=1.12.0 bioconda::trinity=2.13.2 bioconda::samtools=1.9 bioconda::star=2.7.8a" 6 | container 
'docker.io/trinityctat/starfusion:1.12.0' 7 | 8 | input: 9 | tuple val(meta), path(reads), path(junction) 10 | path reference 11 | 12 | output: 13 | tuple val(meta), path("*.fusion_predictions.tsv") , emit: fusions 14 | tuple val(meta), path("*.abridged.tsv") , emit: abridged 15 | tuple val(meta), path("*.coding_effect.tsv") , optional: true , emit: coding_effect 16 | path "versions.yml" , emit: versions 17 | 18 | script: 19 | def prefix = task.ext.prefix ?: "${meta.id}" 20 | def fasta = meta.single_end ? "--left_fq ${reads[0]}" : "--left_fq ${reads[0]} --right_fq ${reads[1]}" 21 | def args = task.ext.args ?: '' 22 | """ 23 | STAR-Fusion \\ 24 | --genome_lib_dir $reference \\ 25 | $fasta \\ 26 | -J $junction \\ 27 | --CPU $task.cpus \\ 28 | --examine_coding_effect \\ 29 | --output_dir . \\ 30 | $args 31 | 32 | mv star-fusion.fusion_predictions.tsv ${prefix}.starfusion.fusion_predictions.tsv 33 | mv star-fusion.fusion_predictions.abridged.tsv ${prefix}.starfusion.abridged.tsv 34 | mv star-fusion.fusion_predictions.abridged.coding_effect.tsv ${prefix}.starfusion.abridged.coding_effect.tsv 35 | 36 | cat <<-END_VERSIONS > versions.yml 37 | "${task.process}": 38 | STAR-Fusion: \$(STAR-Fusion --version 2>&1 | grep -i 'version' | sed 's/STAR-Fusion version: //') 39 | END_VERSIONS 40 | """ 41 | 42 | stub: 43 | def prefix = task.ext.prefix ?: "${meta.id}" 44 | """ 45 | touch ${prefix}.starfusion.fusion_predictions.tsv 46 | touch ${prefix}.starfusion.abridged.tsv 47 | touch ${prefix}.starfusion.abridged.coding_effect.tsv 48 | cat <<-END_VERSIONS > versions.yml 49 | "${task.process}": 50 | STAR-Fusion: \$(STAR-Fusion --version 2>&1 | grep -i 'version' | sed 's/STAR-Fusion version: //') 51 | END_VERSIONS 52 | """ 53 | } 54 | 55 | 56 | -------------------------------------------------------------------------------- /modules/local/starfusion/detect/meta.yml: -------------------------------------------------------------------------------- 1 | name: starfusion 2 | description: Fast and Accurate Fusion Transcript Detection from RNA-Seq 3 | keywords: 4 | - Fusion 5 | tools: 6 | - star-fusion: 7 | description: Fast and Accurate Fusion Transcript Detection from RNA-Seq 8 | homepage: https://github.com/STAR-Fusion/STAR-Fusion 9 | documentation: https://github.com/STAR-Fusion/STAR-Fusion/wiki 10 | tool_dev_url: https://github.com/STAR-Fusion/STAR-Fusion/releases 11 | doi: "10.1101/120295" 12 | licence: ["GPL v3"] 13 | 14 | input: 15 | - meta: 16 | type: map 17 | description: | 18 | Groovy Map containing sample information 19 | e.g. [ id:'test', single_end:false ] 20 | - genome_lib: 21 | type: path 22 | description: STAR-fusion reference genome lib folder 23 | - junction: 24 | type: file 25 | description: Chimeric junction output from STAR aligner 26 | pattern: "*.{out.junction}" 27 | - reference: 28 | type: directory 29 | description: Reference dir 30 | pattern: "ctat_genome_lib_build_dir" 31 | 32 | output: 33 | - meta: 34 | type: map 35 | description: | 36 | Groovy Map containing sample information 37 | e.g.
[ id:'test', single_end:false ] 38 | - version: 39 | type: file 40 | description: File containing software version 41 | pattern: "*.{versions.yml}" 42 | - fusions: 43 | type: file 44 | description: Fusion events from STAR-fusion 45 | pattern: "*.{fusion_predictions.tsv}" 46 | - abridged: 47 | type: file 48 | description: Abridged fusion events from STAR-fusion 49 | pattern: "*.{abridged.tsv}" 50 | - coding_effect: 51 | type: file 52 | description: Fusion events from STAR-fusion annotated with coding effects 53 | pattern: "*.{coding_effect.tsv}" 54 | 55 | authors: 56 | - "@praveenraj2018" 57 | -------------------------------------------------------------------------------- /modules/local/starfusion/download/main.nf: -------------------------------------------------------------------------------- 1 | process STARFUSION_DOWNLOAD { 2 | tag 'star-fusion' 3 | 4 | conda "bioconda::dfam=3.3 bioconda::hmmer=3.3.2 bioconda::star-fusion=1.12.0 bioconda::trinity=2.13.2 bioconda::samtools=1.9 bioconda::star=2.7.8a" 5 | container 'docker.io/trinityctat/starfusion:1.12.0' 6 | 7 | output: 8 | path "ctat_genome_lib_build_dir/*" , emit: reference 9 | path "ctat_genome_lib_build_dir/ref_annot.gtf", emit: chrgtf 10 | 11 | 12 | script: 13 | """ 14 | wget https://data.broadinstitute.org/Trinity/CTAT_RESOURCE_LIB/__genome_libs_StarFv1.10/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play.tar.gz --no-check-certificate 15 | 16 | tar xvf GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play.tar.gz 17 | 18 | rm GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play.tar.gz 19 | 20 | mv */ctat_genome_lib_build_dir . 21 | """ 22 | 23 | stub: 24 | """ 25 | mkdir ctat_genome_lib_build_dir 26 | touch ref_annot.cdna.fa 27 | cat <<-END_VERSIONS > versions.yml 28 | "${task.process}": 29 | STAR-Fusion: \$(STAR-Fusion --version 2>&1 | grep -i 'version' | sed 's/STAR-Fusion version: //') 30 | END_VERSIONS 31 | """ 32 | } 33 | -------------------------------------------------------------------------------- /modules/local/starfusion/download/meta.yml: -------------------------------------------------------------------------------- 1 | name: starfusion_download 2 | description: Download STAR-fusion genome resource required to run STAR-Fusion caller 3 | keywords: 4 | - download 5 | tools: 6 | - star-fusion: 7 | description: Fusion calling algorithm for RNAseq data 8 | homepage: https://github.com/STAR-Fusion/ 9 | documentation: https://github.com/STAR-Fusion/STAR-Fusion/wiki/installing-star-fusion 10 | tool_dev_url: https://github.com/STAR-Fusion/STAR-Fusion 11 | doi: "10.1186/s13059-019-1842-9" 12 | licence: ["GPL v3"] 13 | 14 | output: 15 | - reference: 16 | type: directory 17 | description: Genome resource path 18 | pattern: "star-fusion-genome" 19 | - gtf: 20 | type: file 21 | description: genome gtf file 22 | pattern: "*.{gtf}" 23 | 24 | authors: 25 | - "@praveenraj2018,@rannick" 26 | -------------------------------------------------------------------------------- /modules/local/uscs/custom_gtftogenepred/main.nf: -------------------------------------------------------------------------------- 1 | process GTF_TO_REFFLAT { 2 | label 'process_low' 3 | 4 | conda "bioconda::ucsc-gtftogenepred=377" 5 | container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
6 | 'https://depot.galaxyproject.org/singularity/ucsc-gtftogenepred:377--ha8a8165_5' : 7 | 'quay.io/biocontainers/ucsc-gtftogenepred:377--ha8a8165_5' }" 8 | 9 | input: 10 | tuple val(meta), path (gtf) 11 | 12 | output: 13 | path('*.refflat'), emit: refflat 14 | path "versions.yml" , emit: versions 15 | 16 | script: 17 | def genepred = gtf + '.genepred' 18 | def refflat = gtf + '.refflat' 19 | """ 20 | gtfToGenePred -genePredExt -geneNameAsName2 ${gtf} ${genepred} 21 | paste ${genepred} ${genepred} | cut -f12,16-25 > ${refflat} 22 | 23 | cat <<-END_VERSIONS > versions.yml 24 | "${task.process}": 25 | gtfToGenePred: 377 26 | END_VERSIONS 27 | """ 28 | 29 | stub: 30 | def refflat = gtf + '.refflat' 31 | """ 32 | touch ${refflat} 33 | cat <<-END_VERSIONS > versions.yml 34 | "${task.process}": 35 | gtfToGenePred: 377 36 | END_VERSIONS 37 | """ 38 | } 39 | -------------------------------------------------------------------------------- /modules/local/uscs/custom_gtftogenepred/meta.yml: -------------------------------------------------------------------------------- 1 | name: gtf_to_refflat 2 | description: generate gene annotations in refFlat form 3 | keywords: 4 | - gtftorefflat 5 | tools: 6 | - gtf_to_refflat: 7 | description: generate gene annotations in refFlat form with UCSC gtfToGenePred 8 | homepage: https://genome.ucsc.edu 9 | documentation: https://hgdownload.soe.ucsc.edu/admin/exe/ 10 | tool_dev_url: https://hgdownload.soe.ucsc.edu/admin/exe/ 11 | doi: "" 12 | licence: ["varies; see http://genome.ucsc.edu/license"] 13 | 14 | input: 15 | - gtf: 16 | type: file 17 | description: Annotation GTF file 18 | pattern: "*.{gtf}" 19 | 20 | output: 21 | - versions: 22 | type: file 23 | description: File containing software versions 24 | pattern: "versions.yml" 25 | - refflat: 26 | type: file 27 | description: Gene annotations in refFlat form 28 | pattern: "*.{refflat}" 29 | 30 | authors: 31 | - "@rannick" 32 | -------------------------------------------------------------------------------- /modules/local/vcf_collect/main.nf: -------------------------------------------------------------------------------- 1 | process VCF_COLLECT { 2 | tag "$meta.id" 3 | label 'process_single' 4 | 5 | conda "conda-forge::pandas=1.5.2" 6 | container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
7 | 'https://depot.galaxyproject.org/singularity/pandas:1.5.2' : 8 | 'quay.io/biocontainers/pandas:1.5.2' }" 9 | 10 | input: 11 | tuple val(meta), path(fusioninspector_tsv), path(fusioninspector_gtf_tsv), path(fusionreport_report), path(fusionreport_csv) 12 | tuple val(meta2), path(hgnc_ref) 13 | tuple val(meta3), path(hgnc_date) 14 | 15 | output: 16 | path "versions.yml" , emit: versions 17 | tuple val(meta), path("*vcf.gz") , emit: vcf 18 | 19 | when: 20 | task.ext.when == null || task.ext.when 21 | 22 | script: 23 | def prefix = task.ext.prefix ?: "${meta.id}" 24 | """ 25 | vcf_collect.py --fusioninspector $fusioninspector_tsv --fusionreport $fusionreport_report --fusioninspector_gtf $fusioninspector_gtf_tsv --fusionreport_csv $fusionreport_csv --hgnc $hgnc_ref --sample ${prefix} --out ${prefix}_fusion_data.vcf 26 | gzip ${prefix}_fusion_data.vcf 27 | 28 | cat <<-END_VERSIONS > versions.yml 29 | "${task.process}": 30 | python: \$(python --version | sed 's/Python //g') 31 | HGNC DB retrieval: \$(cat $hgnc_date) 32 | END_VERSIONS 33 | """ 34 | 35 | stub: 36 | def prefix = task.ext.prefix ?: "${meta.id}" 37 | """ 38 | touch ${prefix}_fusion_data.vcf.gz 39 | 40 | cat <<-END_VERSIONS > versions.yml 41 | "${task.process}": 42 | python: \$(python --version | sed 's/Python //g') 43 | END_VERSIONS 44 | """ 45 | } 46 | -------------------------------------------------------------------------------- /modules/local/vcf_collect/meta.yml: -------------------------------------------------------------------------------- 1 | name: vcf_collect 2 | description: Converts RNA fusion files to SV VCF and collects statistics and metrics in a VCF file 3 | keywords: 4 | - vcf 5 | tools: 6 | - vcf_collect: 7 | description: Converts RNA fusion files to SV VCF and collects statistics and metrics in a VCF file. 8 | homepage: Adapted from https://github.com/J35P312/MegaFusion 9 | documentation: https://github.com/J35P312/MegaFusion 10 | doi: "" 11 | licence: [""] 12 | 13 | input: 14 | - meta: 15 | type: map 16 | description: | 17 | Groovy Map containing sample information 18 | e.g. [ id:'test', single_end:false ] 19 | - tsv: 20 | type: path 21 | description: Path to FusionInspector tsv output 22 | pattern: "*" 23 | - report: 24 | type: path 25 | description: Path to fusionreport report 26 | pattern: "*.fusions.tsv" 27 | 28 | output: 29 | - versions: 30 | type: file 31 | description: File containing software versions 32 | pattern: "versions.yml" 33 | - vcf: 34 | type: file 35 | description: File containing the summary of all fusions as compressed vcf file 36 | pattern: "*.vcf.gz" 37 | 38 | authors: 39 | - "@rannick" 40 | -------------------------------------------------------------------------------- /modules/nf-core/agat/convertspgff2tsv/environment.yml: -------------------------------------------------------------------------------- 1 | name: agat_convertspgff2tsv 2 | channels: 3 | - conda-forge 4 | - bioconda 5 | - defaults 6 | dependencies: 7 | - bioconda::agat=1.2.0 8 | -------------------------------------------------------------------------------- /modules/nf-core/agat/convertspgff2tsv/main.nf: -------------------------------------------------------------------------------- 1 | process AGAT_CONVERTSPGFF2TSV { 2 | tag "$meta.id" 3 | label 'process_single' 4 | 5 | conda "${moduleDir}/environment.yml" 6 | container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
7 | 'https://depot.galaxyproject.org/singularity/agat:1.2.0--pl5321hdfd78af_0' : 8 | 'biocontainers/agat:1.2.0--pl5321hdfd78af_0' }" 9 | 10 | input: 11 | tuple val(meta), path(gff) 12 | 13 | output: 14 | tuple val(meta), path("*.tsv"), emit: tsv 15 | path "versions.yml" , emit: versions 16 | 17 | when: 18 | task.ext.when == null || task.ext.when 19 | 20 | script: 21 | def args = task.ext.args ?: '' 22 | def prefix = task.ext.prefix ?: "${meta.id}" 23 | 24 | """ 25 | agat_convert_sp_gff2tsv.pl \\ 26 | --gff $gff \\ 27 | --output ${prefix}.tsv \\ 28 | $args 29 | 30 | cat <<-END_VERSIONS > versions.yml 31 | "${task.process}": 32 | agat: \$(agat_convert_sp_gff2tsv.pl --help | sed '3!d; s/.*v//' | sed 's/ .*//') 33 | END_VERSIONS 34 | """ 35 | } 36 | -------------------------------------------------------------------------------- /modules/nf-core/agat/convertspgff2tsv/meta.yml: -------------------------------------------------------------------------------- 1 | name: agat_convertspgff2tsv 2 | description: | 3 | Converts a GFF/GTF file into a TSV file 4 | keywords: 5 | - genome 6 | - gff 7 | - gtf 8 | - conversion 9 | - tsv 10 | tools: 11 | - agat: 12 | description: "AGAT is a toolkit for manipulation and getting information from GFF/GTF files" 13 | homepage: "https://github.com/NBISweden/AGAT" 14 | documentation: "https://agat.readthedocs.io/" 15 | tool_dev_url: "https://github.com/NBISweden/AGAT" 16 | doi: "10.5281/zenodo.3552717" 17 | licence: ["GPL v3"] 18 | input: 19 | - meta: 20 | type: map 21 | description: | 22 | Groovy Map containing sample information 23 | e.g. [ id:'test', single_end:false ] 24 | - gff: 25 | type: file 26 | description: Annotation file in GFF3/GTF format 27 | pattern: "*.{gff, gtf}" 28 | output: 29 | - tsv: 30 | type: file 31 | description: Annotation file in TSV format 32 | pattern: "*.{tsv}" 33 | - versions: 34 | type: file 35 | description: File containing software versions 36 | pattern: "versions.yml" 37 | authors: 38 | - "@rannick" 39 | -------------------------------------------------------------------------------- /modules/nf-core/arriba/main.nf: -------------------------------------------------------------------------------- 1 | process ARRIBA { 2 | tag "$meta.id" 3 | label 'process_medium' 4 | 5 | conda "bioconda::arriba=2.4.0" 6 | container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 7 | 'https://depot.galaxyproject.org/singularity/arriba:2.4.0--h0033a41_2' : 8 | 'biocontainers/arriba:2.4.0--h0033a41_2' }" 9 | 10 | input: 11 | tuple val(meta), path(bam) 12 | tuple val(meta2), path(fasta) 13 | tuple val(meta3), path(gtf) 14 | tuple val(meta4), path(blacklist) 15 | tuple val(meta5), path(known_fusions) 16 | tuple val(meta6), path(structural_variants) 17 | tuple val(meta7), path(tags) 18 | tuple val(meta8), path(protein_domains) 19 | 20 | output: 21 | tuple val(meta), path("*.fusions.tsv") , emit: fusions 22 | tuple val(meta), path("*.fusions.discarded.tsv"), emit: fusions_fail 23 | path "versions.yml" , emit: versions 24 | 25 | when: 26 | task.ext.when == null || task.ext.when 27 | 28 | script: 29 | def args = task.ext.args ?: '' 30 | def prefix = task.ext.prefix ?: "${meta.id}" 31 | def blacklist = blacklist ? "-b $blacklist" : "-f blacklist" 32 | def known_fusions = known_fusions ? "-k $known_fusions" : "" 33 | def structural_variants = structural_variants ? "-d $structural_variants" : "" 34 | def tags = tags ? "-t $tags" : "" 35 | def protein_domains = protein_domains ?
"-p $protein_domains" : "" 36 | 37 | """ 38 | arriba \\ 39 | -x $bam \\ 40 | -a $fasta \\ 41 | -g $gtf \\ 42 | -o ${prefix}.fusions.tsv \\ 43 | -O ${prefix}.fusions.discarded.tsv \\ 44 | $blacklist \\ 45 | $known_fusions \\ 46 | $structural_variants \\ 47 | $tags \\ 48 | $protein_domains \\ 49 | $args 50 | 51 | cat <<-END_VERSIONS > versions.yml 52 | "${task.process}": 53 | arriba: \$(arriba -h | grep 'Version:' 2>&1 | sed 's/Version:\s//') 54 | END_VERSIONS 55 | """ 56 | 57 | stub: 58 | def prefix = task.ext.prefix ?: "${meta.id}" 59 | """ 60 | echo stub > ${prefix}.fusions.tsv 61 | echo stub > ${prefix}.fusions.discarded.tsv 62 | 63 | echo "${task.process}:" > versions.yml 64 | echo ' arriba: 2.2.1' >> versions.yml 65 | """ 66 | } 67 | -------------------------------------------------------------------------------- /modules/nf-core/cat/cat/environment.yml: -------------------------------------------------------------------------------- 1 | name: cat_cat 2 | channels: 3 | - conda-forge 4 | - bioconda 5 | - defaults 6 | dependencies: 7 | - conda-forge::pigz=2.3.4 8 | -------------------------------------------------------------------------------- /modules/nf-core/cat/cat/main.nf: -------------------------------------------------------------------------------- 1 | process CAT_CAT { 2 | tag "$meta.id" 3 | label 'process_low' 4 | 5 | conda "${moduleDir}/environment.yml" 6 | container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 7 | 'https://depot.galaxyproject.org/singularity/pigz:2.3.4' : 8 | 'biocontainers/pigz:2.3.4' }" 9 | 10 | input: 11 | tuple val(meta), path(files_in) 12 | 13 | output: 14 | tuple val(meta), path("${prefix}"), emit: file_out 15 | path "versions.yml" , emit: versions 16 | 17 | when: 18 | task.ext.when == null || task.ext.when 19 | 20 | script: 21 | def args = task.ext.args ?: '' 22 | def args2 = task.ext.args2 ?: '' 23 | def file_list = files_in.collect { it.toString() } 24 | 25 | // | input | output | command1 | command2 | 26 | // |-----------|------------|----------|----------| 27 | // | gzipped | gzipped | cat | | 28 | // | ungzipped | ungzipped | cat | | 29 | // | gzipped | ungzipped | zcat | | 30 | // | ungzipped | gzipped | cat | pigz | 31 | 32 | // Use input file ending as default 33 | prefix = task.ext.prefix ?: "${meta.id}${file_list[0].substring(file_list[0].lastIndexOf('.'))}" 34 | out_zip = prefix.endsWith('.gz') 35 | in_zip = file_list[0].endsWith('.gz') 36 | command1 = (in_zip && !out_zip) ? 'zcat' : 'cat' 37 | command2 = (!in_zip && out_zip) ? 
"| pigz -c -p $task.cpus $args2" : '' 38 | """ 39 | $command1 \\ 40 | $args \\ 41 | ${file_list.join(' ')} \\ 42 | $command2 \\ 43 | > ${prefix} 44 | 45 | cat <<-END_VERSIONS > versions.yml 46 | "${task.process}": 47 | pigz: \$( pigz --version 2>&1 | sed 's/pigz //g' ) 48 | END_VERSIONS 49 | """ 50 | 51 | stub: 52 | def file_list = files_in.collect { it.toString() } 53 | prefix = task.ext.prefix ?: "${meta.id}${file_list[0].substring(file_list[0].lastIndexOf('.'))}" 54 | """ 55 | touch $prefix 56 | 57 | cat <<-END_VERSIONS > versions.yml 58 | "${task.process}": 59 | pigz: \$( pigz --version 2>&1 | sed 's/pigz //g' ) 60 | END_VERSIONS 61 | """ 62 | } 63 | -------------------------------------------------------------------------------- /modules/nf-core/cat/cat/meta.yml: -------------------------------------------------------------------------------- 1 | name: cat_cat 2 | description: A module for concatenation of gzipped or uncompressed files 3 | keywords: 4 | - concatenate 5 | - gzip 6 | - cat 7 | tools: 8 | - cat: 9 | description: Just concatenation 10 | documentation: https://man7.org/linux/man-pages/man1/cat.1.html 11 | licence: ["GPL-3.0-or-later"] 12 | input: 13 | - meta: 14 | type: map 15 | description: | 16 | Groovy Map containing sample information 17 | e.g. [ id:'test', single_end:false ] 18 | - files_in: 19 | type: file 20 | description: List of compressed / uncompressed files 21 | pattern: "*" 22 | output: 23 | - versions: 24 | type: file 25 | description: File containing software versions 26 | pattern: "versions.yml" 27 | - file_out: 28 | type: file 29 | description: Concatenated file. Will be gzipped if file_out ends with ".gz" 30 | pattern: "${file_out}" 31 | authors: 32 | - "@erikrikarddaniel" 33 | - "@FriederikeHanssen" 34 | maintainers: 35 | - "@erikrikarddaniel" 36 | - "@FriederikeHanssen" 37 | -------------------------------------------------------------------------------- /modules/nf-core/cat/cat/tests/nextflow_unzipped_zipped.config: -------------------------------------------------------------------------------- 1 | 2 | process { 3 | withName: CAT_CAT { 4 | ext.prefix = 'cat.txt.gz' 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /modules/nf-core/cat/cat/tests/nextflow_zipped_unzipped.config: -------------------------------------------------------------------------------- 1 | 2 | process { 3 | 4 | withName: CAT_CAT { 5 | ext.prefix = 'cat.txt' 6 | } 7 | 8 | } 9 | -------------------------------------------------------------------------------- /modules/nf-core/cat/cat/tests/tags.yml: -------------------------------------------------------------------------------- 1 | cat/cat: 2 | - modules/nf-core/cat/cat/** 3 | -------------------------------------------------------------------------------- /modules/nf-core/cat/fastq/environment.yml: -------------------------------------------------------------------------------- 1 | name: cat_fastq 2 | channels: 3 | - conda-forge 4 | - bioconda 5 | - defaults 6 | dependencies: 7 | - conda-forge::sed=4.7 8 | -------------------------------------------------------------------------------- /modules/nf-core/cat/fastq/meta.yml: -------------------------------------------------------------------------------- 1 | name: cat_fastq 2 | description: Concatenates fastq files 3 | keywords: 4 | - cat 5 | - fastq 6 | - concatenate 7 | tools: 8 | - cat: 9 | description: | 10 | The cat utility reads files sequentially, writing them to the standard output. 
11 | documentation: https://www.gnu.org/software/coreutils/manual/html_node/cat-invocation.html 12 | licence: ["GPL-3.0-or-later"] 13 | input: 14 | - meta: 15 | type: map 16 | description: | 17 | Groovy Map containing sample information 18 | e.g. [ id:'test', single_end:false ] 19 | - reads: 20 | type: file 21 | description: | 22 | List of input FastQ files to be concatenated. 23 | output: 24 | - meta: 25 | type: map 26 | description: | 27 | Groovy Map containing sample information 28 | e.g. [ id:'test', single_end:false ] 29 | - reads: 30 | type: file 31 | description: Merged fastq file 32 | pattern: "*.{merged.fastq.gz}" 33 | - versions: 34 | type: file 35 | description: File containing software versions 36 | pattern: "versions.yml" 37 | authors: 38 | - "@joseespinosa" 39 | - "@drpatelh" 40 | maintainers: 41 | - "@joseespinosa" 42 | - "@drpatelh" 43 | -------------------------------------------------------------------------------- /modules/nf-core/cat/fastq/tests/main.nf.test.snap: -------------------------------------------------------------------------------- 1 | { 2 | "test_cat_fastq_single_end": { 3 | "content": [ 4 | [ 5 | [ 6 | { 7 | "id": "test", 8 | "single_end": true 9 | }, 10 | "test.merged.fastq.gz:md5,f9cf5e375f7de81a406144a2c70cc64d" 11 | ] 12 | ] 13 | ], 14 | "timestamp": "2023-10-17T23:19:12.990284837" 15 | }, 16 | "test_cat_fastq_single_end_same_name": { 17 | "content": [ 18 | [ 19 | [ 20 | { 21 | "id": "test", 22 | "single_end": true 23 | }, 24 | "test.merged.fastq.gz:md5,63f817db7a29a03eb538104495556f66" 25 | ] 26 | ] 27 | ], 28 | "timestamp": "2023-10-17T23:19:31.554568147" 29 | }, 30 | "test_cat_fastq_single_end_single_file": { 31 | "content": [ 32 | [ 33 | [ 34 | { 35 | "id": "test", 36 | "single_end": true 37 | }, 38 | "test.merged.fastq.gz:md5,e325ef7deb4023447a1f074e285761af" 39 | ] 40 | ] 41 | ], 42 | "timestamp": "2023-10-17T23:19:49.629360033" 43 | }, 44 | "test_cat_fastq_paired_end_same_name": { 45 | "content": [ 46 | [ 47 | [ 48 | { 49 | "id": "test", 50 | "single_end": false 51 | }, 52 | [ 53 | "test_1.merged.fastq.gz:md5,63f817db7a29a03eb538104495556f66", 54 | "test_2.merged.fastq.gz:md5,fe9f266f43a6fc3dcab690a18419a56e" 55 | ] 56 | ] 57 | ] 58 | ], 59 | "timestamp": "2023-10-17T23:19:40.711617539" 60 | }, 61 | "test_cat_fastq_paired_end": { 62 | "content": [ 63 | [ 64 | [ 65 | { 66 | "id": "test", 67 | "single_end": false 68 | }, 69 | [ 70 | "test_1.merged.fastq.gz:md5,f9cf5e375f7de81a406144a2c70cc64d", 71 | "test_2.merged.fastq.gz:md5,77c8e966e130d8c6b6ec9be52fcb2bda" 72 | ] 73 | ] 74 | ] 75 | ], 76 | "timestamp": "2023-10-18T07:53:20.923560211" 77 | } 78 | } -------------------------------------------------------------------------------- /modules/nf-core/cat/fastq/tests/tags.yml: -------------------------------------------------------------------------------- 1 | cat/fastq: 2 | - modules/nf-core/cat/fastq/** 3 | -------------------------------------------------------------------------------- /modules/nf-core/fastp/environment.yml: -------------------------------------------------------------------------------- 1 | name: fastp 2 | channels: 3 | - conda-forge 4 | - bioconda 5 | - defaults 6 | dependencies: 7 | - bioconda::fastp=0.23.4 8 | -------------------------------------------------------------------------------- /modules/nf-core/fastp/meta.yml: -------------------------------------------------------------------------------- 1 | name: fastp 2 | description: Perform adapter/quality trimming on sequencing reads 3 | keywords: 4 | - trimming 5 | - quality 
control 6 | - fastq 7 | tools: 8 | - fastp: 9 | description: | 10 | A tool designed to provide fast all-in-one preprocessing for FastQ files. This tool is developed in C++ with multithreading supported to afford high performance. 11 | documentation: https://github.com/OpenGene/fastp 12 | doi: 10.1093/bioinformatics/bty560 13 | licence: ["MIT"] 14 | input: 15 | - meta: 16 | type: map 17 | description: | 18 | Groovy Map containing sample information. Use 'single_end: true' to specify single ended or interleaved FASTQs. Use 'single_end: false' for paired-end reads. 19 | e.g. [ id:'test', single_end:false ] 20 | - reads: 21 | type: file 22 | description: | 23 | List of input FastQ files of size 1 and 2 for single-end and paired-end data, 24 | respectively. If you wish to run interleaved paired-end data, supply as single-end data 25 | but with `--interleaved_in` in your `modules.config`'s `ext.args` for the module. 26 | - adapter_fasta: 27 | type: file 28 | description: File in FASTA format containing possible adapters to remove. 29 | pattern: "*.{fasta,fna,fas,fa}" 30 | - save_trimmed_fail: 31 | type: boolean 32 | description: Specify true to save files that failed to pass trimming thresholds ending in `*.fail.fastq.gz` 33 | - save_merged: 34 | type: boolean 35 | description: Specify true to save all merged reads to a file ending in `*.merged.fastq.gz` 36 | output: 37 | - meta: 38 | type: map 39 | description: | 40 | Groovy Map containing sample information 41 | e.g. [ id:'test', single_end:false ] 42 | - reads: 43 | type: file 44 | description: The trimmed/modified/unmerged fastq reads 45 | pattern: "*fastp.fastq.gz" 46 | - json: 47 | type: file 48 | description: Results in JSON format 49 | pattern: "*.json" 50 | - html: 51 | type: file 52 | description: Results in HTML format 53 | pattern: "*.html" 54 | - log: 55 | type: file 56 | description: fastp log file 57 | pattern: "*.log" 58 | - versions: 59 | type: file 60 | description: File containing software versions 61 | pattern: "versions.yml" 62 | - reads_fail: 63 | type: file 64 | description: Reads that failed the preprocessing 65 | pattern: "*fail.fastq.gz" 66 | - reads_merged: 67 | type: file 68 | description: Reads that were successfully merged 69 | pattern: "*.{merged.fastq.gz}" 70 | authors: 71 | - "@drpatelh" 72 | - "@kevinmenden" 73 | maintainers: 74 | - "@drpatelh" 75 | - "@kevinmenden" 76 | -------------------------------------------------------------------------------- /modules/nf-core/fastp/tests/main.nf.test.snap: -------------------------------------------------------------------------------- 1 | { 2 | "fastp test_fastp_interleaved_json": { 3 | "content": [ 4 | [ 5 | [ 6 | { 7 | "id": "test", 8 | "single_end": true 9 | }, 10 | "test.fastp.json:md5,168f516f7bd4b7b6c32da7cba87299a4" 11 | ] 12 | ] 13 | ], 14 | "timestamp": "2023-10-17T11:04:45.794175881" 15 | }, 16 | "test_fastp_single_end_json": { 17 | "content": [ 18 | [ 19 | [ 20 | { 21 | "id": "test", 22 | "single_end": true 23 | }, 24 | "test.fastp.json:md5,c852d7a6dba5819e4ac8d9673bedcacc" 25 | ] 26 | ] 27 | ], 28 | "timestamp": "2023-10-17T11:04:10.566343705" 29 | }, 30 | "versions": { 31 | "content": [ 32 | [ 33 | "versions.yml:md5,48ffc994212fb1fc9f83a74fa69c9f02" 34 | ] 35 | ], 36 | "timestamp": "2023-10-17T11:04:10.582076024" 37 | }, 38 | "test_fastp_single_end_trim_fail_json": { 39 | "content": [ 40 | [ 41 | [ 42 | { 43 | "id": "test", 44 | "single_end": true 45 | }, 46 | "test.fastp.json:md5,9a7ee180f000e8d00c7fb67f06293eb5" 47 | ] 48 | ] 49 | ], 50 | "timestamp":
"2023-10-17T11:05:00.379878948" 51 | } 52 | } -------------------------------------------------------------------------------- /modules/nf-core/fastp/tests/nextflow.config: -------------------------------------------------------------------------------- 1 | process { 2 | 3 | withName: FASTP { 4 | ext.args = "--interleaved_in" 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /modules/nf-core/fastp/tests/tags.yml: -------------------------------------------------------------------------------- 1 | fastp: 2 | - modules/nf-core/fastp/** 3 | -------------------------------------------------------------------------------- /modules/nf-core/fastqc/environment.yml: -------------------------------------------------------------------------------- 1 | name: fastqc 2 | channels: 3 | - conda-forge 4 | - bioconda 5 | - defaults 6 | dependencies: 7 | - bioconda::fastqc=0.12.1 8 | -------------------------------------------------------------------------------- /modules/nf-core/fastqc/main.nf: -------------------------------------------------------------------------------- 1 | process FASTQC { 2 | tag "$meta.id" 3 | label 'process_medium' 4 | 5 | conda "${moduleDir}/environment.yml" 6 | container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 7 | 'https://depot.galaxyproject.org/singularity/fastqc:0.12.1--hdfd78af_0' : 8 | 'biocontainers/fastqc:0.12.1--hdfd78af_0' }" 9 | 10 | input: 11 | tuple val(meta), path(reads) 12 | 13 | output: 14 | tuple val(meta), path("*.html"), emit: html 15 | tuple val(meta), path("*.zip") , emit: zip 16 | path "versions.yml" , emit: versions 17 | 18 | when: 19 | task.ext.when == null || task.ext.when 20 | 21 | script: 22 | def args = task.ext.args ?: '' 23 | def prefix = task.ext.prefix ?: "${meta.id}" 24 | // Make list of old name and new name pairs to use for renaming in the bash while loop 25 | def old_new_pairs = reads instanceof Path || reads.size() == 1 ? [[ reads, "${prefix}.${reads.extension}" ]] : reads.withIndex().collect { entry, index -> [ entry, "${prefix}_${index + 1}.${entry.extension}" ] } 26 | def rename_to = old_new_pairs*.join(' ').join(' ') 27 | def renamed_files = old_new_pairs.collect{ old_name, new_name -> new_name }.join(' ') 28 | """ 29 | printf "%s %s\\n" $rename_to | while read old_name new_name; do 30 | [ -f "\${new_name}" ] || ln -s \$old_name \$new_name 31 | done 32 | 33 | fastqc \\ 34 | $args \\ 35 | --threads $task.cpus \\ 36 | $renamed_files 37 | 38 | cat <<-END_VERSIONS > versions.yml 39 | "${task.process}": 40 | fastqc: \$( fastqc --version | sed '/FastQC v/!d; s/.*v//' ) 41 | END_VERSIONS 42 | """ 43 | 44 | stub: 45 | def prefix = task.ext.prefix ?: "${meta.id}" 46 | """ 47 | touch ${prefix}.html 48 | touch ${prefix}.zip 49 | 50 | cat <<-END_VERSIONS > versions.yml 51 | "${task.process}": 52 | fastqc: \$( fastqc --version | sed '/FastQC v/!d; s/.*v//' ) 53 | END_VERSIONS 54 | """ 55 | } 56 | -------------------------------------------------------------------------------- /modules/nf-core/fastqc/meta.yml: -------------------------------------------------------------------------------- 1 | name: fastqc 2 | description: Run FastQC on sequenced reads 3 | keywords: 4 | - quality control 5 | - qc 6 | - adapters 7 | - fastq 8 | tools: 9 | - fastqc: 10 | description: | 11 | FastQC gives general quality metrics about your reads. 12 | It provides information about the quality score distribution 13 | across your reads, the per base sequence content (%A/C/G/T). 
14 | You get information about adapter contamination and other 15 | overrepresented sequences. 16 | homepage: https://www.bioinformatics.babraham.ac.uk/projects/fastqc/ 17 | documentation: https://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/ 18 | licence: ["GPL-2.0-only"] 19 | input: 20 | - meta: 21 | type: map 22 | description: | 23 | Groovy Map containing sample information 24 | e.g. [ id:'test', single_end:false ] 25 | - reads: 26 | type: file 27 | description: | 28 | List of input FastQ files of size 1 and 2 for single-end and paired-end data, 29 | respectively. 30 | output: 31 | - meta: 32 | type: map 33 | description: | 34 | Groovy Map containing sample information 35 | e.g. [ id:'test', single_end:false ] 36 | - html: 37 | type: file 38 | description: FastQC report 39 | pattern: "*_{fastqc.html}" 40 | - zip: 41 | type: file 42 | description: FastQC report archive 43 | pattern: "*_{fastqc.zip}" 44 | - versions: 45 | type: file 46 | description: File containing software versions 47 | pattern: "versions.yml" 48 | authors: 49 | - "@drpatelh" 50 | - "@grst" 51 | - "@ewels" 52 | - "@FelixKrueger" 53 | maintainers: 54 | - "@drpatelh" 55 | - "@grst" 56 | - "@ewels" 57 | - "@FelixKrueger" 58 | -------------------------------------------------------------------------------- /modules/nf-core/fastqc/tests/main.nf.test.snap: -------------------------------------------------------------------------------- 1 | { 2 | "fastqc_versions_interleaved": { 3 | "content": [ 4 | [ 5 | "versions.yml:md5,e1cc25ca8af856014824abd842e93978" 6 | ] 7 | ], 8 | "meta": { 9 | "nf-test": "0.8.4", 10 | "nextflow": "23.10.1" 11 | }, 12 | "timestamp": "2024-01-31T17:40:07.293713" 13 | }, 14 | "fastqc_stub": { 15 | "content": [ 16 | [ 17 | "test.html", 18 | "test.zip", 19 | "versions.yml:md5,e1cc25ca8af856014824abd842e93978" 20 | ] 21 | ], 22 | "meta": { 23 | "nf-test": "0.8.4", 24 | "nextflow": "23.10.1" 25 | }, 26 | "timestamp": "2024-01-31T17:31:01.425198" 27 | }, 28 | "fastqc_versions_multiple": { 29 | "content": [ 30 | [ 31 | "versions.yml:md5,e1cc25ca8af856014824abd842e93978" 32 | ] 33 | ], 34 | "meta": { 35 | "nf-test": "0.8.4", 36 | "nextflow": "23.10.1" 37 | }, 38 | "timestamp": "2024-01-31T17:40:55.797907" 39 | }, 40 | "fastqc_versions_bam": { 41 | "content": [ 42 | [ 43 | "versions.yml:md5,e1cc25ca8af856014824abd842e93978" 44 | ] 45 | ], 46 | "meta": { 47 | "nf-test": "0.8.4", 48 | "nextflow": "23.10.1" 49 | }, 50 | "timestamp": "2024-01-31T17:40:26.795862" 51 | }, 52 | "fastqc_versions_single": { 53 | "content": [ 54 | [ 55 | "versions.yml:md5,e1cc25ca8af856014824abd842e93978" 56 | ] 57 | ], 58 | "meta": { 59 | "nf-test": "0.8.4", 60 | "nextflow": "23.10.1" 61 | }, 62 | "timestamp": "2024-01-31T17:39:27.043675" 63 | }, 64 | "fastqc_versions_paired": { 65 | "content": [ 66 | [ 67 | "versions.yml:md5,e1cc25ca8af856014824abd842e93978" 68 | ] 69 | ], 70 | "meta": { 71 | "nf-test": "0.8.4", 72 | "nextflow": "23.10.1" 73 | }, 74 | "timestamp": "2024-01-31T17:39:47.584191" 75 | }, 76 | "fastqc_versions_custom_prefix": { 77 | "content": [ 78 | [ 79 | "versions.yml:md5,e1cc25ca8af856014824abd842e93978" 80 | ] 81 | ], 82 | "meta": { 83 | "nf-test": "0.8.4", 84 | "nextflow": "23.10.1" 85 | }, 86 | "timestamp": "2024-01-31T17:41:14.576531" 87 | } 88 | } -------------------------------------------------------------------------------- /modules/nf-core/fastqc/tests/tags.yml: -------------------------------------------------------------------------------- 1 | fastqc: 2 | - modules/nf-core/fastqc/** 3 | 
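The fastp and FastQC modules above share the standard nf-core calling convention: each takes a [ meta, reads ] tuple, and fastp additionally takes an adapter FASTA path plus the two booleans (save_trimmed_fail, save_merged) documented in its meta.yml. Below is a minimal sketch of how a read-QC/trimming subworkflow might wire the two together, assuming the file lives under subworkflows/local/ so the ../../modules include paths resolve; the workflow name, channel names, and the choice to pass an empty list instead of an adapter FASTA are illustrative assumptions, not a copy of the pipeline's actual trim_workflow.nf.

include { FASTQC } from '../../modules/nf-core/fastqc/main'
include { FASTP  } from '../../modules/nf-core/fastp/main'

workflow QC_TRIM {
    take:
    ch_reads // channel: [ val(meta), [ path(fastq) ] ]

    main:
    // Raw-read QC: emits per-sample *_fastqc.html and *_fastqc.zip
    FASTQC ( ch_reads )

    // Adapter/quality trimming: [] = no custom adapter FASTA (hypothetical choice);
    // false, false = do not save failed reads, do not save merged reads
    FASTP ( ch_reads, [], false, false )

    emit:
    reads    = FASTP.out.reads                               // trimmed FASTQs (*.fastp.fastq.gz)
    versions = FASTQC.out.versions.mix( FASTP.out.versions ) // combined versions.yml channel
}

Because both outputs are MultiQC-aware, the FastQC zip archives and the fastp JSON reports emitted here can be fed straight into the MULTIQC module shown later in this section.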
-------------------------------------------------------------------------------- /modules/nf-core/gatk4/bedtointervallist/environment.yml: -------------------------------------------------------------------------------- 1 | name: gatk4_bedtointervallist 2 | channels: 3 | - conda-forge 4 | - bioconda 5 | - defaults 6 | dependencies: 7 | - bioconda::gatk4=4.4.0.0 8 | -------------------------------------------------------------------------------- /modules/nf-core/gatk4/bedtointervallist/main.nf: -------------------------------------------------------------------------------- 1 | process GATK4_BEDTOINTERVALLIST { 2 | tag "$meta.id" 3 | label 'process_medium' 4 | 5 | conda "${moduleDir}/environment.yml" 6 | container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 7 | 'https://depot.galaxyproject.org/singularity/gatk4:4.4.0.0--py36hdfd78af_0': 8 | 'biocontainers/gatk4:4.4.0.0--py36hdfd78af_0' }" 9 | 10 | input: 11 | tuple val(meta), path(bed) 12 | tuple val(meta2), path(dict) 13 | 14 | output: 15 | tuple val(meta), path('*.interval_list'), emit: interval_list 16 | path "versions.yml" , emit: versions 17 | 18 | when: 19 | task.ext.when == null || task.ext.when 20 | 21 | script: 22 | def args = task.ext.args ?: '' 23 | def prefix = task.ext.prefix ?: "${meta.id}" 24 | 25 | def avail_mem = 3072 26 | if (!task.memory) { 27 | log.info '[GATK BedToIntervalList] Available memory not known - defaulting to 3GB. Specify process memory requirements to change this.' 28 | } else { 29 | avail_mem = (task.memory.mega*0.8).intValue() 30 | } 31 | """ 32 | gatk --java-options "-Xmx${avail_mem}M -XX:-UsePerfData" \\ 33 | BedToIntervalList \\ 34 | --INPUT $bed \\ 35 | --OUTPUT ${prefix}.interval_list \\ 36 | --SEQUENCE_DICTIONARY $dict \\ 37 | --TMP_DIR . \\ 38 | $args 39 | 40 | cat <<-END_VERSIONS > versions.yml 41 | "${task.process}": 42 | gatk4: \$(echo \$(gatk --version 2>&1) | sed 's/^.*(GATK) v//; s/ .*\$//') 43 | END_VERSIONS 44 | """ 45 | 46 | stub: 47 | def prefix = task.ext.prefix ?: "${meta.id}" 48 | """ 49 | touch ${prefix}.interval_list 50 | 51 | cat <<-END_VERSIONS > versions.yml 52 | "${task.process}": 53 | gatk4: \$(echo \$(gatk --version 2>&1) | sed 's/^.*(GATK) v//; s/ .*\$//') 54 | END_VERSIONS 55 | """ 56 | } 57 | -------------------------------------------------------------------------------- /modules/nf-core/gatk4/bedtointervallist/meta.yml: -------------------------------------------------------------------------------- 1 | name: gatk4_bedtointervallist 2 | description: Creates an interval list from a bed file and a reference dict 3 | keywords: 4 | - bed 5 | - bedtointervallist 6 | - gatk4 7 | - interval list 8 | tools: 9 | - gatk4: 10 | description: | 11 | Developed in the Data Sciences Platform at the Broad Institute, the toolkit offers a wide variety of tools 12 | with a primary focus on variant discovery and genotyping. Its powerful processing engine 13 | and high-performance computing features make it capable of taking on projects of any size. 14 | homepage: https://gatk.broadinstitute.org/hc/en-us 15 | documentation: https://gatk.broadinstitute.org/hc/en-us/categories/360002369672 16 | doi: 10.1158/1538-7445.AM2017-3590 17 | licence: ["Apache-2.0"] 18 | input: 19 | - meta: 20 | type: map 21 | description: | 22 | Groovy Map containing sample information 23 | e.g.
[ id:'test'] 24 | - bed: 25 | type: file 26 | description: Input bed file 27 | pattern: "*.bed" 28 | - meta2: 29 | type: map 30 | description: | 31 | Groovy Map containing reference information 32 | e.g. [ id:'genome' ] 33 | - dict: 34 | type: file 35 | description: Sequence dictionary 36 | pattern: "*.dict" 37 | output: 38 | - interval_list: 39 | type: file 40 | description: gatk interval list file 41 | pattern: "*.interval_list" 42 | - versions: 43 | type: file 44 | description: File containing software versions 45 | pattern: "versions.yml" 46 | authors: 47 | - "@kevinmenden" 48 | - "@ramprasadn" 49 | maintainers: 50 | - "@kevinmenden" 51 | - "@ramprasadn" 52 | -------------------------------------------------------------------------------- /modules/nf-core/gatk4/createsequencedictionary/environment.yml: -------------------------------------------------------------------------------- 1 | name: gatk4_createsequencedictionary 2 | channels: 3 | - conda-forge 4 | - bioconda 5 | - defaults 6 | dependencies: 7 | - bioconda::gatk4=4.4.0.0 8 | -------------------------------------------------------------------------------- /modules/nf-core/gatk4/createsequencedictionary/main.nf: -------------------------------------------------------------------------------- 1 | process GATK4_CREATESEQUENCEDICTIONARY { 2 | tag "$fasta" 3 | label 'process_medium' 4 | 5 | conda "${moduleDir}/environment.yml" 6 | container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 7 | 'https://depot.galaxyproject.org/singularity/gatk4:4.4.0.0--py36hdfd78af_0': 8 | 'biocontainers/gatk4:4.4.0.0--py36hdfd78af_0' }" 9 | 10 | input: 11 | tuple val(meta), path(fasta) 12 | 13 | output: 14 | tuple val(meta), path('*.dict') , emit: dict 15 | path "versions.yml" , emit: versions 16 | 17 | when: 18 | task.ext.when == null || task.ext.when 19 | 20 | script: 21 | def args = task.ext.args ?: '' 22 | 23 | def avail_mem = 6144 24 | if (!task.memory) { 25 | log.info '[GATK CreateSequenceDictionary] Available memory not known - defaulting to 6GB. Specify process memory requirements to change this.' 26 | } else { 27 | avail_mem = (task.memory.mega*0.8).intValue() 28 | } 29 | """ 30 | gatk --java-options "-Xmx${avail_mem}M -XX:-UsePerfData" \\ 31 | CreateSequenceDictionary \\ 32 | --REFERENCE $fasta \\ 33 | --URI $fasta \\ 34 | --TMP_DIR . \\ 35 | $args 36 | 37 | cat <<-END_VERSIONS > versions.yml 38 | "${task.process}": 39 | gatk4: \$(echo \$(gatk --version 2>&1) | sed 's/^.*(GATK) v//; s/ .*\$//') 40 | END_VERSIONS 41 | """ 42 | 43 | stub: 44 | """ 45 | touch ${fasta.baseName}.dict 46 | 47 | cat <<-END_VERSIONS > versions.yml 48 | "${task.process}": 49 | gatk4: \$(echo \$(gatk --version 2>&1) | sed 's/^.*(GATK) v//; s/ .*\$//') 50 | END_VERSIONS 51 | """ 52 | } 53 | -------------------------------------------------------------------------------- /modules/nf-core/gatk4/createsequencedictionary/meta.yml: -------------------------------------------------------------------------------- 1 | name: gatk4_createsequencedictionary 2 | description: Creates a sequence dictionary for a reference sequence 3 | keywords: 4 | - createsequencedictionary 5 | - dictionary 6 | - fasta 7 | - gatk4 8 | tools: 9 | - gatk: 10 | description: | 11 | Developed in the Data Sciences Platform at the Broad Institute, the toolkit offers a wide variety of tools 12 | with a primary focus on variant discovery and genotyping. 
Its powerful processing engine 13 | and high-performance computing features make it capable of taking on projects of any size. 14 | homepage: https://gatk.broadinstitute.org/hc/en-us 15 | documentation: https://gatk.broadinstitute.org/hc/en-us/categories/360002369672 16 | doi: 10.1158/1538-7445.AM2017-3590 17 | licence: ["Apache-2.0"] 18 | input: 19 | - meta: 20 | type: map 21 | description: | 22 | Groovy Map containing reference information 23 | e.g. [ id:'genome' ] 24 | - fasta: 25 | type: file 26 | description: Input fasta file 27 | pattern: "*.{fasta,fa}" 28 | output: 29 | - dict: 30 | type: file 31 | description: gatk dictionary file 32 | pattern: "*.{dict}" 33 | - versions: 34 | type: file 35 | description: File containing software versions 36 | pattern: "versions.yml" 37 | authors: 38 | - "@maxulysse" 39 | - "@ramprasadn" 40 | maintainers: 41 | - "@maxulysse" 42 | - "@ramprasadn" 43 | -------------------------------------------------------------------------------- /modules/nf-core/gatk4/markduplicates/environment.yml: -------------------------------------------------------------------------------- 1 | name: gatk4_markduplicates 2 | channels: 3 | - conda-forge 4 | - bioconda 5 | - defaults 6 | dependencies: 7 | - bioconda::gatk4=4.4.0.0 8 | - bioconda::samtools=1.17 9 | -------------------------------------------------------------------------------- /modules/nf-core/gatk4/markduplicates/meta.yml: -------------------------------------------------------------------------------- 1 | name: gatk4_markduplicates 2 | description: This tool locates and tags duplicate reads in a BAM or SAM file, where duplicate reads are defined as originating from a single fragment of DNA. 3 | keywords: 4 | - bam 5 | - gatk4 6 | - markduplicates 7 | - sort 8 | tools: 9 | - gatk4: 10 | description: Developed in the Data Sciences Platform at the Broad Institute, the toolkit offers a wide variety of tools with a primary focus on variant discovery and genotyping. Its powerful processing engine and high-performance computing features make it capable of taking on projects of any size. 11 | homepage: https://gatk.broadinstitute.org/hc/en-us 12 | documentation: https://gatk.broadinstitute.org/hc/en-us/articles/360037052812-MarkDuplicates-Picard- 13 | tool_dev_url: https://github.com/broadinstitute/gatk 14 | doi: 10.1158/1538-7445.AM2017-3590 15 | licence: ["Apache-2.0"] 16 | input: 17 | - meta: 18 | type: map 19 | description: | 20 | Groovy Map containing sample information 21 | e.g. [ id:'test', single_end:false ] 22 | - bam: 23 | type: file 24 | description: Sorted BAM file 25 | pattern: "*.{bam}" 26 | - fasta: 27 | type: file 28 | description: Fasta file 29 | pattern: "*.{fasta}" 30 | - fasta_fai: 31 | type: file 32 | description: Fasta index file 33 | pattern: "*.{fai}" 34 | output: 35 | - meta: 36 | type: map 37 | description: | 38 | Groovy Map containing sample information 39 | e.g.
[ id:'test', single_end:false ] 40 | - versions: 41 | type: file 42 | description: File containing software versions 43 | pattern: "versions.yml" 44 | - bam: 45 | type: file 46 | description: Marked duplicates BAM file 47 | pattern: "*.{bam}" 48 | - cram: 49 | type: file 50 | description: Marked duplicates CRAM file 51 | pattern: "*.{cram}" 52 | - bai: 53 | type: file 54 | description: BAM index file 55 | pattern: "*.{bam.bai}" 56 | - crai: 57 | type: file 58 | description: CRAM index file 59 | pattern: "*.{cram.crai}" 60 | - metrics: 61 | type: file 62 | description: Duplicate metrics file generated by GATK 63 | pattern: "*.{metrics.txt}" 64 | authors: 65 | - "@ajodeh-juma" 66 | - "@FriederikeHanssen" 67 | - "@maxulysse" 68 | maintainers: 69 | - "@ajodeh-juma" 70 | - "@FriederikeHanssen" 71 | - "@maxulysse" 72 | -------------------------------------------------------------------------------- /modules/nf-core/multiqc/environment.yml: -------------------------------------------------------------------------------- 1 | name: multiqc 2 | channels: 3 | - conda-forge 4 | - bioconda 5 | - defaults 6 | dependencies: 7 | - bioconda::multiqc=1.21 8 | -------------------------------------------------------------------------------- /modules/nf-core/multiqc/main.nf: -------------------------------------------------------------------------------- 1 | process MULTIQC { 2 | label 'process_single' 3 | 4 | conda "${moduleDir}/environment.yml" 5 | container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 6 | 'https://depot.galaxyproject.org/singularity/multiqc:1.21--pyhdfd78af_0' : 7 | 'biocontainers/multiqc:1.21--pyhdfd78af_0' }" 8 | 9 | input: 10 | path multiqc_files, stageAs: "?/*" 11 | path(multiqc_config) 12 | path(extra_multiqc_config) 13 | path(multiqc_logo) 14 | 15 | output: 16 | path "*multiqc_report.html", emit: report 17 | path "*_data" , emit: data 18 | path "*_plots" , optional:true, emit: plots 19 | path "versions.yml" , emit: versions 20 | 21 | when: 22 | task.ext.when == null || task.ext.when 23 | 24 | script: 25 | def args = task.ext.args ?: '' 26 | def config = multiqc_config ? "--config $multiqc_config" : '' 27 | def extra_config = extra_multiqc_config ? "--config $extra_multiqc_config" : '' 28 | def logo = multiqc_logo ? /--cl-config 'custom_logo: "${multiqc_logo}"'/ : '' 29 | """ 30 | multiqc \\ 31 | --force \\ 32 | $args \\ 33 | $config \\ 34 | $extra_config \\ 35 | $logo \\ 36 | . 37 | 38 | cat <<-END_VERSIONS > versions.yml 39 | "${task.process}": 40 | multiqc: \$( multiqc --version | sed -e "s/multiqc, version //g" ) 41 | END_VERSIONS 42 | """ 43 | 44 | stub: 45 | """ 46 | mkdir multiqc_data 47 | touch multiqc_plots 48 | touch multiqc_report.html 49 | 50 | cat <<-END_VERSIONS > versions.yml 51 | "${task.process}": 52 | multiqc: \$( multiqc --version | sed -e "s/multiqc, version //g" ) 53 | END_VERSIONS 54 | """ 55 | } 56 | -------------------------------------------------------------------------------- /modules/nf-core/multiqc/meta.yml: -------------------------------------------------------------------------------- 1 | name: multiqc 2 | description: Aggregate results from bioinformatics analyses across many samples into a single report 3 | keywords: 4 | - QC 5 | - bioinformatics tools 6 | - Beautiful stand-alone HTML report 7 | tools: 8 | - multiqc: 9 | description: | 10 | MultiQC searches a given directory for analysis logs and compiles a HTML report. 
11 | It's a general use tool, perfect for summarising the output from numerous bioinformatics tools. 12 | homepage: https://multiqc.info/ 13 | documentation: https://multiqc.info/docs/ 14 | licence: ["GPL-3.0-or-later"] 15 | input: 16 | - multiqc_files: 17 | type: file 18 | description: | 19 | List of reports / files recognised by MultiQC, for example the html and zip output of FastQC 20 | - multiqc_config: 21 | type: file 22 | description: Optional config yml for MultiQC 23 | pattern: "*.{yml,yaml}" 24 | - extra_multiqc_config: 25 | type: file 26 | description: Second optional config yml for MultiQC. Will override common sections in multiqc_config. 27 | pattern: "*.{yml,yaml}" 28 | - multiqc_logo: 29 | type: file 30 | description: Optional logo file for MultiQC 31 | pattern: "*.{png}" 32 | output: 33 | - report: 34 | type: file 35 | description: MultiQC report file 36 | pattern: "multiqc_report.html" 37 | - data: 38 | type: directory 39 | description: MultiQC data dir 40 | pattern: "multiqc_data" 41 | - plots: 42 | type: file 43 | description: Plots created by MultiQC 44 | pattern: "*_plots" 45 | - versions: 46 | type: file 47 | description: File containing software versions 48 | pattern: "versions.yml" 49 | authors: 50 | - "@abhi18av" 51 | - "@bunop" 52 | - "@drpatelh" 53 | - "@jfy133" 54 | maintainers: 55 | - "@abhi18av" 56 | - "@bunop" 57 | - "@drpatelh" 58 | - "@jfy133" 59 | -------------------------------------------------------------------------------- /modules/nf-core/multiqc/tests/main.nf.test.snap: -------------------------------------------------------------------------------- 1 | { 2 | "multiqc_versions_single": { 3 | "content": [ 4 | [ 5 | "versions.yml:md5,21f35ee29416b9b3073c28733efe4b7d" 6 | ] 7 | ], 8 | "meta": { 9 | "nf-test": "0.8.4", 10 | "nextflow": "23.10.1" 11 | }, 12 | "timestamp": "2024-02-29T08:48:55.657331" 13 | }, 14 | "multiqc_stub": { 15 | "content": [ 16 | [ 17 | "multiqc_report.html", 18 | "multiqc_data", 19 | "multiqc_plots", 20 | "versions.yml:md5,21f35ee29416b9b3073c28733efe4b7d" 21 | ] 22 | ], 23 | "meta": { 24 | "nf-test": "0.8.4", 25 | "nextflow": "23.10.1" 26 | }, 27 | "timestamp": "2024-02-29T08:49:49.071937" 28 | }, 29 | "multiqc_versions_config": { 30 | "content": [ 31 | [ 32 | "versions.yml:md5,21f35ee29416b9b3073c28733efe4b7d" 33 | ] 34 | ], 35 | "meta": { 36 | "nf-test": "0.8.4", 37 | "nextflow": "23.10.1" 38 | }, 39 | "timestamp": "2024-02-29T08:49:25.457567" 40 | } 41 | } -------------------------------------------------------------------------------- /modules/nf-core/multiqc/tests/tags.yml: -------------------------------------------------------------------------------- 1 | multiqc: 2 | - modules/nf-core/multiqc/** 3 | -------------------------------------------------------------------------------- /modules/nf-core/picard/collectinsertsizemetrics/environment.yml: -------------------------------------------------------------------------------- 1 | name: picard_collectinsertsizemetrics 2 | channels: 3 | - conda-forge 4 | - bioconda 5 | - defaults 6 | dependencies: 7 | - bioconda::picard=3.1.0 8 | -------------------------------------------------------------------------------- /modules/nf-core/picard/collectinsertsizemetrics/main.nf: -------------------------------------------------------------------------------- 1 | process PICARD_COLLECTINSERTSIZEMETRICS { 2 | tag "$meta.id" 3 | label 'process_single' 4 | 5 | conda "${moduleDir}/environment.yml" 6 | container "${ workflow.containerEngine == 'singularity' &&
!task.ext.singularity_pull_docker_container ? 7 | 'https://depot.galaxyproject.org/singularity/picard:3.1.0--hdfd78af_0' : 8 | 'biocontainers/picard:3.1.0--hdfd78af_0' }" 9 | 10 | input: 11 | tuple val(meta), path(bam) 12 | 13 | output: 14 | tuple val(meta), path("*.txt"), emit: metrics 15 | tuple val(meta), path("*.pdf"), emit: histogram 16 | path "versions.yml" , emit: versions 17 | 18 | when: 19 | task.ext.when == null || task.ext.when 20 | 21 | script: 22 | def args = task.ext.args ?: '' 23 | def prefix = task.ext.prefix ?: "${meta.id}" 24 | 25 | def avail_mem = 3072 26 | if (!task.memory) { 27 | log.info '[Picard CollectInsertSizeMetrics] Available memory not known - defaulting to 3GB. Specify process memory requirements to change this.' 28 | } else { 29 | avail_mem = (task.memory.mega*0.8).intValue() 30 | } 31 | """ 32 | picard \\ 33 | -Xmx${avail_mem}M \\ 34 | CollectInsertSizeMetrics \\ 35 | $args \\ 36 | --INPUT $bam \\ 37 | --OUTPUT ${prefix}.txt \\ 38 | --Histogram_FILE ${prefix}.pdf 39 | 40 | 41 | cat <<-END_VERSIONS > versions.yml 42 | "${task.process}": 43 | picard: \$(picard CollectInsertSizeMetrics --version 2>&1 | grep -o 'Version:.*' | cut -f2- -d:) 44 | END_VERSIONS 45 | """ 46 | 47 | 48 | stub: 49 | def prefix = task.ext.prefix ?: "${meta.id}" 50 | """ 51 | touch ${prefix}.pdf 52 | touch ${prefix}.txt 53 | cat <<-END_VERSIONS > versions.yml 54 | "${task.process}": 55 | picard: \$(picard CollectInsertSizeMetrics --version 2>&1 | grep -o 'Version:.*' | cut -f2- -d:) 56 | END_VERSIONS 57 | """ 58 | 59 | 60 | 61 | } 62 | -------------------------------------------------------------------------------- /modules/nf-core/picard/collectinsertsizemetrics/meta.yml: -------------------------------------------------------------------------------- 1 | name: "picard_collectinsertsizemetrics" 2 | description: Collect metrics about the insert size distribution of a paired-end library. 3 | keywords: 4 | - metrics 5 | - alignment 6 | - insert 7 | - statistics 8 | - bam 9 | tools: 10 | - "picard": 11 | description: "Java tools for working with NGS data in the BAM format" 12 | homepage: "https://broadinstitute.github.io/picard/" 13 | documentation: "https://broadinstitute.github.io/picard/" 14 | tool_dev_url: "https://github.com/broadinstitute/picard" 15 | licence: ["MIT"] 16 | input: 17 | - meta: 18 | type: map 19 | description: | 20 | Groovy Map containing sample information 21 | e.g. [ id:'test', single_end:false ] 22 | - bam: 23 | type: file 24 | description: BAM/CRAM/SAM file 25 | pattern: "*.{bam,cram,sam}" 26 | output: 27 | - meta: 28 | type: map 29 | description: | 30 | Groovy Map containing sample information 31 | e.g.
[ id:'test', single_end:false ] 32 | - versions: 33 | type: file 34 | description: File containing software versions 35 | pattern: "versions.yml" 36 | - pdf: 37 | type: file 38 | description: Histogram plots of the insert size metrics computed by Picard 39 | pattern: "*.pdf" 40 | - metrics: 41 | type: file 42 | description: Values used by Picard to generate the insert size histograms 43 | pattern: "*.txt" 44 | authors: 45 | - "@FerriolCalvet" 46 | maintainers: 47 | - "@FerriolCalvet" 48 | -------------------------------------------------------------------------------- /modules/nf-core/picard/collectwgsmetrics/environment.yml: -------------------------------------------------------------------------------- 1 | name: picard_collectwgsmetrics 2 | channels: 3 | - conda-forge 4 | - bioconda 5 | - defaults 6 | dependencies: 7 | - bioconda::picard=3.1.0 8 | - r::r-base 9 | -------------------------------------------------------------------------------- /modules/nf-core/picard/collectwgsmetrics/main.nf: -------------------------------------------------------------------------------- 1 | process PICARD_COLLECTWGSMETRICS { 2 | tag "$meta.id" 3 | label 'process_single' 4 | 5 | conda "${moduleDir}/environment.yml" 6 | container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 7 | 'https://depot.galaxyproject.org/singularity/picard:3.1.0--hdfd78af_0' : 8 | 'biocontainers/picard:3.1.0--hdfd78af_0' }" 9 | 10 | input: 11 | tuple val(meta), path(bam), path(bai) 12 | tuple val(meta2), path(fasta) 13 | tuple val(meta3), path(fai) 14 | path intervallist 15 | 16 | output: 17 | tuple val(meta), path("*_metrics"), emit: metrics 18 | path "versions.yml" , emit: versions 19 | 20 | when: 21 | task.ext.when == null || task.ext.when 22 | 23 | script: 24 | def args = task.ext.args ?: '' 25 | def prefix = task.ext.prefix ?: "${meta.id}" 26 | def avail_mem = 3072 27 | def interval = intervallist ? "--INTERVALS ${intervallist}" : '' 28 | if (!task.memory) { 29 | log.info '[Picard CollectWgsMetrics] Available memory not known - defaulting to 3GB. Specify process memory requirements to change this.' 30 | } else { 31 | avail_mem = (task.memory.mega*0.8).intValue() 32 | } 33 | """ 34 | picard \\ 35 | -Xmx${avail_mem}M \\ 36 | CollectWgsMetrics \\ 37 | $args \\ 38 | --INPUT $bam \\ 39 | --OUTPUT ${prefix}.CollectWgsMetrics.coverage_metrics \\ 40 | --REFERENCE_SEQUENCE ${fasta} \\ 41 | $interval 42 | 43 | 44 | cat <<-END_VERSIONS > versions.yml 45 | "${task.process}": 46 | picard: \$(picard CollectWgsMetrics --version 2>&1 | grep -o 'Version.*' | cut -f2- -d:) 47 | END_VERSIONS 48 | """ 49 | 50 | stub: 51 | def prefix = task.ext.prefix ?: "${meta.id}" 52 | """ 53 | touch ${prefix}.CollectWgsMetrics.coverage_metrics 54 | 55 | cat <<-END_VERSIONS > versions.yml 56 | "${task.process}": 57 | picard: \$(picard CollectWgsMetrics --version 2>&1 | grep -o 'Version.*' | cut -f2- -d:) 58 | END_VERSIONS 59 | """ 60 | } 61 | -------------------------------------------------------------------------------- /modules/nf-core/picard/collectwgsmetrics/meta.yml: -------------------------------------------------------------------------------- 1 | name: picard_collectwgsmetrics 2 | description: Collect metrics about coverage and performance of whole genome sequencing (WGS) experiments. 
3 | keywords: 4 | - alignment 5 | - metrics 6 | - statistics 7 | - quality 8 | - bam 9 | tools: 10 | - picard: 11 | description: | 12 | A set of command line tools (in Java) for manipulating high-throughput sequencing (HTS) 13 | data and formats such as SAM/BAM/CRAM and VCF. 14 | homepage: https://broadinstitute.github.io/picard/ 15 | documentation: https://broadinstitute.github.io/picard/ 16 | licence: ["MIT"] 17 | input: 18 | - meta: 19 | type: map 20 | description: | 21 | Groovy Map containing sample information 22 | e.g. [ id:'test', single_end:false ] 23 | - bam: 24 | type: file 25 | description: Aligned reads file 26 | pattern: "*.{bam, cram}" 27 | - bai: 28 | type: file 29 | description: (Optional) Aligned reads file index 30 | pattern: "*.{bai,crai}" 31 | - meta2: 32 | type: map 33 | description: | 34 | Groovy Map containing reference information 35 | e.g. [ id:'genome' ] 36 | - fasta: 37 | type: file 38 | description: Genome fasta file 39 | pattern: "*.{fa,fasta,fna}" 40 | - meta3: 41 | type: map 42 | description: | 43 | Groovy Map containing reference information 44 | e.g. [ id:'genome' ] 45 | - fai: 46 | type: file 47 | description: Genome fasta file index 48 | pattern: "*.{fai}" 49 | - intervallist: 50 | type: file 51 | description: Picard Interval List. Defines which contigs to include. Can be generated from a BED file with GATK BedToIntervalList. 52 | output: 53 | - meta: 54 | type: map 55 | description: | 56 | Groovy Map containing sample information 57 | e.g. [ id:'test', single_end:false ] 58 | - metrics: 59 | type: file 60 | description: Alignment metrics files generated by picard 61 | pattern: "*_{metrics}" 62 | - versions: 63 | type: file 64 | description: File containing software versions 65 | pattern: "versions.yml" 66 | authors: 67 | - "@drpatelh" 68 | - "@flowuenne" 69 | - "@lassefolkersen" 70 | - "@ramprasadn" 71 | maintainers: 72 | - "@drpatelh" 73 | - "@flowuenne" 74 | - "@lassefolkersen" 75 | - "@ramprasadn" 76 | -------------------------------------------------------------------------------- /modules/nf-core/samtools/faidx/environment.yml: -------------------------------------------------------------------------------- 1 | name: samtools_faidx 2 | channels: 3 | - conda-forge 4 | - bioconda 5 | - defaults 6 | dependencies: 7 | - bioconda::samtools=1.17 8 | -------------------------------------------------------------------------------- /modules/nf-core/samtools/faidx/main.nf: -------------------------------------------------------------------------------- 1 | process SAMTOOLS_FAIDX { 2 | tag "$fasta" 3 | label 'process_single' 4 | 5 | conda "${moduleDir}/environment.yml" 6 | container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 
7 | 'https://depot.galaxyproject.org/singularity/samtools:1.17--h00cdaf9_0' : 8 | 'biocontainers/samtools:1.17--h00cdaf9_0' }" 9 | 10 | input: 11 | tuple val(meta), path(fasta) 12 | tuple val(meta2), path(fai) 13 | 14 | output: 15 | tuple val(meta), path ("*.{fa,fasta}") , emit: fa , optional: true 16 | tuple val(meta), path ("*.fai") , emit: fai, optional: true 17 | tuple val(meta), path ("*.gzi") , emit: gzi, optional: true 18 | path "versions.yml" , emit: versions 19 | 20 | when: 21 | task.ext.when == null || task.ext.when 22 | 23 | script: 24 | def args = task.ext.args ?: '' 25 | """ 26 | samtools \\ 27 | faidx \\ 28 | $fasta \\ 29 | $args 30 | 31 | cat <<-END_VERSIONS > versions.yml 32 | "${task.process}": 33 | samtools: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//') 34 | END_VERSIONS 35 | """ 36 | 37 | stub: 38 | def match = (task.ext.args =~ /-o(?:utput)?\s(.*)\s?/).findAll() 39 | def fastacmd = match[0] ? "touch ${match[0][1]}" : '' 40 | """ 41 | ${fastacmd} 42 | touch ${fasta}.fai 43 | 44 | cat <<-END_VERSIONS > versions.yml 45 | 46 | "${task.process}": 47 | samtools: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//') 48 | END_VERSIONS 49 | """ 50 | } 51 | -------------------------------------------------------------------------------- /modules/nf-core/samtools/faidx/meta.yml: -------------------------------------------------------------------------------- 1 | name: samtools_faidx 2 | description: Index FASTA file 3 | keywords: 4 | - index 5 | - fasta 6 | - faidx 7 | tools: 8 | - samtools: 9 | description: | 10 | SAMtools is a set of utilities for interacting with and post-processing 11 | short DNA sequence read alignments in the SAM, BAM and CRAM formats, written by Heng Li. 12 | These files are generated as output by short read aligners like BWA. 13 | homepage: http://www.htslib.org/ 14 | documentation: http://www.htslib.org/doc/samtools.html 15 | doi: 10.1093/bioinformatics/btp352 16 | licence: ["MIT"] 17 | input: 18 | - meta: 19 | type: map 20 | description: | 21 | Groovy Map containing reference information 22 | e.g. [ id:'test' ] 23 | - fasta: 24 | type: file 25 | description: FASTA file 26 | pattern: "*.{fa,fasta}" 27 | - meta2: 28 | type: map 29 | description: | 30 | Groovy Map containing reference information 31 | e.g. [ id:'test' ] 32 | - fai: 33 | type: file 34 | description: FASTA index file 35 | pattern: "*.{fai}" 36 | output: 37 | - meta: 38 | type: map 39 | description: | 40 | Groovy Map containing sample information 41 | e.g. 
[ id:'test', single_end:false ] 42 | - fai: 43 | type: file 44 | description: FASTA index file 45 | pattern: "*.{fai}" 46 | - gzi: 47 | type: file 48 | description: Optional gzip index file for compressed inputs 49 | pattern: "*.gzi" 50 | - versions: 51 | type: file 52 | description: File containing software versions 53 | pattern: "versions.yml" 54 | authors: 55 | - "@drpatelh" 56 | - "@ewels" 57 | - "@phue" 58 | maintainers: 59 | - "@drpatelh" 60 | - "@ewels" 61 | - "@phue" 62 | -------------------------------------------------------------------------------- /modules/nf-core/samtools/index/environment.yml: -------------------------------------------------------------------------------- 1 | name: samtools_index 2 | channels: 3 | - conda-forge 4 | - bioconda 5 | - defaults 6 | dependencies: 7 | - bioconda::samtools=1.17 8 | -------------------------------------------------------------------------------- /modules/nf-core/samtools/index/main.nf: -------------------------------------------------------------------------------- 1 | process SAMTOOLS_INDEX { 2 | tag "$meta.id" 3 | label 'process_low' 4 | 5 | conda "${moduleDir}/environment.yml" 6 | container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 7 | 'https://depot.galaxyproject.org/singularity/samtools:1.17--h00cdaf9_0' : 8 | 'biocontainers/samtools:1.17--h00cdaf9_0' }" 9 | 10 | input: 11 | tuple val(meta), path(input) 12 | 13 | output: 14 | tuple val(meta), path("*.bai") , optional:true, emit: bai 15 | tuple val(meta), path("*.csi") , optional:true, emit: csi 16 | tuple val(meta), path("*.crai"), optional:true, emit: crai 17 | path "versions.yml" , emit: versions 18 | 19 | when: 20 | task.ext.when == null || task.ext.when 21 | 22 | script: 23 | def args = task.ext.args ?: '' 24 | """ 25 | samtools \\ 26 | index \\ 27 | -@ ${task.cpus-1} \\ 28 | $args \\ 29 | $input 30 | 31 | cat <<-END_VERSIONS > versions.yml 32 | "${task.process}": 33 | samtools: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//') 34 | END_VERSIONS 35 | """ 36 | 37 | stub: 38 | """ 39 | touch ${input}.bai 40 | touch ${input}.crai 41 | touch ${input}.csi 42 | 43 | cat <<-END_VERSIONS > versions.yml 44 | "${task.process}": 45 | samtools: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//') 46 | END_VERSIONS 47 | """ 48 | } 49 | -------------------------------------------------------------------------------- /modules/nf-core/samtools/index/meta.yml: -------------------------------------------------------------------------------- 1 | name: samtools_index 2 | description: Index SAM/BAM/CRAM file 3 | keywords: 4 | - index 5 | - bam 6 | - sam 7 | - cram 8 | tools: 9 | - samtools: 10 | description: | 11 | SAMtools is a set of utilities for interacting with and post-processing 12 | short DNA sequence read alignments in the SAM, BAM and CRAM formats, written by Heng Li. 13 | These files are generated as output by short read aligners like BWA. 14 | homepage: http://www.htslib.org/ 15 | documentation: http://www.htslib.org/doc/samtools.html 16 | doi: 10.1093/bioinformatics/btp352 17 | licence: ["MIT"] 18 | input: 19 | - meta: 20 | type: map 21 | description: | 22 | Groovy Map containing sample information 23 | e.g. [ id:'test', single_end:false ] 24 | - bam: 25 | type: file 26 | description: BAM/CRAM/SAM file 27 | pattern: "*.{bam,cram,sam}" 28 | output: 29 | - meta: 30 | type: map 31 | description: | 32 | Groovy Map containing sample information 33 | e.g. 
[ id:'test', single_end:false ] 34 | - bai: 35 | type: file 36 | description: BAM/CRAM/SAM index file 37 | pattern: "*.{bai,crai,sai}" 38 | - crai: 39 | type: file 40 | description: BAM/CRAM/SAM index file 41 | pattern: "*.{bai,crai,sai}" 42 | - csi: 43 | type: file 44 | description: CSI index file 45 | pattern: "*.{csi}" 46 | - versions: 47 | type: file 48 | description: File containing software versions 49 | pattern: "versions.yml" 50 | authors: 51 | - "@drpatelh" 52 | - "@ewels" 53 | - "@maxulysse" 54 | maintainers: 55 | - "@drpatelh" 56 | - "@ewels" 57 | - "@maxulysse" 58 | -------------------------------------------------------------------------------- /modules/nf-core/samtools/index/tests/csi.nextflow.config: -------------------------------------------------------------------------------- 1 | process { 2 | 3 | withName: SAMTOOLS_INDEX { 4 | ext.args = '-c' 5 | } 6 | 7 | } 8 | -------------------------------------------------------------------------------- /modules/nf-core/samtools/index/tests/main.nf.test: -------------------------------------------------------------------------------- 1 | nextflow_process { 2 | 3 | name "Test Process SAMTOOLS_INDEX" 4 | script "../main.nf" 5 | process "SAMTOOLS_INDEX" 6 | tag "modules" 7 | tag "modules_nfcore" 8 | tag "samtools" 9 | tag "samtools/index" 10 | 11 | test("sarscov2 [BAI]") { 12 | 13 | when { 14 | params { 15 | outdir = "$outputDir" 16 | } 17 | process { 18 | """ 19 | input[0] = [ 20 | [ id:'test' ], // meta map 21 | file(params.test_data['sarscov2']['illumina']['test_paired_end_sorted_bam'], checkIfExists: true) 22 | ] 23 | """ 24 | } 25 | } 26 | 27 | then { 28 | assertAll ( 29 | { assert process.success }, 30 | { assert snapshot(process.out.bai).match("bai") }, 31 | { assert path(process.out.versions.get(0)).getText().contains("samtools") } 32 | ) 33 | } 34 | } 35 | 36 | test("homo_sapiens [CRAI]") { 37 | 38 | when { 39 | params { 40 | outdir = "$outputDir" 41 | } 42 | process { 43 | """ 44 | input[0] = [ 45 | [ id:'test' ], // meta map 46 | file(params.test_data['homo_sapiens']['illumina']['test_paired_end_recalibrated_sorted_cram'], checkIfExists: true) 47 | ] 48 | """ 49 | } 50 | } 51 | 52 | then { 53 | assertAll ( 54 | { assert process.success }, 55 | { assert snapshot(process.out.crai).match("crai") }, 56 | { assert path(process.out.versions.get(0)).getText().contains("samtools") } 57 | ) 58 | } 59 | } 60 | 61 | test("homo_sapiens [CSI]") { 62 | 63 | config "./csi.nextflow.config" 64 | 65 | when { 66 | params { 67 | outdir = "$outputDir" 68 | } 69 | process { 70 | """ 71 | input[0] = [ 72 | [ id:'test' ], // meta map 73 | file(params.test_data['sarscov2']['illumina']['test_paired_end_sorted_bam'], checkIfExists: true) 74 | ] 75 | """ 76 | } 77 | } 78 | 79 | then { 80 | assertAll ( 81 | { assert process.success }, 82 | { assert path(process.out.csi.get(0).get(1)).exists() }, 83 | { assert path(process.out.versions.get(0)).getText().contains("samtools") } 84 | ) 85 | } 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /modules/nf-core/samtools/index/tests/main.nf.test.snap: -------------------------------------------------------------------------------- 1 | { 2 | "crai": { 3 | "content": [ 4 | [ 5 | [ 6 | { 7 | "id": "test" 8 | }, 9 | "test.paired_end.recalibrated.sorted.cram.crai:md5,14bc3bd5c89cacc8f4541f9062429029" 10 | ] 11 | ] 12 | ], 13 | "timestamp": "2023-11-15T15:17:37.30801" 14 | }, 15 | "bai": { 16 | "content": [ 17 | [ 18 | [ 19 | { 20 | "id": "test" 21 | }, 22 | 
"test.paired_end.sorted.bam.bai:md5,704c10dd1326482448ca3073fdebc2f4" 23 | ] 24 | ] 25 | ], 26 | "timestamp": "2023-11-15T15:17:30.869234" 27 | } 28 | } -------------------------------------------------------------------------------- /modules/nf-core/samtools/index/tests/tags.yml: -------------------------------------------------------------------------------- 1 | samtools/index: 2 | - modules/nf-core/samtools/index/** 3 | -------------------------------------------------------------------------------- /modules/nf-core/samtools/sort/environment.yml: -------------------------------------------------------------------------------- 1 | name: samtools_sort 2 | channels: 3 | - conda-forge 4 | - bioconda 5 | - defaults 6 | dependencies: 7 | - bioconda::samtools=1.17 8 | -------------------------------------------------------------------------------- /modules/nf-core/samtools/sort/main.nf: -------------------------------------------------------------------------------- 1 | process SAMTOOLS_SORT { 2 | tag "$meta.id" 3 | label 'process_medium' 4 | 5 | conda "${moduleDir}/environment.yml" 6 | container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 7 | 'https://depot.galaxyproject.org/singularity/samtools:1.17--h00cdaf9_0' : 8 | 'biocontainers/samtools:1.17--h00cdaf9_0' }" 9 | 10 | input: 11 | tuple val(meta), path(bam) 12 | 13 | output: 14 | tuple val(meta), path("*.bam"), emit: bam 15 | tuple val(meta), path("*.csi"), emit: csi, optional: true 16 | path "versions.yml" , emit: versions 17 | 18 | when: 19 | task.ext.when == null || task.ext.when 20 | 21 | script: 22 | def args = task.ext.args ?: '' 23 | def prefix = task.ext.prefix ?: "${meta.id}" 24 | if ("$bam" == "${prefix}.bam") error "Input and output names are the same, use \"task.ext.prefix\" to disambiguate!" 25 | """ 26 | samtools sort \\ 27 | $args \\ 28 | -@ $task.cpus \\ 29 | -o ${prefix}.bam \\ 30 | -T $prefix \\ 31 | $bam 32 | 33 | cat <<-END_VERSIONS > versions.yml 34 | "${task.process}": 35 | samtools: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//') 36 | END_VERSIONS 37 | """ 38 | 39 | stub: 40 | def prefix = task.ext.prefix ?: "${meta.id}" 41 | """ 42 | touch ${prefix}.bam 43 | 44 | cat <<-END_VERSIONS > versions.yml 45 | "${task.process}": 46 | samtools: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//') 47 | END_VERSIONS 48 | """ 49 | } 50 | -------------------------------------------------------------------------------- /modules/nf-core/samtools/sort/meta.yml: -------------------------------------------------------------------------------- 1 | name: samtools_sort 2 | description: Sort SAM/BAM/CRAM file 3 | keywords: 4 | - sort 5 | - bam 6 | - sam 7 | - cram 8 | tools: 9 | - samtools: 10 | description: | 11 | SAMtools is a set of utilities for interacting with and post-processing 12 | short DNA sequence read alignments in the SAM, BAM and CRAM formats, written by Heng Li. 13 | These files are generated as output by short read aligners like BWA. 14 | homepage: http://www.htslib.org/ 15 | documentation: http://www.htslib.org/doc/samtools.html 16 | doi: 10.1093/bioinformatics/btp352 17 | licence: ["MIT"] 18 | input: 19 | - meta: 20 | type: map 21 | description: | 22 | Groovy Map containing sample information 23 | e.g. 
[ id:'test', single_end:false ] 24 | - bam: 25 | type: file 26 | description: BAM/CRAM/SAM file 27 | pattern: "*.{bam,cram,sam}" 28 | output: 29 | - meta: 30 | type: map 31 | description: | 32 | Groovy Map containing sample information 33 | e.g. [ id:'test', single_end:false ] 34 | - bam: 35 | type: file 36 | description: Sorted BAM/CRAM/SAM file 37 | pattern: "*.{bam,cram,sam}" 38 | - versions: 39 | type: file 40 | description: File containing software versions 41 | pattern: "versions.yml" 42 | - csi: 43 | type: file 44 | description: BAM index file (optional) 45 | pattern: "*.csi" 46 | authors: 47 | - "@drpatelh" 48 | - "@ewels" 49 | maintainers: 50 | - "@drpatelh" 51 | - "@ewels" 52 | -------------------------------------------------------------------------------- /modules/nf-core/samtools/sort/tests/main.nf.test: -------------------------------------------------------------------------------- 1 | nextflow_process { 2 | 3 | name "Test Process SAMTOOLS_SORT" 4 | script "../main.nf" 5 | process "SAMTOOLS_SORT" 6 | tag "modules" 7 | tag "modules_nfcore" 8 | tag "samtools" 9 | tag "samtools/sort" 10 | 11 | test("test_samtools_sort") { 12 | 13 | config "./nextflow.config" 14 | 15 | when { 16 | params { 17 | outdir = "$outputDir" 18 | } 19 | process { 20 | """ 21 | input[0] = [ 22 | [ id:'test', single_end:false ], 23 | [ 24 | file(params.test_data['sarscov2']['illumina']['test_paired_end_bam'], checkIfExists: true) 25 | ] 26 | ] 27 | """ 28 | } 29 | } 30 | 31 | then { 32 | assertAll ( 33 | { assert process.success }, 34 | { assert snapshot(process.out).match() } 35 | ) 36 | } 37 | 38 | } 39 | 40 | test("test_samtools_sort_stub") { 41 | 42 | config "./nextflow.config" 43 | options "-stub-run" 44 | 45 | when { 46 | params { 47 | outdir = "$outputDir" 48 | } 49 | process { 50 | """ 51 | input[0] = [ 52 | [ id:'test', single_end:false ], 53 | [ 54 | file(params.test_data['sarscov2']['illumina']['test_paired_end_bam'], checkIfExists: true) 55 | ] 56 | ] 57 | """ 58 | } 59 | } 60 | 61 | then { 62 | assertAll ( 63 | { assert process.success }, 64 | { assert snapshot(process.out).match() } 65 | ) 66 | } 67 | 68 | } 69 | 70 | } 71 | -------------------------------------------------------------------------------- /modules/nf-core/samtools/sort/tests/main.nf.test.snap: -------------------------------------------------------------------------------- 1 | { 2 | "test_samtools_sort": { 3 | "content": [ 4 | { 5 | "0": [ 6 | [ 7 | { 8 | "id": "test", 9 | "single_end": false 10 | }, 11 | "test.sorted.bam:md5,a29570e7607d217c2fa4d75829e09cd7" 12 | ] 13 | ], 14 | "1": [ 15 | 16 | ], 17 | "2": [ 18 | "versions.yml:md5,46f7a36082fa1f68285fe30d689244e8" 19 | ], 20 | "bam": [ 21 | [ 22 | { 23 | "id": "test", 24 | "single_end": false 25 | }, 26 | "test.sorted.bam:md5,a29570e7607d217c2fa4d75829e09cd7" 27 | ] 28 | ], 29 | "csi": [ 30 | 31 | ], 32 | "versions": [ 33 | "versions.yml:md5,46f7a36082fa1f68285fe30d689244e8" 34 | ] 35 | } 36 | ], 37 | "timestamp": "2023-10-17T17:21:46.5427968" 38 | } 39 | } -------------------------------------------------------------------------------- /modules/nf-core/samtools/sort/tests/nextflow.config: -------------------------------------------------------------------------------- 1 | process { 2 | 3 | withName: SAMTOOLS_SORT { 4 | ext.prefix = { "${meta.id}.sorted" } 5 | } 6 | 7 | } 8 | -------------------------------------------------------------------------------- /modules/nf-core/samtools/sort/tests/tags.yml: 
-------------------------------------------------------------------------------- 1 | samtools/sort: 2 | - modules/nf-core/samtools/sort/** 3 | - tests/modules/nf-core/samtools/sort/** 4 | -------------------------------------------------------------------------------- /modules/nf-core/samtools/view/environment.yml: -------------------------------------------------------------------------------- 1 | name: samtools_view 2 | channels: 3 | - conda-forge 4 | - bioconda 5 | - defaults 6 | dependencies: 7 | - bioconda::samtools=1.17 8 | -------------------------------------------------------------------------------- /modules/nf-core/samtools/view/main.nf: -------------------------------------------------------------------------------- 1 | process SAMTOOLS_VIEW { 2 | tag "$meta.id" 3 | label 'process_low' 4 | 5 | conda "${moduleDir}/environment.yml" 6 | container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 7 | 'https://depot.galaxyproject.org/singularity/samtools:1.17--h00cdaf9_0' : 8 | 'biocontainers/samtools:1.17--h00cdaf9_0' }" 9 | 10 | input: 11 | tuple val(meta), path(input), path(index) 12 | tuple val(meta2), path(fasta) 13 | path qname 14 | 15 | output: 16 | tuple val(meta), path("*.bam"), emit: bam, optional: true 17 | tuple val(meta), path("*.cram"), emit: cram, optional: true 18 | tuple val(meta), path("*.sam"), emit: sam, optional: true 19 | tuple val(meta), path("*.bai"), emit: bai, optional: true 20 | tuple val(meta), path("*.csi"), emit: csi, optional: true 21 | tuple val(meta), path("*.crai"), emit: crai, optional: true 22 | path "versions.yml", emit: versions 23 | 24 | when: 25 | task.ext.when == null || task.ext.when 26 | 27 | script: 28 | def args = task.ext.args ?: '' 29 | def args2 = task.ext.args2 ?: '' 30 | def prefix = task.ext.prefix ?: "${meta.id}" 31 | def reference = fasta ? "--reference ${fasta}" : "" 32 | def readnames = qname ? "--qname-file ${qname}": "" 33 | def file_type = args.contains("--output-fmt sam") ? "sam" : 34 | args.contains("--output-fmt bam") ? "bam" : 35 | args.contains("--output-fmt cram") ? "cram" : 36 | input.getExtension() 37 | if ("$input" == "${prefix}.${file_type}") error "Input and output names are the same, use \"task.ext.prefix\" to disambiguate!" 38 | """ 39 | samtools \\ 40 | view \\ 41 | --threads ${task.cpus-1} \\ 42 | ${reference} \\ 43 | ${readnames} \\ 44 | $args \\ 45 | -o ${prefix}.${file_type} \\ 46 | $input \\ 47 | $args2 48 | 49 | cat <<-END_VERSIONS > versions.yml 50 | "${task.process}": 51 | samtools: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//') 52 | END_VERSIONS 53 | """ 54 | 55 | stub: 56 | def prefix = task.ext.prefix ?: "${meta.id}" 57 | """ 58 | touch ${prefix}.bam 59 | touch ${prefix}.cram 60 | 61 | cat <<-END_VERSIONS > versions.yml 62 | "${task.process}": 63 | samtools: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//') 64 | END_VERSIONS 65 | """ 66 | } 67 | -------------------------------------------------------------------------------- /modules/nf-core/samtools/view/meta.yml: -------------------------------------------------------------------------------- 1 | name: samtools_view 2 | description: filter/convert SAM/BAM/CRAM file 3 | keywords: 4 | - view 5 | - bam 6 | - sam 7 | - cram 8 | tools: 9 | - samtools: 10 | description: | 11 | SAMtools is a set of utilities for interacting with and post-processing 12 | short DNA sequence read alignments in the SAM, BAM and CRAM formats, written by Heng Li. 
13 | These files are generated as output by short read aligners like BWA. 14 | homepage: http://www.htslib.org/ 15 | documentation: http://www.htslib.org/doc/samtools.html 16 | doi: 10.1093/bioinformatics/btp352 17 | licence: ["MIT"] 18 | input: 19 | - meta: 20 | type: map 21 | description: | 22 | Groovy Map containing sample information 23 | e.g. [ id:'test', single_end:false ] 24 | - input: 25 | type: file 26 | description: BAM/CRAM/SAM file 27 | pattern: "*.{bam,cram,sam}" 28 | - index: 29 | type: file 30 | description: BAM.BAI/BAM.CSI/CRAM.CRAI file (optional) 31 | pattern: "*.{bai,csi,crai}" 32 | - meta2: 33 | type: map 34 | description: | 35 | Groovy Map containing reference information 36 | e.g. [ id:'test' ] 37 | - fasta: 38 | type: file 39 | description: Reference file the CRAM was created with (optional) 40 | pattern: "*.{fasta,fa}" 41 | - qname: 42 | type: file 43 | description: Optional file with read names to output only select alignments 44 | pattern: "*.{txt,list}" 45 | output: 46 | - meta: 47 | type: map 48 | description: | 49 | Groovy Map containing sample information 50 | e.g. [ id:'test', single_end:false ] 51 | - bam: 52 | type: file 53 | description: optional filtered/converted BAM file 54 | pattern: "*.{bam}" 55 | - cram: 56 | type: file 57 | description: optional filtered/converted CRAM file 58 | pattern: "*.{cram}" 59 | - sam: 60 | type: file 61 | description: optional filtered/converted SAM file 62 | pattern: "*.{sam}" 63 | # bai, csi, and crai are created with `--write-index` 64 | - bai: 65 | type: file 66 | description: optional BAM file index 67 | pattern: "*.{bai}" 68 | - csi: 69 | type: file 70 | description: optional BAM file index (CSI format) 71 | pattern: "*.{csi}" 72 | - crai: 73 | type: file 74 | description: optional CRAM file index 75 | pattern: "*.{crai}" 76 | - versions: 77 | type: file 78 | description: File containing software versions 79 | pattern: "versions.yml" 80 | authors: 81 | - "@drpatelh" 82 | - "@joseespinosa" 83 | - "@FriederikeHanssen" 84 | - "@priyanka-surana" 85 | maintainers: 86 | - "@drpatelh" 87 | - "@joseespinosa" 88 | - "@FriederikeHanssen" 89 | - "@priyanka-surana" 90 | -------------------------------------------------------------------------------- /modules/nf-core/star/align/environment.yml: -------------------------------------------------------------------------------- 1 | name: star_align 2 | channels: 3 | - conda-forge 4 | - bioconda 5 | - defaults 6 | dependencies: 7 | - bioconda::star=2.7.10a 8 | - bioconda::samtools=1.16.1 9 | - conda-forge::gawk=5.1.0 10 | -------------------------------------------------------------------------------- /modules/nf-core/star/align/tests/nextflow.arriba.config: -------------------------------------------------------------------------------- 1 | process { 2 | 3 | withName: STAR_GENOMEGENERATE { 4 | ext.args = '--genomeSAindexNbases 9' 5 | } 6 | 7 | withName: STAR_ALIGN { 8 | ext.args = '--readFilesCommand zcat --outSAMtype BAM Unsorted --outSAMunmapped Within --outBAMcompression 0 --outFilterMultimapNmax 50 --peOverlapNbasesMin 10 --alignSplicedMateMapLminOverLmate 0.5 --alignSJstitchMismatchNmax 5 -1 5 5 --chimSegmentMin 10 --chimOutType WithinBAM HardClip --chimJunctionOverhangMin 10 --chimScoreDropMax 30 --chimScoreJunctionNonGTAG 0 --chimScoreSeparation 1 --chimSegmentReadGapMax 3 --chimMultimapNmax 50' 9 | } 10 | 11 | } 12 | 13 | // Fix chown issue for the output star folder 14 | docker.runOptions = '--platform=linux/amd64 -u $(id -u):$(id -g)' 15 | 
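16 | // The STAR_ALIGN ext.args above broadly follow the STAR parameters recommended in the Arriba documentation: 17 | // '--chimOutType WithinBAM HardClip' keeps chimeric alignments inside the main BAM, where Arriba reads them, 18 | // and '--outBAMcompression 0' leaves this intermediate BAM uncompressed for speed. 19 | 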
-------------------------------------------------------------------------------- /modules/nf-core/star/align/tests/nextflow.config: -------------------------------------------------------------------------------- 1 | process { 2 | 3 | withName: STAR_GENOMEGENERATE { 4 | ext.args = '--genomeSAindexNbases 9' 5 | } 6 | 7 | withName: STAR_ALIGN { 8 | ext.args = '--readFilesCommand zcat --outSAMtype BAM SortedByCoordinate --outWigType bedGraph --outWigStrand Unstranded' 9 | } 10 | 11 | } 12 | 13 | // Fix chown issue for the output star folder 14 | docker.runOptions = '--platform=linux/amd64 -u $(id -u):$(id -g)' 15 | -------------------------------------------------------------------------------- /modules/nf-core/star/align/tests/nextflow.starfusion.config: -------------------------------------------------------------------------------- 1 | process { 2 | 3 | withName: STAR_GENOMEGENERATE { 4 | ext.args = '--genomeSAindexNbases 9' 5 | } 6 | 7 | withName: STAR_ALIGN { 8 | ext.args = '--readFilesCommand zcat --outSAMtype BAM Unsorted --outReadsUnmapped None --twopassMode Basic --outSAMstrandField intronMotif --outSAMunmapped Within --chimSegmentMin 12 --chimJunctionOverhangMin 8 --chimOutJunctionFormat 1 --alignSJDBoverhangMin 10 --alignMatesGapMax 100000 --alignIntronMax 100000 --alignSJstitchMismatchNmax 5 -1 5 5 --chimMultimapScoreRange 3 --chimScoreJunctionNonGTAG -4 --chimMultimapNmax 20 --chimNonchimScoreDropMin 10 --peOverlapNbasesMin 12 --peOverlapMMp 0.1 --alignInsertionFlush Right --alignSplicedMateMapLminOverLmate 0 --alignSplicedMateMapLmin 30' 9 | } 10 | 11 | } 12 | 13 | // Fix chown issue for the output star folder 14 | docker.runOptions = '--platform=linux/amd64 -u $(id -u):$(id -g)' 15 | -------------------------------------------------------------------------------- /modules/nf-core/star/align/tests/tags.yml: -------------------------------------------------------------------------------- 1 | star/align: 2 | - modules/nf-core/star/align/** 3 | -------------------------------------------------------------------------------- /modules/nf-core/star/genomegenerate/environment.yml: -------------------------------------------------------------------------------- 1 | name: star_genomegenerate 2 | channels: 3 | - conda-forge 4 | - bioconda 5 | - defaults 6 | dependencies: 7 | - bioconda::star=2.7.10a 8 | - bioconda::samtools=1.16.1 9 | - conda-forge::gawk=5.1.0 10 | -------------------------------------------------------------------------------- /modules/nf-core/star/genomegenerate/meta.yml: -------------------------------------------------------------------------------- 1 | name: star_genomegenerate 2 | description: Create index for STAR 3 | keywords: 4 | - index 5 | - fasta 6 | - genome 7 | - reference 8 | tools: 9 | - star: 10 | description: | 11 | STAR is a software package for mapping DNA sequences against 12 | a large reference genome, such as the human genome. 13 | homepage: https://github.com/alexdobin/STAR 14 | manual: https://github.com/alexdobin/STAR/blob/master/doc/STARmanual.pdf 15 | doi: 10.1093/bioinformatics/bts635 16 | licence: ["MIT"] 17 | input: 18 | - meta: 19 | type: map 20 | description: | 21 | Groovy Map containing sample information 22 | e.g. [ id:'test', single_end:false ] 23 | - fasta: 24 | type: file 25 | description: Fasta file of the reference genome 26 | - meta2: 27 | type: map 28 | description: | 29 | Groovy Map containing reference information 30 | e.g. 
[ id:'test' ] 31 | - gtf: 32 | type: file 33 | description: GTF file of the reference genome 34 | output: 35 | - meta: 36 | type: map 37 | description: | 38 | Groovy Map containing sample information 39 | e.g. [ id:'test', single_end:false ] 40 | - index: 41 | type: directory 42 | description: Folder containing the star index files 43 | pattern: "star" 44 | - versions: 45 | type: file 46 | description: File containing software versions 47 | pattern: "versions.yml" 48 | authors: 49 | - "@kevinmenden" 50 | - "@drpatelh" 51 | maintainers: 52 | - "@kevinmenden" 53 | - "@drpatelh" 54 | -------------------------------------------------------------------------------- /modules/nf-core/star/genomegenerate/tests/main.nf.test: -------------------------------------------------------------------------------- 1 | nextflow_process { 2 | 3 | name "Test Process STAR_GENOMEGENERATE" 4 | script "../main.nf" 5 | process "STAR_GENOMEGENERATE" 6 | tag "modules" 7 | tag "modules_nfcore" 8 | tag "star" 9 | tag "star/genomegenerate" 10 | 11 | test("homo_sapiens") { 12 | 13 | when { 14 | process { 15 | """ 16 | input[0] = Channel.of([ 17 | [ id:'test_fasta' ], 18 | [file(params.test_data['homo_sapiens']['genome']['genome_fasta'], checkIfExists: true)] 19 | ]) 20 | input[1] = Channel.of([ 21 | [ id:'test_gtf' ], 22 | [file(params.test_data['homo_sapiens']['genome']['genome_gtf'], checkIfExists: true)] 23 | ]) 24 | """ 25 | } 26 | } 27 | 28 | then { 29 | assertAll( 30 | { assert process.success }, 31 | { assert snapshot(file(process.out.index[0][1]).name).match("index") }, 32 | { assert snapshot(process.out.versions).match("versions") } 33 | ) 34 | } 35 | 36 | } 37 | 38 | } -------------------------------------------------------------------------------- /modules/nf-core/star/genomegenerate/tests/main.nf.test.snap: -------------------------------------------------------------------------------- 1 | { 2 | "versions": { 3 | "content": [ 4 | [ 5 | "versions.yml:md5,9c11319b80fdedc90dadce4e0fb42ded" 6 | ] 7 | ], 8 | "timestamp": "2023-11-23T11:18:14.835118" 9 | }, 10 | "index": { 11 | "content": [ 12 | "star" 13 | ], 14 | "timestamp": "2023-11-23T11:31:47.560528" 15 | } 16 | } -------------------------------------------------------------------------------- /modules/nf-core/star/genomegenerate/tests/tags.yml: -------------------------------------------------------------------------------- 1 | star/genomegenerate: 2 | - modules/nf-core/star/genomegenerate/** 3 | -------------------------------------------------------------------------------- /modules/nf-core/stringtie/merge/environment.yml: -------------------------------------------------------------------------------- 1 | name: stringtie_merge 2 | channels: 3 | - conda-forge 4 | - bioconda 5 | - defaults 6 | dependencies: 7 | - bioconda::stringtie=2.2.1 8 | -------------------------------------------------------------------------------- /modules/nf-core/stringtie/merge/main.nf: -------------------------------------------------------------------------------- 1 | process STRINGTIE_MERGE { 2 | label 'process_medium' 3 | 4 | 5 | conda "${moduleDir}/environment.yml" 6 | container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
7 | 'https://depot.galaxyproject.org/singularity/stringtie:2.2.1--hecb563c_2' : 8 | 'biocontainers/stringtie:2.2.1--hecb563c_2' }" 9 | 10 | input: 11 | path stringtie_gtf 12 | path annotation_gtf 13 | 14 | output: 15 | path "stringtie.merged.gtf", emit: gtf 16 | path "versions.yml" , emit: versions 17 | 18 | when: 19 | task.ext.when == null || task.ext.when 20 | 21 | script: 22 | def args = task.ext.args ?: '' 23 | def reference = annotation_gtf ? "-G $annotation_gtf" : "" 24 | """ 25 | stringtie \\ 26 | --merge $stringtie_gtf \\ 27 | $reference \\ 28 | -o stringtie.merged.gtf \\ 29 | $args 30 | 31 | cat <<-END_VERSIONS > versions.yml 32 | "${task.process}": 33 | stringtie: \$(stringtie --version 2>&1) 34 | END_VERSIONS 35 | """ 36 | 37 | stub: 38 | """ 39 | touch stringtie.merged.gtf 40 | 41 | cat <<-END_VERSIONS > versions.yml 42 | "${task.process}": 43 | stringtie: \$(stringtie --version 2>&1) 44 | END_VERSIONS 45 | """ 46 | } 47 | -------------------------------------------------------------------------------- /modules/nf-core/stringtie/merge/meta.yml: -------------------------------------------------------------------------------- 1 | name: stringtie_merge 2 | description: Merges the annotation gtf file and the stringtie output gtf files 3 | keywords: 4 | - merge 5 | - gtf 6 | - reference 7 | tools: 8 | - stringtie2: 9 | description: | 10 | Transcript assembly and quantification for RNA-Seq 11 | homepage: https://ccb.jhu.edu/software/stringtie/index.shtml 12 | documentation: https://ccb.jhu.edu/software/stringtie/index.shtml?t=manual 13 | licence: ["MIT"] 14 | input: 15 | - stringtie_gtf: 16 | type: file 17 | description: | 18 | Stringtie transcript gtf output(s). 19 | pattern: "*.gtf" 20 | - annotation_gtf: 21 | type: file 22 | description: | 23 | Annotation gtf file (optional). 24 | pattern: "*.gtf" 25 | output: 26 | - merged_gtf: 27 | type: file 28 | description: | 29 | Merged gtf from annotation and stringtie output gtfs.
30 | pattern: "*.gtf" 31 | - versions: 32 | type: file 33 | description: File containing software versions 34 | pattern: "versions.yml" 35 | authors: 36 | - "@yuukiiwa" 37 | maintainers: 38 | - "@yuukiiwa" 39 | -------------------------------------------------------------------------------- /modules/nf-core/stringtie/merge/tests/main.nf.test.snap: -------------------------------------------------------------------------------- 1 | { 2 | "rs_versions": { 3 | "content": [ 4 | [ 5 | "versions.yml:md5,b73d45fdebf4c8c446bb01817db1665d" 6 | ] 7 | ], 8 | "timestamp": "2023-11-23T14:14:39.697712988" 9 | }, 10 | "rs_gtf": { 11 | "content": [ 12 | [ 13 | "stringtie.merged.gtf:md5,6da479298d73d5b3216d4e1576a2bdf4" 14 | ] 15 | ], 16 | "timestamp": "2023-11-23T14:14:39.691894799" 17 | }, 18 | "fs_gtf": { 19 | "content": [ 20 | [ 21 | "stringtie.merged.gtf:md5,d959eb2fab0db48ded7275e0a2e83c05" 22 | ] 23 | ], 24 | "timestamp": "2023-11-23T14:14:20.872841278" 25 | }, 26 | "fs_versions": { 27 | "content": [ 28 | [ 29 | "versions.yml:md5,b73d45fdebf4c8c446bb01817db1665d" 30 | ] 31 | ], 32 | "timestamp": "2023-11-23T14:14:20.883140097" 33 | } 34 | } -------------------------------------------------------------------------------- /modules/nf-core/stringtie/merge/tests/tags.yml: -------------------------------------------------------------------------------- 1 | stringtie/merge: 2 | - modules/nf-core/stringtie/merge/** 3 | -------------------------------------------------------------------------------- /modules/nf-core/stringtie/stringtie/environment.yml: -------------------------------------------------------------------------------- 1 | name: stringtie_stringtie 2 | channels: 3 | - conda-forge 4 | - bioconda 5 | - defaults 6 | dependencies: 7 | - bioconda::stringtie=2.2.1 8 | -------------------------------------------------------------------------------- /modules/nf-core/stringtie/stringtie/main.nf: -------------------------------------------------------------------------------- 1 | process STRINGTIE_STRINGTIE { 2 | tag "$meta.id" 3 | label 'process_medium' 4 | 5 | conda "${moduleDir}/environment.yml" 6 | container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 7 | 'https://depot.galaxyproject.org/singularity/stringtie:2.2.1--hecb563c_2' : 8 | 'biocontainers/stringtie:2.2.1--hecb563c_2' }" 9 | 10 | input: 11 | tuple val(meta), path(bam) 12 | path annotation_gtf 13 | 14 | output: 15 | tuple val(meta), path("*.transcripts.gtf"), emit: transcript_gtf 16 | tuple val(meta), path("*.abundance.txt") , emit: abundance 17 | tuple val(meta), path("*.coverage.gtf") , optional: true, emit: coverage_gtf 18 | tuple val(meta), path("*.ballgown") , optional: true, emit: ballgown 19 | path "versions.yml" , emit: versions 20 | 21 | when: 22 | task.ext.when == null || task.ext.when 23 | 24 | script: 25 | def args = task.ext.args ?: '' 26 | def prefix = task.ext.prefix ?: "${meta.id}" 27 | def reference = annotation_gtf ? "-G $annotation_gtf" : "" 28 | def ballgown = annotation_gtf ? "-b ${prefix}.ballgown" : "" 29 | def coverage = annotation_gtf ?
"-C ${prefix}.coverage.gtf" : "" 30 | 31 | def strandedness = '' 32 | if (meta.strandedness == 'forward') { 33 | strandedness = '--fr' 34 | } else if (meta.strandedness == 'reverse') { 35 | strandedness = '--rf' 36 | } 37 | """ 38 | stringtie \\ 39 | $bam \\ 40 | $strandedness \\ 41 | $reference \\ 42 | -o ${prefix}.transcripts.gtf \\ 43 | -A ${prefix}.gene.abundance.txt \\ 44 | $coverage \\ 45 | $ballgown \\ 46 | -p $task.cpus \\ 47 | $args 48 | 49 | cat <<-END_VERSIONS > versions.yml 50 | "${task.process}": 51 | stringtie: \$(stringtie --version 2>&1) 52 | END_VERSIONS 53 | """ 54 | 55 | stub: 56 | def prefix = task.ext.prefix ?: "${meta.id}" 57 | """ 58 | touch ${prefix}.transcripts.gtf 59 | touch ${prefix}.gene.abundance.txt 60 | touch ${prefix}.coverage.gtf 61 | touch ${prefix}.ballgown 62 | 63 | cat <<-END_VERSIONS > versions.yml 64 | "${task.process}": 65 | stringtie: \$(stringtie --version 2>&1) 66 | END_VERSIONS 67 | """ 68 | } 69 | -------------------------------------------------------------------------------- /modules/nf-core/stringtie/stringtie/meta.yml: -------------------------------------------------------------------------------- 1 | name: stringtie_stringtie 2 | description: Transcript assembly and quantification for RNA-Seq 3 | keywords: 4 | - transcript 5 | - assembly 6 | - quantification 7 | - gtf 8 | tools: 9 | - stringtie2: 10 | description: | 11 | Transcript assembly and quantification for RNA-Seq 12 | homepage: https://ccb.jhu.edu/software/stringtie/index.shtml 13 | documentation: https://ccb.jhu.edu/software/stringtie/index.shtml?t=manual 14 | licence: ["MIT"] 15 | input: 16 | - meta: 17 | type: map 18 | description: | 19 | Groovy Map containing sample information 20 | e.g. [ id:'test', single_end:false ] 21 | - bam: 22 | type: file 23 | description: | 24 | Sorted BAM file with RNA-Seq alignments to assemble transcripts from. 25 | - annotation_gtf: 26 | type: file 27 | description: | 28 | Annotation gtf file (optional). 29 | output: 30 | - meta: 31 | type: map 32 | description: | 33 | Groovy Map containing sample information 34 | e.g. [ id:'test', single_end:false ] 35 | - transcript_gtf: 36 | type: file 37 | description: transcript gtf 38 | pattern: "*.{transcripts.gtf}" 39 | - coverage_gtf: 40 | type: file 41 | description: coverage gtf 42 | pattern: "*.{coverage.gtf}" 43 | - abundance: 44 | type: file 45 | description: gene abundance estimates 46 | pattern: "*.{abundance.txt}" 47 | - ballgown: 48 | type: file 49 | description: for running ballgown 50 | pattern: "*.{ballgown}" 51 | - versions: 52 | type: file 53 | description: File containing software versions 54 | pattern: "versions.yml" 55 | authors: 56 | - "@drpatelh" 57 | maintainers: 58 | - "@drpatelh" 59 | -------------------------------------------------------------------------------- /modules/nf-core/stringtie/stringtie/tests/tags.yml: -------------------------------------------------------------------------------- 1 | stringtie/stringtie: 2 | - modules/nf-core/stringtie/stringtie/** 3 | -------------------------------------------------------------------------------- /nf-test.config: -------------------------------------------------------------------------------- 1 | config { 2 | 3 | testsDir "tests" 4 | workDir ".nf-test" 5 | configFile "tests/nextflow.config" 6 | profile "" 7 | 8 | } 9 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | # Config file for Python. Mostly used to configure linting of bin/*.py with Ruff.
2 | # Should be kept the same as nf-core/tools to avoid fighting with template synchronisation. 3 | [tool.ruff] 4 | line-length = 120 5 | target-version = "py38" 6 | cache-dir = "~/.cache/ruff" 7 | 8 | [tool.ruff.lint] 9 | select = ["I", "E1", "E4", "E7", "E9", "F", "UP", "N"] 10 | 11 | [tool.ruff.lint.isort] 12 | known-first-party = ["nf_core"] 13 | 14 | [tool.ruff.lint.per-file-ignores] 15 | "__init__.py" = ["E402", "F401"] 16 | -------------------------------------------------------------------------------- /subworkflows/local/fusioncatcher_workflow.nf: -------------------------------------------------------------------------------- 1 | include { FUSIONCATCHER } from '../../modules/local/fusioncatcher/detect/main' 2 | 3 | 4 | workflow FUSIONCATCHER_WORKFLOW { 5 | take: 6 | reads 7 | 8 | main: 9 | ch_versions = Channel.empty() 10 | ch_dummy_file = file("$baseDir/assets/dummy_file_fusioncatcher.txt", checkIfExists: true) 11 | 12 | if ((params.fusioncatcher || params.all) && !params.fusioninspector_only) { 13 | if (params.fusioncatcher_fusions){ 14 | ch_fusioncatcher_fusions = reads.combine(Channel.value(file(params.fusioncatcher_fusions, checkIfExists:true))) 15 | .map { meta, reads, fusions -> [ meta, fusions ] } 16 | } else { 17 | FUSIONCATCHER ( 18 | reads, 19 | params.fusioncatcher_ref 20 | ) 21 | ch_fusioncatcher_fusions = FUSIONCATCHER.out.fusions 22 | ch_versions = ch_versions.mix(FUSIONCATCHER.out.versions) 23 | } 24 | } 25 | else { 26 | ch_fusioncatcher_fusions = reads.combine(Channel.value(file(ch_dummy_file, checkIfExists:true))) 27 | .map { meta, reads, fusions -> [ meta, fusions ] } 28 | } 29 | 30 | emit: 31 | fusions = ch_fusioncatcher_fusions 32 | versions = ch_versions 33 | } 34 | 35 | -------------------------------------------------------------------------------- /subworkflows/local/fusionreport_workflow.nf: -------------------------------------------------------------------------------- 1 | include { FUSIONREPORT } from '../../modules/local/fusionreport/detect/main' 2 | 3 | 4 | workflow FUSIONREPORT_WORKFLOW { 5 | take: 6 | reads 7 | fusionreport_ref 8 | arriba_fusions 9 | starfusion_fusions 10 | fusioncatcher_fusions 11 | 12 | main: 13 | ch_versions = Channel.empty() 14 | ch_report = Channel.empty() 15 | ch_csv = Channel.empty() 16 | 17 | if (!params.fusioninspector_only) { 18 | reads_fusions = reads 19 | .join(arriba_fusions, remainder: true) 20 | .join(starfusion_fusions, remainder: true) 21 | .join(fusioncatcher_fusions, remainder: true) 22 | 23 | FUSIONREPORT(reads_fusions, fusionreport_ref, params.tools_cutoff) 24 | ch_fusion_list = FUSIONREPORT.out.fusion_list 25 | ch_fusion_list_filtered = FUSIONREPORT.out.fusion_list_filtered 26 | ch_versions = ch_versions.mix(FUSIONREPORT.out.versions) 27 | ch_report = FUSIONREPORT.out.report 28 | ch_csv = FUSIONREPORT.out.csv 29 | } else { 30 | ch_fusion_list = reads.combine(Channel.value(file(params.fusioninspector_fusions, checkIfExists:true))) 31 | .map { meta, reads, fusions -> [ meta, fusions ] } 32 | 33 | ch_fusion_list_filtered = ch_fusion_list 34 | } 35 | 36 | emit: 37 | versions = ch_versions 38 | fusion_list = ch_fusion_list 39 | fusion_list_filtered = ch_fusion_list_filtered 40 | report = ch_report.ifEmpty(null) 41 | csv = ch_csv.ifEmpty(null) 42 | 43 | } 44 | 45 | -------------------------------------------------------------------------------- /subworkflows/local/qc_workflow.nf: -------------------------------------------------------------------------------- 1 | // 2 | // Collect QC metrics from the sorted alignments with Picard CollectRnaSeqMetrics,
GATK4 MarkDuplicates and Picard CollectInsertSizeMetrics 3 | // 4 | 5 | include { PICARD_COLLECTRNASEQMETRICS } from '../../modules/local/picard/collectrnaseqmetrics/main' 6 | include { GATK4_MARKDUPLICATES } from '../../modules/nf-core/gatk4/markduplicates/main' 7 | include { PICARD_COLLECTINSERTSIZEMETRICS } from '../../modules/nf-core/picard/collectinsertsizemetrics/main' 8 | 9 | workflow QC_WORKFLOW { 10 | take: 11 | ch_bam_sorted 12 | ch_bam_sorted_indexed 13 | ch_chrgtf 14 | ch_refflat 15 | ch_fasta 16 | ch_fai 17 | ch_rrna_interval 18 | 19 | main: 20 | ch_versions = Channel.empty() 21 | 22 | PICARD_COLLECTRNASEQMETRICS(ch_bam_sorted_indexed, ch_refflat, ch_rrna_interval) 23 | ch_versions = ch_versions.mix(PICARD_COLLECTRNASEQMETRICS.out.versions) 24 | ch_rnaseq_metrics = Channel.empty().mix(PICARD_COLLECTRNASEQMETRICS.out.metrics) 25 | 26 | GATK4_MARKDUPLICATES(ch_bam_sorted, ch_fasta.map { meta, fasta -> [ fasta ]}, ch_fai.map { meta, fasta_fai -> [ fasta_fai ]}) 27 | ch_versions = ch_versions.mix(GATK4_MARKDUPLICATES.out.versions) 28 | ch_duplicate_metrics = Channel.empty().mix(GATK4_MARKDUPLICATES.out.metrics) 29 | 30 | PICARD_COLLECTINSERTSIZEMETRICS(ch_bam_sorted) 31 | ch_versions = ch_versions.mix(PICARD_COLLECTINSERTSIZEMETRICS.out.versions) 32 | ch_insertsize_metrics = Channel.empty().mix(PICARD_COLLECTINSERTSIZEMETRICS.out.metrics) 33 | 34 | 35 | emit: 36 | versions = ch_versions 37 | rnaseq_metrics = ch_rnaseq_metrics 38 | duplicate_metrics = ch_duplicate_metrics 39 | insertsize_metrics = ch_insertsize_metrics 40 | 41 | } 42 | 43 | -------------------------------------------------------------------------------- /subworkflows/local/stringtie_workflow.nf: -------------------------------------------------------------------------------- 1 | include { STRINGTIE_STRINGTIE } from '../../modules/nf-core/stringtie/stringtie/main' 2 | include { STRINGTIE_MERGE } from '../../modules/nf-core/stringtie/merge/main' 3 | 4 | 5 | workflow STRINGTIE_WORKFLOW { 6 | take: 7 | bam_sorted 8 | ch_chrgtf 9 | 10 | main: 11 | ch_versions = Channel.empty() 12 | ch_stringtie_gtf = Channel.empty() 13 | 14 | if ((params.stringtie || params.all) && !params.fusioninspector_only) { 15 | STRINGTIE_STRINGTIE(bam_sorted, ch_chrgtf.map { meta, gtf -> [ gtf ]}) 16 | ch_versions = ch_versions.mix(STRINGTIE_STRINGTIE.out.versions) 17 | 18 | STRINGTIE_STRINGTIE 19 | .out 20 | .transcript_gtf 21 | .map { it -> it[1] } 22 | .set { stringtie_gtf } 23 | 24 | 25 | 26 | STRINGTIE_MERGE (stringtie_gtf, ch_chrgtf.map { meta, gtf -> [ gtf ]}) 27 | ch_versions = ch_versions.mix(STRINGTIE_MERGE.out.versions) 28 | ch_stringtie_gtf = STRINGTIE_MERGE.out.gtf 29 | } 30 | 31 | emit: 32 | stringtie_gtf = ch_stringtie_gtf.ifEmpty(null) 33 | versions = ch_versions 34 | 35 | } 36 | 37 | -------------------------------------------------------------------------------- /subworkflows/local/trim_workflow.nf: -------------------------------------------------------------------------------- 1 | include { FASTP } from '../../modules/nf-core/fastp/main' 2 | include { FASTQC as FASTQC_FOR_FASTP } from '../../modules/nf-core/fastqc/main' 3 | 4 | 5 | workflow TRIM_WORKFLOW { 6 | take: 7 | reads 8 | 9 | main: 10 | ch_versions = Channel.empty() 11 | ch_fastp_html = Channel.empty() 12 | ch_fastp_json = Channel.empty() 13 | ch_fastqc_trimmed = Channel.empty() 14 | 15 | if (params.fastp_trim) { 16 | FASTP(reads, params.adapter_fasta, false, false) 17 | ch_versions = ch_versions.mix(FASTP.out.versions) 18 | 19 | 
FASTQC_FOR_FASTP(FASTP.out.reads) 20 | ch_versions = ch_versions.mix(FASTQC_FOR_FASTP.out.versions) 21 | 22 | ch_reads_all = FASTP.out.reads 23 | ch_reads_fusioncatcher = ch_reads_all 24 | ch_fastp_html = FASTP.out.html 25 | ch_fastp_json = FASTP.out.json 26 | ch_fastqc_trimmed = FASTQC_FOR_FASTP.out.zip 27 | 28 | } 29 | else { 30 | ch_reads_all = reads 31 | ch_reads_fusioncatcher = reads 32 | } 33 | 34 | emit: 35 | ch_reads_all 36 | ch_reads_fusioncatcher 37 | ch_fastp_html 38 | ch_fastp_json 39 | ch_fastqc_trimmed 40 | versions = ch_versions 41 | } 42 | 43 | -------------------------------------------------------------------------------- /subworkflows/nf-core/utils_nextflow_pipeline/meta.yml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/subworkflows/yaml-schema.json 2 | name: "UTILS_NEXTFLOW_PIPELINE" 3 | description: Subworkflow with functionality that may be useful for any Nextflow pipeline 4 | keywords: 5 | - utility 6 | - pipeline 7 | - initialise 8 | - version 9 | components: [] 10 | input: 11 | - print_version: 12 | type: boolean 13 | description: | 14 | Print the version of the pipeline and exit 15 | - dump_parameters: 16 | type: boolean 17 | description: | 18 | Dump the parameters of the pipeline to a JSON file 19 | - output_directory: 20 | type: directory 21 | description: Path to output dir to write JSON file to. 22 | pattern: "results/" 23 | - check_conda_channel: 24 | type: boolean 25 | description: | 26 | Check if the conda channel priority is correct. 27 | output: 28 | - dummy_emit: 29 | type: boolean 30 | description: | 31 | Dummy emit to make nf-core subworkflows lint happy 32 | authors: 33 | - "@adamrtalbot" 34 | - "@drpatelh" 35 | maintainers: 36 | - "@adamrtalbot" 37 | - "@drpatelh" 38 | - "@maxulysse" 39 | -------------------------------------------------------------------------------- /subworkflows/nf-core/utils_nextflow_pipeline/tests/main.function.nf.test: -------------------------------------------------------------------------------- 1 | 2 | nextflow_function { 3 | 4 | name "Test Functions" 5 | script "subworkflows/nf-core/utils_nextflow_pipeline/main.nf" 6 | config "subworkflows/nf-core/utils_nextflow_pipeline/tests/nextflow.config" 7 | tag 'subworkflows' 8 | tag 'utils_nextflow_pipeline' 9 | tag 'subworkflows/utils_nextflow_pipeline' 10 | 11 | test("Test Function getWorkflowVersion") { 12 | 13 | function "getWorkflowVersion" 14 | 15 | then { 16 | assertAll( 17 | { assert function.success }, 18 | { assert snapshot(function.result).match() } 19 | ) 20 | } 21 | } 22 | 23 | test("Test Function dumpParametersToJSON") { 24 | 25 | function "dumpParametersToJSON" 26 | 27 | when { 28 | function { 29 | """ 30 | // define inputs of the function here. 
Example: 31 | input[0] = "$outputDir" 32 | """.stripIndent() 33 | } 34 | } 35 | 36 | then { 37 | assertAll( 38 | { assert function.success } 39 | ) 40 | } 41 | } 42 | 43 | test("Test Function checkCondaChannels") { 44 | 45 | function "checkCondaChannels" 46 | 47 | then { 48 | assertAll( 49 | { assert function.success }, 50 | { assert snapshot(function.result).match() } 51 | ) 52 | } 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /subworkflows/nf-core/utils_nextflow_pipeline/tests/main.function.nf.test.snap: -------------------------------------------------------------------------------- 1 | { 2 | "Test Function getWorkflowVersion": { 3 | "content": [ 4 | "v9.9.9" 5 | ], 6 | "meta": { 7 | "nf-test": "0.8.4", 8 | "nextflow": "23.10.1" 9 | }, 10 | "timestamp": "2024-02-28T12:02:05.308243" 11 | }, 12 | "Test Function checkCondaChannels": { 13 | "content": null, 14 | "meta": { 15 | "nf-test": "0.8.4", 16 | "nextflow": "23.10.1" 17 | }, 18 | "timestamp": "2024-02-28T12:02:12.425833" 19 | } 20 | } -------------------------------------------------------------------------------- /subworkflows/nf-core/utils_nextflow_pipeline/tests/nextflow.config: -------------------------------------------------------------------------------- 1 | manifest { 2 | name = 'nextflow_workflow' 3 | author = """nf-core""" 4 | homePage = 'https://127.0.0.1' 5 | description = """Dummy pipeline""" 6 | nextflowVersion = '!>=23.04.0' 7 | version = '9.9.9' 8 | doi = 'https://doi.org/10.5281/zenodo.5070524' 9 | } 10 | -------------------------------------------------------------------------------- /subworkflows/nf-core/utils_nextflow_pipeline/tests/tags.yml: -------------------------------------------------------------------------------- 1 | subworkflows/utils_nextflow_pipeline: 2 | - subworkflows/nf-core/utils_nextflow_pipeline/** 3 | -------------------------------------------------------------------------------- /subworkflows/nf-core/utils_nfcore_pipeline/meta.yml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/subworkflows/yaml-schema.json 2 | name: "UTILS_NFCORE_PIPELINE" 3 | description: Subworkflow with utility functions specific to the nf-core pipeline template 4 | keywords: 5 | - utility 6 | - pipeline 7 | - initialise 8 | - version 9 | components: [] 10 | input: 11 | - nextflow_cli_args: 12 | type: list 13 | description: | 14 | Nextflow CLI positional arguments 15 | output: 16 | - success: 17 | type: boolean 18 | description: | 19 | Dummy output to indicate success 20 | authors: 21 | - "@adamrtalbot" 22 | maintainers: 23 | - "@adamrtalbot" 24 | - "@maxulysse" 25 | -------------------------------------------------------------------------------- /subworkflows/nf-core/utils_nfcore_pipeline/tests/main.workflow.nf.test: -------------------------------------------------------------------------------- 1 | nextflow_workflow { 2 | 3 | name "Test Workflow UTILS_NFCORE_PIPELINE" 4 | script "../main.nf" 5 | config "subworkflows/nf-core/utils_nfcore_pipeline/tests/nextflow.config" 6 | workflow "UTILS_NFCORE_PIPELINE" 7 | tag "subworkflows" 8 | tag "subworkflows_nfcore" 9 | tag "utils_nfcore_pipeline" 10 | tag "subworkflows/utils_nfcore_pipeline" 11 | 12 | test("Should run without failures") { 13 | 14 | when { 15 | workflow { 16 | """ 17 | input[0] = [] 18 | """ 19 | } 20 | } 21 | 22 | then { 23 | assertAll( 24 | { assert workflow.success }, 25 | { assert 
snapshot(workflow.out).match() } 26 | ) 27 | } 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /subworkflows/nf-core/utils_nfcore_pipeline/tests/main.workflow.nf.test.snap: -------------------------------------------------------------------------------- 1 | { 2 | "Should run without failures": { 3 | "content": [ 4 | { 5 | "0": [ 6 | true 7 | ], 8 | "valid_config": [ 9 | true 10 | ] 11 | } 12 | ], 13 | "meta": { 14 | "nf-test": "0.8.4", 15 | "nextflow": "23.10.1" 16 | }, 17 | "timestamp": "2024-02-28T12:03:25.726491" 18 | } 19 | } -------------------------------------------------------------------------------- /subworkflows/nf-core/utils_nfcore_pipeline/tests/nextflow.config: -------------------------------------------------------------------------------- 1 | manifest { 2 | name = 'nextflow_workflow' 3 | author = """nf-core""" 4 | homePage = 'https://127.0.0.1' 5 | description = """Dummy pipeline""" 6 | nextflowVersion = '!>=23.04.0' 7 | version = '9.9.9' 8 | doi = 'https://doi.org/10.5281/zenodo.5070524' 9 | } 10 | -------------------------------------------------------------------------------- /subworkflows/nf-core/utils_nfcore_pipeline/tests/tags.yml: -------------------------------------------------------------------------------- 1 | subworkflows/utils_nfcore_pipeline: 2 | - subworkflows/nf-core/utils_nfcore_pipeline/** 3 | -------------------------------------------------------------------------------- /subworkflows/nf-core/utils_nfvalidation_plugin/main.nf: -------------------------------------------------------------------------------- 1 | // 2 | // Subworkflow that uses the nf-validation plugin to render help text and parameter summary 3 | // 4 | 5 | /* 6 | ======================================================================================== 7 | IMPORT NF-VALIDATION PLUGIN 8 | ======================================================================================== 9 | */ 10 | 11 | include { paramsHelp } from 'plugin/nf-validation' 12 | include { paramsSummaryLog } from 'plugin/nf-validation' 13 | include { validateParameters } from 'plugin/nf-validation' 14 | 15 | /* 16 | ======================================================================================== 17 | SUBWORKFLOW DEFINITION 18 | ======================================================================================== 19 | */ 20 | 21 | workflow UTILS_NFVALIDATION_PLUGIN { 22 | 23 | take: 24 | print_help // boolean: print help 25 | workflow_command // string: default command used to run pipeline 26 | pre_help_text // string: string to be printed before help text and summary log 27 | post_help_text // string: string to be printed after help text and summary log 28 | validate_params // boolean: validate parameters 29 | schema_filename // path: JSON schema file, null to use default value 30 | 31 | main: 32 | 33 | log.debug "Using schema file: ${schema_filename}" 34 | 35 | // Default values for strings 36 | pre_help_text = pre_help_text ?: '' 37 | post_help_text = post_help_text ?: '' 38 | workflow_command = workflow_command ?: '' 39 | 40 | // 41 | // Print help message if needed 42 | // 43 | if (print_help) { 44 | log.info pre_help_text + paramsHelp(workflow_command, parameters_schema: schema_filename) + post_help_text 45 | System.exit(0) 46 | } 47 | 48 | // 49 | // Print parameter summary to stdout 50 | // 51 | log.info pre_help_text + paramsSummaryLog(workflow, parameters_schema: schema_filename) + post_help_text 52 | 53 | // 54 | // Validate parameters relative to the
parameter JSON schema 55 | // 56 | if (validate_params){ 57 | validateParameters(parameters_schema: schema_filename) 58 | } 59 | 60 | emit: 61 | dummy_emit = true 62 | } 63 | -------------------------------------------------------------------------------- /subworkflows/nf-core/utils_nfvalidation_plugin/meta.yml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/subworkflows/yaml-schema.json 2 | name: "UTILS_NFVALIDATION_PLUGIN" 3 | description: Use nf-validation to initiate and validate a pipeline 4 | keywords: 5 | - utility 6 | - pipeline 7 | - initialise 8 | - validation 9 | components: [] 10 | input: 11 | - print_help: 12 | type: boolean 13 | description: | 14 | Print help message and exit 15 | - workflow_command: 16 | type: string 17 | description: | 18 | The command to run the workflow e.g. "nextflow run main.nf" 19 | - pre_help_text: 20 | type: string 21 | description: | 22 | Text to print before the help message 23 | - post_help_text: 24 | type: string 25 | description: | 26 | Text to print after the help message 27 | - validate_params: 28 | type: boolean 29 | description: | 30 | Validate the parameters and error if invalid. 31 | - schema_filename: 32 | type: string 33 | description: | 34 | The filename of the schema to validate against. 35 | output: 36 | - dummy_emit: 37 | type: boolean 38 | description: | 39 | Dummy emit to make nf-core subworkflows lint happy 40 | authors: 41 | - "@adamrtalbot" 42 | maintainers: 43 | - "@adamrtalbot" 44 | - "@maxulysse" 45 | -------------------------------------------------------------------------------- /subworkflows/nf-core/utils_nfvalidation_plugin/tests/tags.yml: -------------------------------------------------------------------------------- 1 | subworkflows/utils_nfvalidation_plugin: 2 | - subworkflows/nf-core/utils_nfvalidation_plugin/** 3 | -------------------------------------------------------------------------------- /tests/main.nf.test: -------------------------------------------------------------------------------- 1 | nextflow_pipeline { 2 | 3 | name "Test pipeline" 4 | script "../main.nf" 5 | tag "pipeline" 6 | tag "pipeline_rnafusion" 7 | 8 | test("Run build references with profile test") { 9 | 10 | when { 11 | params { 12 | build_references = true 13 | outdir = "results" 14 | genome_base = "references" 15 | max_cpus = 2 16 | max_memory = '6.GB' 17 | max_time = '6.h' 18 | input = 'https://raw.githubusercontent.com/nf-core/test-datasets/rnafusion/testdata/human/samplesheet_valid.csv' 19 | cosmic_username = COSMIC_USERNAME 20 | cosmic_passwd = COSMIC_PASSWD 21 | } 22 | } 23 | 24 | then { 25 | assertAll( 26 | { assert workflow.success } 27 | ) 28 | } 29 | } 30 | test("Run fusion detection with profile test") { 31 | 32 | when { 33 | params { 34 | outdir = "results" 35 | genome_base = "references" 36 | max_cpus = 2 37 | max_memory = '6.GB' 38 | max_time = '6.h' 39 | input = 'https://raw.githubusercontent.com/nf-core/test-datasets/rnafusion/testdata/human/samplesheet_valid.csv' 40 | } 41 | } 42 | 43 | then { 44 | assertAll( 45 | { assert workflow.success } 46 | ) 47 | } 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /tests/nextflow.config: -------------------------------------------------------------------------------- 1 | /* 2 | ======================================================================================== 3 | Nextflow config file for running tests 4 | 
======================================================================================== 5 | */ 6 | 7 | params { 8 | // Base directory for nf-core/modules test data 9 | modules_testdata_base_path = 's3://ngi-igenomes/testdata/nf-core/modules/' 10 | 11 | // Base directory for nf-core/rnaseq test data 12 | pipelines_testdata_base_path = 's3://ngi-igenomes/testdata/nf-core/pipelines/rnaseq/3.15/' 13 | 14 | outdir = 'results' 15 | } 16 | 17 | // Impose sensible resource limits for testing 18 | process { 19 | withName: '.*' { 20 | cpus = 2 21 | memory = 3.GB 22 | time = 2.h 23 | } 24 | } 25 | 26 | // Impose same minimum Nextflow version as the pipeline for testing 27 | manifest { 28 | nextflowVersion = '!>=23.04.0' 29 | } 30 | 31 | // Disable all Nextflow reporting options 32 | timeline { enabled = false } 33 | report { enabled = false } 34 | trace { enabled = false } 35 | dag { enabled = false } 36 | -------------------------------------------------------------------------------- /tower.yml: -------------------------------------------------------------------------------- 1 | reports: 2 | multiqc_report.html: 3 | display: "MultiQC HTML report" 4 | "**/arriba/*.arriba.fusions.tsv": 5 | display: "Arriba identified fusion TSV report" 6 | "**/arriba_visualisation/*_combined_fusions_arriba_visualisation.pdf": 7 | display: "PDF visualisation of the transcripts involved in predicted fusions" 8 | "**/fastp/*fastp.html": 9 | display: "Post fastp trimming HTML report" 10 | "**/fusioncatcher/*.fusioncatcher.fusion-genes.txt": 11 | display: "FusionCatcher identified fusion TXT report" 12 | "**/fusioninspector/*.FusionInspector.fusions.abridged.tsv": 13 | display: "FusionInspector TSV report" 14 | "**/fusionreport/*/*_fusionreport_index.html": 15 | display: "Fusion-report HTML report" 16 | "**/vcf/*_fusion_data.vcf.gz": 17 | display: "Collected statistics on each fusion fed to FusionInspector in VCF format" 18 | "**/picard/*.MarkDuplicates.metrics.txt": 19 | display: "GATK4: Metrics from MarkDuplicates" 20 | "**/picard/*_rna_metrics.txt": 21 | display: "Picard: Metrics from CollectRnaSeqMetrics" 22 | "**/picard/*insert*size*metrics.txt": 23 | display: "Picard: Metrics from CollectInsertSizeMetrics" 24 | "**/picard/*pdf": 25 | display: "Picard: CollectInsertSizeMetrics histogram" 26 | "**/star_for_starfusion/*ReadsPerGene.out.tab": 27 | display: "Number of reads per gene" 28 | "**/starfusion/*.starfusion.fusion_predictions.tsv": 29 | display: "STAR-Fusion identified fusion TSV report" 30 | "**/stringtie/*/*stringtie.merged.gtf": 31 | display: "Merged GTFs from StringTie with annotations" 32 | --------------------------------------------------------------------------------
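Usage note: the ext.args / ext.prefix hooks exercised in the module test configs above are the standard way to adjust these modules from configuration rather than by editing module code. A minimal sketch of a user-side override follows; the file name custom.config and the option values are illustrative assumptions, not files or defaults of this repository:

process {
    withName: SAMTOOLS_SORT {
        ext.prefix = { "${meta.id}.coordsorted" } // picked up as task.ext.prefix in the module script
        ext.args   = '-m 2G'                      // extra samtools sort arguments, read as task.ext.args
    }
}

A file like this would typically be supplied at runtime, e.g. nextflow run nf-core/rnafusion -c custom.config -profile docker --input samplesheet.csv --outdir results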