├── .devcontainer └── devcontainer.json ├── .editorconfig ├── .gitattributes ├── .github ├── .dockstore.yml ├── CONTRIBUTING.md ├── ISSUE_TEMPLATE │ ├── bug_report.yml │ ├── config.yml │ └── feature_request.yml ├── PULL_REQUEST_TEMPLATE.md └── workflows │ ├── awsfulltest.yml │ ├── awstest.yml │ ├── branch.yml │ ├── ci.yml │ ├── clean-up.yml │ ├── download_pipeline.yml │ ├── fix-linting.yml │ ├── linting.yml │ ├── linting_comment.yml │ ├── release-announcements.yml │ └── template_version_comment.yml ├── .gitignore ├── .gitpod.yml ├── .nf-core.yml ├── .pre-commit-config.yaml ├── .prettierignore ├── .prettierrc.yml ├── .vscode └── settings.json ├── CHANGELOG.md ├── CITATIONS.md ├── CODE_OF_CONDUCT.md ├── LICENSE ├── README.md ├── assets ├── adaptivecard.json ├── email_template.html ├── email_template.txt ├── methods_description_template.yml ├── multiqc_config.yml ├── nf-core-molkart_logo_light.png ├── samplesheet.csv ├── schema_input.json ├── sendmail_template.txt └── slackreport.json ├── bin ├── apply_clahe.dask.py ├── collect_QC.py ├── create_anndata.py ├── crop_hdf5.py ├── crop_tiff.py ├── maskfilter.py ├── spot2cell.py └── stack.py ├── conf ├── base.config ├── igenomes.config ├── modules.config ├── test.config └── test_full.config ├── docs ├── README.md ├── images │ ├── molkart_workflow.png │ ├── nf-core-molkart_logo_dark.png │ └── nf-core-molkart_logo_light.png ├── output.md └── usage.md ├── main.nf ├── modules.json ├── modules ├── local │ ├── clahe │ │ └── main.nf │ ├── createanndata │ │ └── main.nf │ ├── createstack │ │ └── main.nf │ ├── crophdf5 │ │ └── main.nf │ ├── croptiff │ │ └── main.nf │ ├── maskfilter │ │ └── main.nf │ ├── molkartqc │ │ └── main.nf │ ├── molkartqcpng │ │ └── main.nf │ ├── spot2cell │ │ └── main.nf │ └── tiffh5convert │ │ └── main.nf └── nf-core │ ├── cellpose │ ├── main.nf │ ├── meta.yml │ └── tests │ │ ├── main.nf.test │ │ ├── main.nf.test.snap │ │ ├── nextflow_wflows.config │ │ └── tags.yml │ ├── deepcell │ └── mesmer │ │ ├── main.nf │ │ ├── meta.yml │ │ └── tests │ │ ├── main.nf.test │ │ ├── main.nf.test.snap │ │ ├── nextflow.config │ │ └── tags.yml │ ├── ilastik │ ├── multicut │ │ ├── main.nf │ │ └── meta.yml │ └── pixelclassification │ │ ├── main.nf │ │ └── meta.yml │ ├── mindagap │ ├── duplicatefinder │ │ ├── environment.yml │ │ ├── main.nf │ │ ├── meta.yml │ │ └── tests │ │ │ ├── main.nf.test │ │ │ ├── main.nf.test.snap │ │ │ ├── nextflow.config │ │ │ └── tags.yml │ └── mindagap │ │ ├── environment.yml │ │ ├── main.nf │ │ ├── meta.yml │ │ └── tests │ │ ├── main.nf.test │ │ ├── main.nf.test.snap │ │ ├── nextflow.config │ │ └── tags.yml │ ├── multiqc │ ├── environment.yml │ ├── main.nf │ ├── meta.yml │ └── tests │ │ ├── main.nf.test │ │ ├── main.nf.test.snap │ │ ├── nextflow.config │ │ └── tags.yml │ └── stardist │ ├── environment.yml │ ├── main.nf │ ├── meta.yml │ └── tests │ ├── main.nf.test │ ├── main.nf.test.snap │ └── nextflow.config ├── nextflow.config ├── nextflow_schema.json ├── nf-test.config ├── ro-crate-metadata.json ├── subworkflows ├── local │ └── utils_nfcore_molkart_pipeline │ │ └── main.nf └── nf-core │ ├── utils_nextflow_pipeline │ ├── main.nf │ ├── meta.yml │ └── tests │ │ ├── main.function.nf.test │ │ ├── main.function.nf.test.snap │ │ ├── main.workflow.nf.test │ │ ├── nextflow.config │ │ └── tags.yml │ ├── utils_nfcore_pipeline │ ├── main.nf │ ├── meta.yml │ └── tests │ │ ├── main.function.nf.test │ │ ├── main.function.nf.test.snap │ │ ├── main.workflow.nf.test │ │ ├── main.workflow.nf.test.snap │ │ ├── nextflow.config │ │ └── tags.yml 
│ └── utils_nfschema_plugin │ ├── main.nf │ ├── meta.yml │ └── tests │ ├── main.nf.test │ ├── nextflow.config │ └── nextflow_schema.json ├── tests ├── .nftignore ├── main.nf.test ├── main.nf.test.snap └── tags.yml ├── tower.yml └── workflows └── molkart.nf /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "nfcore", 3 | "image": "nfcore/gitpod:latest", 4 | "remoteUser": "gitpod", 5 | "runArgs": ["--privileged"], 6 | 7 | // Configure tool-specific properties. 8 | "customizations": { 9 | // Configure properties specific to VS Code. 10 | "vscode": { 11 | // Set *default* container specific settings.json values on container create. 12 | "settings": { 13 | "python.defaultInterpreterPath": "/opt/conda/bin/python" 14 | }, 15 | 16 | // Add the IDs of extensions you want installed when the container is created. 17 | "extensions": ["ms-python.python", "ms-python.vscode-pylance", "nf-core.nf-core-extensionpack"] 18 | } 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | charset = utf-8 5 | end_of_line = lf 6 | insert_final_newline = true 7 | trim_trailing_whitespace = true 8 | indent_size = 4 9 | indent_style = space 10 | 11 | [*.{md,yml,yaml,html,css,scss,js}] 12 | indent_size = 2 13 | 14 | # These files are edited and tested upstream in nf-core/modules 15 | [/modules/nf-core/**] 16 | charset = unset 17 | end_of_line = unset 18 | insert_final_newline = unset 19 | trim_trailing_whitespace = unset 20 | indent_style = unset 21 | [/subworkflows/nf-core/**] 22 | charset = unset 23 | end_of_line = unset 24 | insert_final_newline = unset 25 | trim_trailing_whitespace = unset 26 | indent_style = unset 27 | 28 | [/assets/email*] 29 | indent_size = unset 30 | 31 | # ignore python and markdown 32 | [*.{py,md}] 33 | indent_style = unset 34 | 35 | # ignore ro-crate metadata files 36 | [**/ro-crate-metadata.json] 37 | insert_final_newline = unset 38 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | *.config linguist-language=nextflow 2 | *.nf.test linguist-language=nextflow 3 | modules/nf-core/** linguist-generated 4 | subworkflows/nf-core/** linguist-generated 5 | -------------------------------------------------------------------------------- /.github/.dockstore.yml: -------------------------------------------------------------------------------- 1 | # Dockstore config version, not pipeline version 2 | version: 1.2 3 | workflows: 4 | - subclass: nfl 5 | primaryDescriptorPath: /nextflow.config 6 | publish: True 7 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.yml: -------------------------------------------------------------------------------- 1 | name: Bug report 2 | description: Report something that is broken or incorrect 3 | labels: bug 4 | body: 5 | - type: markdown 6 | attributes: 7 | value: | 8 | Before you post this issue, please check the documentation: 9 | 10 | - [nf-core website: troubleshooting](https://nf-co.re/usage/troubleshooting) 11 | - [nf-core/molkart pipeline documentation](https://nf-co.re/molkart/usage) 12 | - type: textarea 13 | id: description 14 | attributes: 15 | label: Description of the bug 16 | description: A clear and concise 
description of what the bug is. 17 | validations: 18 | required: true 19 | 20 | - type: textarea 21 | id: command_used 22 | attributes: 23 | label: Command used and terminal output 24 | description: Steps to reproduce the behaviour. Please paste the command you used to launch the pipeline and the output from your terminal. 25 | render: console 26 | placeholder: | 27 | $ nextflow run ... 28 | 29 | Some output where something broke 30 | 31 | - type: textarea 32 | id: files 33 | attributes: 34 | label: Relevant files 35 | description: | 36 | Please drag and drop the relevant files here. Create a `.zip` archive if the extension is not allowed. 37 | Your verbose log file `.nextflow.log` is often useful _(this is a hidden file in the directory where you launched the pipeline)_ as well as custom Nextflow configuration files. 38 | 39 | - type: textarea 40 | id: system 41 | attributes: 42 | label: System information 43 | description: | 44 | * Nextflow version _(eg. 23.04.0)_ 45 | * Hardware _(eg. HPC, Desktop, Cloud)_ 46 | * Executor _(eg. slurm, local, awsbatch)_ 47 | * Container engine: _(e.g. Docker, Singularity, Conda, Podman, Shifter, Charliecloud, or Apptainer)_ 48 | * OS _(eg. CentOS Linux, macOS, Linux Mint)_ 49 | * Version of nf-core/molkart _(eg. 1.1, 1.5, 1.8.2)_ 50 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | contact_links: 2 | - name: Join nf-core 3 | url: https://nf-co.re/join 4 | about: Please join the nf-core community here 5 | - name: "Slack #molkart channel" 6 | url: https://nfcore.slack.com/channels/molkart 7 | about: Discussion about the nf-core/molkart pipeline 8 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.yml: -------------------------------------------------------------------------------- 1 | name: Feature request 2 | description: Suggest an idea for the nf-core/molkart pipeline 3 | labels: enhancement 4 | body: 5 | - type: textarea 6 | id: description 7 | attributes: 8 | label: Description of feature 9 | description: Please describe your suggestion for a new feature. It might help to describe a problem or use case, plus any alternatives that you have considered. 10 | validations: 11 | required: true 12 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 13 | 14 | ## PR checklist 15 | 16 | - [ ] This comment contains a description of changes (with reason). 17 | - [ ] If you've fixed a bug or added code that should be tested, add tests! 18 | - [ ] If you've added a new tool - have you followed the pipeline conventions in the [contribution docs](https://github.com/nf-core/molkart/tree/master/.github/CONTRIBUTING.md)? 19 | - [ ] If necessary, also make a PR on the nf-core/molkart _branch_ on the [nf-core/test-datasets](https://github.com/nf-core/test-datasets) repository. 20 | - [ ] Make sure your code lints (`nf-core pipelines lint`). 21 | - [ ] Ensure the test suite passes (`nextflow run . -profile test,docker --outdir <OUTDIR>`). 22 | - [ ] Check for unexpected warnings in debug mode (`nextflow run . -profile debug,test,docker --outdir <OUTDIR>`). 23 | - [ ] Usage Documentation in `docs/usage.md` is updated. 24 | - [ ] Output Documentation in `docs/output.md` is updated. 25 | - [ ] `CHANGELOG.md` is updated.
26 | - [ ] `README.md` is updated (including new tool citations and authors/contributors). 27 | -------------------------------------------------------------------------------- /.github/workflows/awsfulltest.yml: -------------------------------------------------------------------------------- 1 | name: nf-core AWS full size tests 2 | # This workflow is triggered on PRs opened against the main/master branch. 3 | # It can be additionally triggered manually with GitHub actions workflow dispatch button. 4 | # It runs the -profile 'test_full' on AWS batch 5 | 6 | on: 7 | pull_request: 8 | branches: 9 | - main 10 | - master 11 | workflow_dispatch: 12 | pull_request_review: 13 | types: [submitted] 14 | 15 | jobs: 16 | run-platform: 17 | name: Run AWS full tests 18 | # run only if the PR is approved by at least 2 reviewers and against the master branch or manually triggered 19 | if: github.repository == 'nf-core/molkart' && github.event.review.state == 'approved' && github.event.pull_request.base.ref == 'master' || github.event_name == 'workflow_dispatch' 20 | runs-on: ubuntu-latest 21 | steps: 22 | - name: Get PR reviews 23 | uses: octokit/request-action@v2.x 24 | if: github.event_name != 'workflow_dispatch' 25 | id: check_approvals 26 | continue-on-error: true 27 | with: 28 | route: GET /repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}/reviews?per_page=100 29 | env: 30 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 31 | 32 | - name: Check for approvals 33 | if: ${{ failure() && github.event_name != 'workflow_dispatch' }} 34 | run: | 35 | echo "No review approvals found. At least 2 approvals are required to run this action automatically." 36 | exit 1 37 | 38 | - name: Check for enough approvals (>=2) 39 | id: test_variables 40 | if: github.event_name != 'workflow_dispatch' 41 | run: | 42 | JSON_RESPONSE='${{ steps.check_approvals.outputs.data }}' 43 | CURRENT_APPROVALS_COUNT=$(echo $JSON_RESPONSE | jq -c '[.[] | select(.state | contains("APPROVED")) ] | length') 44 | test $CURRENT_APPROVALS_COUNT -ge 2 || exit 1 # At least 2 approvals are required 45 | 46 | - name: Launch workflow via Seqera Platform 47 | uses: seqeralabs/action-tower-launch@v2 48 | with: 49 | workspace_id: ${{ secrets.TOWER_WORKSPACE_ID }} 50 | access_token: ${{ secrets.TOWER_ACCESS_TOKEN }} 51 | compute_env: ${{ secrets.TOWER_COMPUTE_ENV }} 52 | revision: ${{ github.sha }} 53 | workdir: s3://${{ secrets.AWS_S3_BUCKET }}/work/molkart/work-${{ github.sha }} 54 | parameters: | 55 | { 56 | "hook_url": "${{ secrets.MEGATESTS_ALERTS_SLACK_HOOK_URL }}", 57 | "outdir": "s3://${{ secrets.AWS_S3_BUCKET }}/molkart/results-${{ github.sha }}" 58 | } 59 | profiles: test_full 60 | 61 | - uses: actions/upload-artifact@v4 62 | with: 63 | name: Seqera Platform debug log file 64 | path: | 65 | seqera_platform_action_*.log 66 | seqera_platform_action_*.json 67 | -------------------------------------------------------------------------------- /.github/workflows/awstest.yml: -------------------------------------------------------------------------------- 1 | name: nf-core AWS test 2 | # This workflow can be triggered manually with the GitHub actions workflow dispatch button. 
3 | # It runs the -profile 'test' on AWS batch 4 | 5 | on: 6 | workflow_dispatch: 7 | jobs: 8 | run-platform: 9 | name: Run AWS tests 10 | if: github.repository == 'nf-core/molkart' 11 | runs-on: ubuntu-latest 12 | steps: 13 | # Launch workflow using Seqera Platform CLI tool action 14 | - name: Launch workflow via Seqera Platform 15 | uses: seqeralabs/action-tower-launch@v2 16 | with: 17 | workspace_id: ${{ secrets.TOWER_WORKSPACE_ID }} 18 | access_token: ${{ secrets.TOWER_ACCESS_TOKEN }} 19 | compute_env: ${{ secrets.TOWER_COMPUTE_ENV }} 20 | revision: ${{ github.sha }} 21 | workdir: s3://${{ secrets.AWS_S3_BUCKET }}/work/molkart/work-${{ github.sha }} 22 | parameters: | 23 | { 24 | "outdir": "s3://${{ secrets.AWS_S3_BUCKET }}/molkart/results-test-${{ github.sha }}" 25 | } 26 | profiles: test 27 | 28 | - uses: actions/upload-artifact@v4 29 | with: 30 | name: Seqera Platform debug log file 31 | path: | 32 | seqera_platform_action_*.log 33 | seqera_platform_action_*.json 34 | -------------------------------------------------------------------------------- /.github/workflows/branch.yml: -------------------------------------------------------------------------------- 1 | name: nf-core branch protection 2 | # This workflow is triggered on PRs to `main`/`master` branch on the repository 3 | # It fails when someone tries to make a PR against the nf-core `main`/`master` branch instead of `dev` 4 | on: 5 | pull_request_target: 6 | branches: 7 | - main 8 | - master 9 | 10 | jobs: 11 | test: 12 | runs-on: ubuntu-latest 13 | steps: 14 | # PRs to the nf-core repo main/master branch are only ok if coming from the nf-core repo `dev` or any `patch` branches 15 | - name: Check PRs 16 | if: github.repository == 'nf-core/molkart' 17 | run: | 18 | { [[ ${{github.event.pull_request.head.repo.full_name }} == nf-core/molkart ]] && [[ $GITHUB_HEAD_REF == "dev" ]]; } || [[ $GITHUB_HEAD_REF == "patch" ]] 19 | 20 | # If the above check failed, post a comment on the PR explaining the failure 21 | # NOTE - this doesn't currently work if the PR is coming from a fork, due to limitations in GitHub actions secrets 22 | - name: Post PR comment 23 | if: failure() 24 | uses: mshick/add-pr-comment@b8f338c590a895d50bcbfa6c5859251edc8952fc # v2 25 | with: 26 | message: | 27 | ## This PR is against the `${{github.event.pull_request.base.ref}}` branch :x: 28 | 29 | * Do not close this PR 30 | * Click _Edit_ and change the `base` to `dev` 31 | * This CI test will remain failed until you push a new commit 32 | 33 | --- 34 | 35 | Hi @${{ github.event.pull_request.user.login }}, 36 | 37 | It looks like this pull request has been made against the [${{github.event.pull_request.head.repo.full_name }}](https://github.com/${{github.event.pull_request.head.repo.full_name }}) ${{github.event.pull_request.base.ref}} branch. 38 | The ${{github.event.pull_request.base.ref}} branch on nf-core repositories should always contain code from the latest release. 39 | Because of this, PRs to ${{github.event.pull_request.base.ref}} are only allowed if they come from the [${{github.event.pull_request.head.repo.full_name }}](https://github.com/${{github.event.pull_request.head.repo.full_name }}) `dev` branch. 40 | 41 | You do not need to close this PR; you can change the target branch to `dev` by clicking the _"Edit"_ button at the top of this page. 42 | Note that even after this, the test will continue to show as failing until you push a new commit. 43 | 44 | Thanks again for your contribution!
45 | repo-token: ${{ secrets.GITHUB_TOKEN }} 46 | allow-repeats: false 47 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: nf-core CI 2 | # This workflow runs the pipeline with the minimal test dataset to check that it completes without any syntax errors 3 | on: 4 | push: 5 | branches: 6 | - dev 7 | pull_request: 8 | release: 9 | types: [published] 10 | workflow_dispatch: 11 | 12 | env: 13 | NXF_ANSI_LOG: false 14 | NXF_SINGULARITY_CACHEDIR: ${{ github.workspace }}/.singularity 15 | NXF_SINGULARITY_LIBRARYDIR: ${{ github.workspace }}/.singularity 16 | 17 | concurrency: 18 | group: "${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}" 19 | cancel-in-progress: true 20 | 21 | jobs: 22 | test: 23 | name: "Run pipeline with test data (${{ matrix.NXF_VER }} | ${{ matrix.test_name }} | ${{ matrix.profile }})" 24 | # Only run on push if this is the nf-core dev branch (merged PRs) 25 | if: "${{ github.event_name != 'push' || (github.event_name == 'push' && github.repository == 'nf-core/molkart') }}" 26 | runs-on: ubuntu-latest 27 | strategy: 28 | matrix: 29 | NXF_VER: 30 | - "24.04.2" 31 | - "latest-everything" 32 | profile: 33 | - "conda" 34 | - "docker" 35 | - "singularity" 36 | test_name: 37 | - "test" 38 | isMaster: 39 | - ${{ github.base_ref == 'master' }} 40 | exclude: 41 | - profile: "conda" 42 | 43 | steps: 44 | - name: Check out pipeline code 45 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 46 | with: 47 | fetch-depth: 0 48 | 49 | - name: Set up Nextflow 50 | uses: nf-core/setup-nextflow@v2 51 | with: 52 | version: "${{ matrix.NXF_VER }}" 53 | 54 | - name: Set up Apptainer 55 | if: matrix.profile == 'singularity' 56 | uses: eWaterCycle/setup-apptainer@main 57 | 58 | - name: Set up Singularity 59 | if: matrix.profile == 'singularity' 60 | run: | 61 | mkdir -p $NXF_SINGULARITY_CACHEDIR 62 | mkdir -p $NXF_SINGULARITY_LIBRARYDIR 63 | 64 | - name: Clean up Disk space 65 | uses: jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be # v1.3.1 66 | 67 | - name: "Run pipeline with test data ${{ matrix.NXF_VER }} | ${{ matrix.test_name }} | ${{ matrix.profile }}" 68 | run: | 69 | nextflow run ${GITHUB_WORKSPACE} -profile ${{ matrix.test_name }},${{ matrix.profile }} --outdir ./results 70 | -------------------------------------------------------------------------------- /.github/workflows/clean-up.yml: -------------------------------------------------------------------------------- 1 | name: "Close user-tagged issues and PRs" 2 | on: 3 | schedule: 4 | - cron: "0 0 * * 0" # Once a week 5 | 6 | jobs: 7 | clean-up: 8 | runs-on: ubuntu-latest 9 | permissions: 10 | issues: write 11 | pull-requests: write 12 | steps: 13 | - uses: actions/stale@28ca1036281a5e5922ead5184a1bbf96e5fc984e # v9 14 | with: 15 | stale-issue-message: "This issue has been tagged as awaiting-changes or awaiting-feedback by an nf-core contributor. Remove stale label or add a comment otherwise this issue will be closed in 20 days." 16 | stale-pr-message: "This PR has been tagged as awaiting-changes or awaiting-feedback by an nf-core contributor. Remove stale label or add a comment if it is still useful." 17 | close-issue-message: "This issue was closed because it has been tagged as awaiting-changes or awaiting-feedback by an nf-core contributor and then staled for 20 days with no activity." 
18 | days-before-stale: 30 19 | days-before-close: 20 20 | days-before-pr-close: -1 21 | any-of-labels: "awaiting-changes,awaiting-feedback" 22 | exempt-issue-labels: "WIP" 23 | exempt-pr-labels: "WIP" 24 | repo-token: "${{ secrets.GITHUB_TOKEN }}" 25 | -------------------------------------------------------------------------------- /.github/workflows/download_pipeline.yml: -------------------------------------------------------------------------------- 1 | name: Test successful pipeline download with 'nf-core pipelines download' 2 | 3 | # Run the workflow when: 4 | # - dispatched manually 5 | # - when a PR is opened or reopened to main/master branch 6 | # - the head branch of the pull request is updated, i.e. if fixes for a release are pushed last minute to dev. 7 | on: 8 | workflow_dispatch: 9 | inputs: 10 | testbranch: 11 | description: "The specific branch you wish to utilize for the test execution of nf-core pipelines download." 12 | required: true 13 | default: "dev" 14 | pull_request: 15 | types: 16 | - opened 17 | - edited 18 | - synchronize 19 | branches: 20 | - main 21 | - master 22 | pull_request_target: 23 | branches: 24 | - main 25 | - master 26 | 27 | env: 28 | NXF_ANSI_LOG: false 29 | 30 | jobs: 31 | configure: 32 | runs-on: ubuntu-latest 33 | outputs: 34 | REPO_LOWERCASE: ${{ steps.get_repo_properties.outputs.REPO_LOWERCASE }} 35 | REPOTITLE_LOWERCASE: ${{ steps.get_repo_properties.outputs.REPOTITLE_LOWERCASE }} 36 | REPO_BRANCH: ${{ steps.get_repo_properties.outputs.REPO_BRANCH }} 37 | steps: 38 | - name: Get the repository name and current branch 39 | id: get_repo_properties 40 | run: | 41 | echo "REPO_LOWERCASE=${GITHUB_REPOSITORY,,}" >> "$GITHUB_OUTPUT" 42 | echo "REPOTITLE_LOWERCASE=$(basename ${GITHUB_REPOSITORY,,})" >> "$GITHUB_OUTPUT" 43 | echo "REPO_BRANCH=${{ github.event.inputs.testbranch || 'dev' }}" >> "$GITHUB_OUTPUT" 44 | 45 | download: 46 | runs-on: ubuntu-latest 47 | needs: configure 48 | steps: 49 | - name: Install Nextflow 50 | uses: nf-core/setup-nextflow@v2 51 | 52 | - name: Disk space cleanup 53 | uses: jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be # v1.3.1 54 | 55 | - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5 56 | with: 57 | python-version: "3.12" 58 | architecture: "x64" 59 | 60 | - name: Setup Apptainer 61 | uses: eWaterCycle/setup-apptainer@4bb22c52d4f63406c49e94c804632975787312b3 # v2.0.0 62 | with: 63 | apptainer-version: 1.3.4 64 | 65 | - name: Install dependencies 66 | run: | 67 | python -m pip install --upgrade pip 68 | pip install git+https://github.com/nf-core/tools.git@dev 69 | 70 | - name: Make a cache directory for the container images 71 | run: | 72 | mkdir -p ./singularity_container_images 73 | 74 | - name: Download the pipeline 75 | env: 76 | NXF_SINGULARITY_CACHEDIR: ./singularity_container_images 77 | run: | 78 | nf-core pipelines download ${{ needs.configure.outputs.REPO_LOWERCASE }} \ 79 | --revision ${{ needs.configure.outputs.REPO_BRANCH }} \ 80 | --outdir ./${{ needs.configure.outputs.REPOTITLE_LOWERCASE }} \ 81 | --compress "none" \ 82 | --container-system 'singularity' \ 83 | --container-library "quay.io" -l "docker.io" -l "community.wave.seqera.io/library/" \ 84 | --container-cache-utilisation 'amend' \ 85 | --download-configuration 'yes' 86 | 87 | - name: Inspect download 88 | run: tree ./${{ needs.configure.outputs.REPOTITLE_LOWERCASE }} 89 | 90 | - name: Inspect container images 91 | run: tree ./singularity_container_images | tee ./container_initial 92 | 93 | - name: 
Count the downloaded number of container images 94 | id: count_initial 95 | run: | 96 | image_count=$(ls -1 ./singularity_container_images | wc -l | xargs) 97 | echo "Initial container image count: $image_count" 98 | echo "IMAGE_COUNT_INITIAL=$image_count" >> "$GITHUB_OUTPUT" 99 | 100 | - name: Run the downloaded pipeline (stub) 101 | id: stub_run_pipeline 102 | continue-on-error: true 103 | env: 104 | NXF_SINGULARITY_CACHEDIR: ./singularity_container_images 105 | NXF_SINGULARITY_HOME_MOUNT: true 106 | run: nextflow run ./${{ needs.configure.outputs.REPOTITLE_LOWERCASE }}/$( sed 's/\W/_/g' <<< ${{ needs.configure.outputs.REPO_BRANCH }}) -stub -profile test,singularity --outdir ./results 107 | - name: Run the downloaded pipeline (stub run not supported) 108 | id: run_pipeline 109 | if: ${{ steps.stub_run_pipeline.outcome == 'failure' }} 110 | env: 111 | NXF_SINGULARITY_CACHEDIR: ./singularity_container_images 112 | NXF_SINGULARITY_HOME_MOUNT: true 113 | run: nextflow run ./${{ needs.configure.outputs.REPOTITLE_LOWERCASE }}/$( sed 's/\W/_/g' <<< ${{ needs.configure.outputs.REPO_BRANCH }}) -profile test,singularity --outdir ./results 114 | 115 | - name: Count the downloaded number of container images 116 | id: count_afterwards 117 | run: | 118 | image_count=$(ls -1 ./singularity_container_images | wc -l | xargs) 119 | echo "Post-pipeline run container image count: $image_count" 120 | echo "IMAGE_COUNT_AFTER=$image_count" >> "$GITHUB_OUTPUT" 121 | 122 | - name: Compare container image counts 123 | run: | 124 | if [ "${{ steps.count_initial.outputs.IMAGE_COUNT_INITIAL }}" -ne "${{ steps.count_afterwards.outputs.IMAGE_COUNT_AFTER }}" ]; then 125 | initial_count=${{ steps.count_initial.outputs.IMAGE_COUNT_INITIAL }} 126 | final_count=${{ steps.count_afterwards.outputs.IMAGE_COUNT_AFTER }} 127 | difference=$((final_count - initial_count)) 128 | echo "$difference additional container images were downloaded at runtime. The pipeline has no support for offline runs!" 129 | tree ./singularity_container_images > ./container_afterwards 130 | diff ./container_initial ./container_afterwards 131 | exit 1 132 | else 133 | echo "The pipeline can be downloaded successfully!"
134 | fi 135 | -------------------------------------------------------------------------------- /.github/workflows/fix-linting.yml: -------------------------------------------------------------------------------- 1 | name: Fix linting from a comment 2 | on: 3 | issue_comment: 4 | types: [created] 5 | 6 | jobs: 7 | fix-linting: 8 | # Only run if comment is on a PR with the main repo, and if it contains the magic keywords 9 | if: > 10 | contains(github.event.comment.html_url, '/pull/') && 11 | contains(github.event.comment.body, '@nf-core-bot fix linting') && 12 | github.repository == 'nf-core/molkart' 13 | runs-on: ubuntu-latest 14 | steps: 15 | # Use the @nf-core-bot token to check out so we can push later 16 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 17 | with: 18 | token: ${{ secrets.nf_core_bot_auth_token }} 19 | 20 | # indication that the linting is being fixed 21 | - name: React on comment 22 | uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4 23 | with: 24 | comment-id: ${{ github.event.comment.id }} 25 | reactions: eyes 26 | 27 | # Action runs on the issue comment, so we don't get the PR by default 28 | # Use the gh cli to check out the PR 29 | - name: Checkout Pull Request 30 | run: gh pr checkout ${{ github.event.issue.number }} 31 | env: 32 | GITHUB_TOKEN: ${{ secrets.nf_core_bot_auth_token }} 33 | 34 | # Install and run pre-commit 35 | - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5 36 | with: 37 | python-version: "3.12" 38 | 39 | - name: Install pre-commit 40 | run: pip install pre-commit 41 | 42 | - name: Run pre-commit 43 | id: pre-commit 44 | run: pre-commit run --all-files 45 | continue-on-error: true 46 | 47 | # indication that the linting has finished 48 | - name: react if linting finished successfully 49 | if: steps.pre-commit.outcome == 'success' 50 | uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4 51 | with: 52 | comment-id: ${{ github.event.comment.id }} 53 | reactions: "+1" 54 | 55 | - name: Commit & push changes 56 | id: commit-and-push 57 | if: steps.pre-commit.outcome == 'failure' 58 | run: | 59 | git config user.email "core@nf-co.re" 60 | git config user.name "nf-core-bot" 61 | git config push.default upstream 62 | git add . 63 | git status 64 | git commit -m "[automated] Fix code linting" 65 | git push 66 | 67 | - name: react if linting errors were fixed 68 | id: react-if-fixed 69 | if: steps.commit-and-push.outcome == 'success' 70 | uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4 71 | with: 72 | comment-id: ${{ github.event.comment.id }} 73 | reactions: hooray 74 | 75 | - name: react if linting errors were not fixed 76 | if: steps.commit-and-push.outcome == 'failure' 77 | uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4 78 | with: 79 | comment-id: ${{ github.event.comment.id }} 80 | reactions: confused 81 | 82 | - name: comment if linting errors were not fixed 83 | if: steps.commit-and-push.outcome == 'failure' 84 | uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4 85 | with: 86 | issue-number: ${{ github.event.issue.number }} 87 | body: | 88 | @${{ github.actor }} I tried to fix the linting errors, but it didn't work. Please fix them manually. 89 | See [CI log](https://github.com/nf-core/molkart/actions/runs/${{ github.run_id }}) for more details.
90 | -------------------------------------------------------------------------------- /.github/workflows/linting.yml: -------------------------------------------------------------------------------- 1 | name: nf-core linting 2 | # This workflow is triggered on pushes and PRs to the repository. 3 | # It runs the `nf-core pipelines lint` and markdown lint tests to ensure 4 | # that the code meets the nf-core guidelines. 5 | on: 6 | push: 7 | branches: 8 | - dev 9 | pull_request: 10 | release: 11 | types: [published] 12 | 13 | jobs: 14 | pre-commit: 15 | runs-on: ubuntu-latest 16 | steps: 17 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 18 | 19 | - name: Set up Python 3.12 20 | uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5 21 | with: 22 | python-version: "3.12" 23 | 24 | - name: Install pre-commit 25 | run: pip install pre-commit 26 | 27 | - name: Run pre-commit 28 | run: pre-commit run --all-files 29 | 30 | nf-core: 31 | runs-on: ubuntu-latest 32 | steps: 33 | - name: Check out pipeline code 34 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 35 | 36 | - name: Install Nextflow 37 | uses: nf-core/setup-nextflow@v2 38 | 39 | - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5 40 | with: 41 | python-version: "3.12" 42 | architecture: "x64" 43 | 44 | - name: read .nf-core.yml 45 | uses: pietrobolcato/action-read-yaml@1.1.0 46 | id: read_yml 47 | with: 48 | config: ${{ github.workspace }}/.nf-core.yml 49 | 50 | - name: Install dependencies 51 | run: | 52 | python -m pip install --upgrade pip 53 | pip install nf-core==${{ steps.read_yml.outputs['nf_core_version'] }} 54 | 55 | - name: Run nf-core pipelines lint 56 | if: ${{ github.base_ref != 'master' }} 57 | env: 58 | GITHUB_COMMENTS_URL: ${{ github.event.pull_request.comments_url }} 59 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 60 | GITHUB_PR_COMMIT: ${{ github.event.pull_request.head.sha }} 61 | run: nf-core -l lint_log.txt pipelines lint --dir ${GITHUB_WORKSPACE} --markdown lint_results.md 62 | 63 | - name: Run nf-core pipelines lint --release 64 | if: ${{ github.base_ref == 'master' }} 65 | env: 66 | GITHUB_COMMENTS_URL: ${{ github.event.pull_request.comments_url }} 67 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 68 | GITHUB_PR_COMMIT: ${{ github.event.pull_request.head.sha }} 69 | run: nf-core -l lint_log.txt pipelines lint --release --dir ${GITHUB_WORKSPACE} --markdown lint_results.md 70 | 71 | - name: Save PR number 72 | if: ${{ always() }} 73 | run: echo ${{ github.event.pull_request.number }} > PR_number.txt 74 | 75 | - name: Upload linting log file artifact 76 | if: ${{ always() }} 77 | uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4 78 | with: 79 | name: linting-logs 80 | path: | 81 | lint_log.txt 82 | lint_results.md 83 | PR_number.txt 84 | -------------------------------------------------------------------------------- /.github/workflows/linting_comment.yml: -------------------------------------------------------------------------------- 1 | name: nf-core linting comment 2 | # This workflow is triggered after the linting action is complete 3 | # It posts an automated comment to the PR, even if the PR is coming from a fork 4 | 5 | on: 6 | workflow_run: 7 | workflows: ["nf-core linting"] 8 | 9 | jobs: 10 | test: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Download lint results 14 | uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8 15 | with: 16 | workflow: linting.yml 
17 | workflow_conclusion: completed 18 | 19 | - name: Get PR number 20 | id: pr_number 21 | run: echo "pr_number=$(cat linting-logs/PR_number.txt)" >> $GITHUB_OUTPUT 22 | 23 | - name: Post PR comment 24 | uses: marocchino/sticky-pull-request-comment@331f8f5b4215f0445d3c07b4967662a32a2d3e31 # v2 25 | with: 26 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 27 | number: ${{ steps.pr_number.outputs.pr_number }} 28 | path: linting-logs/lint_results.md 29 | -------------------------------------------------------------------------------- /.github/workflows/release-announcements.yml: -------------------------------------------------------------------------------- 1 | name: release-announcements 2 | # Automatic release toot and tweet announcements 3 | on: 4 | release: 5 | types: [published] 6 | workflow_dispatch: 7 | 8 | jobs: 9 | toot: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: get topics and convert to hashtags 13 | id: get_topics 14 | run: | 15 | echo "topics=$(curl -s https://nf-co.re/pipelines.json | jq -r '.remote_workflows[] | select(.full_name == "${{ github.repository }}") | .topics[]' | awk '{print "#"$0}' | tr '\n' ' ')" | sed 's/-//g' >> $GITHUB_OUTPUT 16 | 17 | - uses: rzr/fediverse-action@master 18 | with: 19 | access-token: ${{ secrets.MASTODON_ACCESS_TOKEN }} 20 | host: "mstdn.science" # custom host if not "mastodon.social" (default) 21 | # GitHub event payload 22 | # https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#release 23 | message: | 24 | Pipeline release! ${{ github.repository }} v${{ github.event.release.tag_name }} - ${{ github.event.release.name }}! 25 | 26 | Please see the changelog: ${{ github.event.release.html_url }} 27 | 28 | ${{ steps.get_topics.outputs.topics }} #nfcore #openscience #nextflow #bioinformatics 29 | 30 | bsky-post: 31 | runs-on: ubuntu-latest 32 | steps: 33 | - uses: zentered/bluesky-post-action@80dbe0a7697de18c15ad22f4619919ceb5ccf597 # v0.1.0 34 | with: 35 | post: | 36 | Pipeline release! ${{ github.repository }} v${{ github.event.release.tag_name }} - ${{ github.event.release.name }}! 37 | 38 | Please see the changelog: ${{ github.event.release.html_url }} 39 | env: 40 | BSKY_IDENTIFIER: ${{ secrets.BSKY_IDENTIFIER }} 41 | BSKY_PASSWORD: ${{ secrets.BSKY_PASSWORD }} 42 | # 43 | -------------------------------------------------------------------------------- /.github/workflows/template_version_comment.yml: -------------------------------------------------------------------------------- 1 | name: nf-core template version comment 2 | # This workflow is triggered on PRs to check if the pipeline template version matches the latest nf-core version. 3 | # It posts a comment to the PR, even if it comes from a fork.
4 | 5 | on: pull_request_target 6 | 7 | jobs: 8 | template_version: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - name: Check out pipeline code 12 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 13 | with: 14 | ref: ${{ github.event.pull_request.head.sha }} 15 | 16 | - name: Read template version from .nf-core.yml 17 | uses: nichmor/minimal-read-yaml@v0.0.2 18 | id: read_yml 19 | with: 20 | config: ${{ github.workspace }}/.nf-core.yml 21 | 22 | - name: Install nf-core 23 | run: | 24 | python -m pip install --upgrade pip 25 | pip install nf-core==${{ steps.read_yml.outputs['nf_core_version'] }} 26 | 27 | - name: Check nf-core outdated 28 | id: nf_core_outdated 29 | run: echo "OUTPUT=$(pip list --outdated | grep nf-core)" >> ${GITHUB_ENV} 30 | 31 | - name: Post nf-core template version comment 32 | uses: mshick/add-pr-comment@b8f338c590a895d50bcbfa6c5859251edc8952fc # v2 33 | if: | 34 | contains(env.OUTPUT, 'nf-core') 35 | with: 36 | repo-token: ${{ secrets.NF_CORE_BOT_AUTH_TOKEN }} 37 | allow-repeats: false 38 | message: | 39 | > [!WARNING] 40 | > Newer version of the nf-core template is available. 41 | > 42 | > Your pipeline is using an old version of the nf-core template: ${{ steps.read_yml.outputs['nf_core_version'] }}. 43 | > Please update your pipeline to the latest version. 44 | > 45 | > For more documentation on how to update your pipeline, please see the [nf-core documentation](https://github.com/nf-core/tools?tab=readme-ov-file#sync-a-pipeline-with-the-template) and [Synchronisation documentation](https://nf-co.re/docs/contributing/sync). 46 | # 47 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .nextflow* 2 | work/ 3 | data/ 4 | results/ 5 | .DS_Store 6 | testing/ 7 | testing* 8 | *.pyc 9 | .nf-test.log 10 | .nf-test/ 11 | -------------------------------------------------------------------------------- /.gitpod.yml: -------------------------------------------------------------------------------- 1 | image: nfcore/gitpod:latest 2 | tasks: 3 | - name: Update Nextflow and setup pre-commit 4 | command: | 5 | pre-commit install --install-hooks 6 | nextflow self-update 7 | 8 | vscode: 9 | extensions: 10 | - nf-core.nf-core-extensionpack # https://github.com/nf-core/vscode-extensionpack 11 | -------------------------------------------------------------------------------- /.nf-core.yml: -------------------------------------------------------------------------------- 1 | lint: 2 | files_unchanged: 3 | - .gitignore 4 | - assets/nf-core-molkart_logo_light.png 5 | - docs/images/nf-core-molkart_logo_dark.png 6 | - docs/images/nf-core-molkart_logo_light.png 7 | files_exist: 8 | - conf/igenomes_ignored.config 9 | nf_core_version: 3.2.0 10 | repository_type: pipeline 11 | template: 12 | author: "@kbestak, @FloWuenne" 13 | description: An analysis pipeline for Molecular Cartography data from Resolve Biosciences. 14 | force: false 15 | is_nfcore: true 16 | name: molkart 17 | org: nf-core 18 | outdir: . 
19 | version: 1.1.0 20 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/mirrors-prettier 3 | rev: "v3.1.0" 4 | hooks: 5 | - id: prettier 6 | additional_dependencies: 7 | - prettier@3.2.5 8 | 9 | - repo: https://github.com/editorconfig-checker/editorconfig-checker.python 10 | rev: "3.1.2" 11 | hooks: 12 | - id: editorconfig-checker 13 | alias: ec 14 | -------------------------------------------------------------------------------- /.prettierignore: -------------------------------------------------------------------------------- 1 | email_template.html 2 | adaptivecard.json 3 | slackreport.json 4 | .nextflow* 5 | work/ 6 | data/ 7 | results/ 8 | .DS_Store 9 | testing/ 10 | testing* 11 | *.pyc 12 | bin/ 13 | ro-crate-metadata.json 14 | -------------------------------------------------------------------------------- /.prettierrc.yml: -------------------------------------------------------------------------------- 1 | printWidth: 120 2 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "markdown.styles": ["public/vscode_markdown.css"] 3 | } 4 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # nf-core/molkart: Changelog 2 | 3 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) 4 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 5 | 6 | ## 1.1.0 - Resolution Road 7 | 8 | ### Added 9 | 10 | - [PR #78](https://github.com/nf-core/molkart/pull/78) - Allow for Mindagap to be skipped using `skip_mindagap` parameter (@kbestak) 11 | - [PR #81](https://github.com/nf-core/molkart/pull/81) - Add Stardist as a segmentation method (@kbestak) 12 | 13 | ### Changed 14 | 15 | - [PR #71](https://github.com/nf-core/molkart/pull/71), [PR #88](https://github.com/nf-core/molkart/pull/88), [PR #94](https://github.com/nf-core/molkart/pull/94) - template updates from 2.11.1 to 3.2.0 (@kbestak) 16 | - [PR #98](https://github.com/nf-core/molkart/pull/98) - Update all nf-core modules (@FloWuenne) 17 | - [PR #99](https://github.com/nf-core/molkart/pull/99) - Clean up code to adhere to language server standards (@kbestak) 18 | - [PR #100](https://github.com/nf-core/molkart/pull/100) - Added author and license information to all bin scripts (@FloWuenne) 19 | - [PR #101](https://github.com/nf-core/molkart/pull/101) - Updated manifest and author information (@FloWuenne) 20 | - [PR #102](https://github.com/nf-core/molkart/pull/102) - Updated documentation (@kbestak) 21 | - [PR #107](https://github.com/nf-core/molkart/pull/107) - Updated nf-tests to use nft-utils (@FloWuenne) 22 | 23 | ### Fixed 24 | 25 | - [PR #76](https://github.com/nf-core/molkart/pull/76) - Fix issue with custom content in MultiQC output (@kbestak) 26 | 27 | ### Dependencies 28 | 29 | | Tool | Previous version | New version | 30 | | -------- | ---------------- | ----------- | 31 | | Cellpose | 2.2.2 | 3.0.1 | 32 | | Stardist | | 0.9.1 | 33 | | MultiQC | 1.19 | 1.27 | 34 | 35 | ## 1.0.0 - Spatial Circuit 36 | 37 | First release of nf-core/molkart. 
38 | -------------------------------------------------------------------------------- /CITATIONS.md: -------------------------------------------------------------------------------- 1 | # nf-core/molkart: Citations 2 | 3 | ## [nf-core](https://pubmed.ncbi.nlm.nih.gov/32055031/) 4 | 5 | > Ewels PA, Peltzer A, Fillinger S, Patel H, Alneberg J, Wilm A, Garcia MU, Di Tommaso P, Nahnsen S. The nf-core framework for community-curated bioinformatics pipelines. Nat Biotechnol. 2020 Mar;38(3):276-278. doi: 10.1038/s41587-020-0439-x. PubMed PMID: 32055031. 6 | 7 | ## [Nextflow](https://pubmed.ncbi.nlm.nih.gov/28398311/) 8 | 9 | > Di Tommaso P, Chatzou M, Floden EW, Barja PP, Palumbo E, Notredame C. Nextflow enables reproducible computational workflows. Nat Biotechnol. 2017 Apr 11;35(4):316-319. doi: 10.1038/nbt.3820. PubMed PMID: 28398311. 10 | 11 | ## Pipeline tools 12 | 13 | - [anndata](https://anndata.readthedocs.io/en/latest/) 14 | 15 | > Isaac Virshup, Sergei Rybakov, Fabian J. Theis, Philipp Angerer, F. Alexander Wolf anndata: Annotated data > bioRxiv 2021.12.16.473007; doi: https://doi.org/10.1101/2021.12.16.473007 16 | 17 | - [Cellpose](https://www.cellpose.org/) 18 | 19 | > Stringer, C., Wang, T., Michaelos, M. et al. Cellpose: a generalist algorithm for cellular segmentation. Nat Methods 18, 100–106 (2021). https://doi.org/10.1038/s41592-020-01018-x 20 | > Pachitariu, M., Stringer, C. Cellpose 2.0: how to train your own model. Nat Methods 19, 1634–1641 (2022). https://doi.org/10.1038/s41592-022-01663-4 21 | 22 | - [ilastik](https://www.ilastik.org/) 23 | 24 | > Berg, S., Kutra, D., Kroeger, T. et al. ilastik: interactive machine learning for (bio)image analysis. Nat Methods 16, 1226–1232 (2019). https://doi.org/10.1038/s41592-019-0582-9 25 | 26 | - [Mesmer](https://deepcell.readthedocs.io/en/master/API/deepcell.applications.html) 27 | 28 | > Greenwald NF, Miller G, Moen E, Kong A, Kagel A, Dougherty T, Fullaway CC, McIntosh BJ, Leow KX, Schwartz MS, Pavelchek C, Cui S, Camplisson I, Bar-Tal O, Singh J, Fong M, Chaudhry G, Abraham Z, Moseley J, Warshawsky S, Soon E, Greenbaum S, Risom T, Hollmann T, Bendall SC, Keren L, Graf W, Angelo M, Van Valen D. Whole-cell segmentation of tissue images with human-level performance using large-scale data annotation and deep learning. Nat Biotechnol. 2022 Apr;40(4):555-565. doi: 10.1038/s41587-021-01094-0. Epub 2021 Nov 18. PMID: 34795433; PMCID: PMC9010346. 29 | 30 | - [Mindagap](https://github.com/ViriatoII/MindaGap) 31 | 32 | > Ricardo Guerreiro, Florian Wuennemann, & pvtodorov. (2023). ViriatoII/MindaGap: v0.0.3 (0.0.3). Zenodo. https://doi.org/10.5281/zenodo.8120559 33 | 34 | - [MultiQC](https://pubmed.ncbi.nlm.nih.gov/27312411/) 35 | 36 | > Ewels P, Magnusson M, Lundin S, Käller M. MultiQC: summarize analysis results for multiple tools and samples in a single report. Bioinformatics. 2016 Oct 1;32(19):3047-8. doi: 10.1093/bioinformatics/btw354. Epub 2016 Jun 16. PubMed PMID: 27312411; PubMed Central PMCID: PMC5039924. 37 | 38 | - [Stardist](https://github.com/stardist/stardist) 39 | 40 | > Schmidt, U., Weigert, M., Broaddus, C., Myers, G. (2018). Cell Detection with Star-Convex Polygons. In: Frangi, A., Schnabel, J., Davatzikos, C., Alberola-López, C., Fichtinger, G. (eds) Medical Image Computing and Computer Assisted Intervention – MICCAI 2018. MICCAI 2018. Lecture Notes in Computer Science(), vol 11071. Springer, Cham. 
https://doi.org/10.1007/978-3-030-00934-2_30 41 | 42 | ## Software packaging/containerisation tools 43 | 44 | - [Anaconda](https://anaconda.com) 45 | 46 | > Anaconda Software Distribution. Computer software. Vers. 2-2.4.0. Anaconda, Nov. 2016. Web. 47 | 48 | - [Bioconda](https://pubmed.ncbi.nlm.nih.gov/29967506/) 49 | 50 | > Grüning B, Dale R, Sjödin A, Chapman BA, Rowe J, Tomkins-Tinch CH, Valieris R, Köster J; Bioconda Team. Bioconda: sustainable and comprehensive software distribution for the life sciences. Nat Methods. 2018 Jul;15(7):475-476. doi: 10.1038/s41592-018-0046-7. PubMed PMID: 29967506. 51 | 52 | - [BioContainers](https://pubmed.ncbi.nlm.nih.gov/28379341/) 53 | 54 | > da Veiga Leprevost F, Grüning B, Aflitos SA, Röst HL, Uszkoreit J, Barsnes H, Vaudel M, Moreno P, Gatto L, Weber J, Bai M, Jimenez RC, Sachsenberg T, Pfeuffer J, Alvarez RV, Griss J, Nesvizhskii AI, Perez-Riverol Y. BioContainers: an open-source and community-driven framework for software standardization. Bioinformatics. 2017 Aug 15;33(16):2580-2582. doi: 10.1093/bioinformatics/btx192. PubMed PMID: 28379341; PubMed Central PMCID: PMC5870671. 55 | 56 | - [Docker](https://dl.acm.org/doi/10.5555/2600239.2600241) 57 | 58 | > Merkel, D. (2014). Docker: lightweight linux containers for consistent development and deployment. Linux Journal, 2014(239), 2. doi: 10.5555/2600239.2600241. 59 | 60 | - [Singularity](https://pubmed.ncbi.nlm.nih.gov/28494014/) 61 | 62 | > Kurtzer GM, Sochat V, Bauer MW. Singularity: Scientific containers for mobility of compute. PLoS One. 2017 May 11;12(5):e0177459. doi: 10.1371/journal.pone.0177459. eCollection 2017. PubMed PMID: 28494014; PubMed Central PMCID: PMC5426675. 63 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) The nf-core/molkart team 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

2 | 3 | 4 | [nf-core/molkart logo: docs/images/nf-core-molkart_logo_light.png] 5 | 6 |

7 | 8 | [![GitHub Actions CI Status](https://github.com/nf-core/molkart/actions/workflows/ci.yml/badge.svg)](https://github.com/nf-core/molkart/actions/workflows/ci.yml) 9 | [![GitHub Actions Linting Status](https://github.com/nf-core/molkart/actions/workflows/linting.yml/badge.svg)](https://github.com/nf-core/molkart/actions/workflows/linting.yml)[![AWS CI](https://img.shields.io/badge/CI%20tests-full%20size-FF9900?labelColor=000000&logo=Amazon%20AWS)](https://nf-co.re/molkart/results)[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.10650748-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.10650748) 10 | [![nf-test](https://img.shields.io/badge/unit_tests-nf--test-337ab7.svg)](https://www.nf-test.com) 11 | 12 | [![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A524.04.2-23aa62.svg)](https://www.nextflow.io/) 13 | [![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/) 14 | [![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/) 15 | [![Launch on Seqera Platform](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Seqera%20Platform-%234256e7)](https://cloud.seqera.io/launch?pipeline=https://github.com/nf-core/molkart) 16 | 17 | [![Get help on Slack](http://img.shields.io/badge/slack-nf--core%20%23molkart-4A154B?labelColor=000000&logo=slack)](https://nfcore.slack.com/channels/molkart)[![Follow on Twitter](http://img.shields.io/badge/twitter-%40nf__core-1DA1F2?labelColor=000000&logo=twitter)](https://twitter.com/nf_core)[![Follow on Mastodon](https://img.shields.io/badge/mastodon-nf__core-6364ff?labelColor=FFFFFF&logo=mastodon)](https://mstdn.science/@nf_core)[![Watch on YouTube](http://img.shields.io/badge/youtube-nf--core-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/c/nf-core) 18 | 19 | ## Introduction 20 | 21 | **nf-core/molkart** is a pipeline for processing Molecular Cartography data from Resolve Bioscience (combinatorial FISH). It takes as input a table of FISH spot positions (x,y,z,gene), a corresponding DAPI image (`TIFF` format) and optionally an additional staining image in the `TIFF` format. nf-core/molkart performs end-to-end processing of the data including image processing, QC filtering of spots, cell segmentation, spot-to-cell assignment and reports quality metrics such as the spot assignment rate, average spots per cell and segmentation mask size ranges. 22 | 23 |

24 | [workflow diagram: docs/images/molkart_workflow.png] 25 |

26 | 27 | Image preprocessing 28 | 29 | - Fill the grid pattern in provided images ([`Mindagap`](https://github.com/ViriatoII/MindaGap)) 30 | - Optionally apply contrast-limited adaptive histogram equalization 31 | - If a second (membrane) image is present, combine images into a multichannel stack (if required for segmentation) 32 | 33 | Cell segmentation 34 | 35 | - Apply cell segmentation based on provided images, available options are: - [`Cellpose`](https://www.cellpose.org/) - [`Mesmer`](https://deepcell.readthedocs.io/en/master/API/deepcell.applications.html#mesmer) - [`ilastik`](https://www.ilastik.org/) - [`Stardist`](https://github.com/stardist/stardist) 36 | - Filter cells based on cell size to remove artifacts 37 | 38 | Spot processing 39 | 40 | - Find duplicated spots near grid lines ([`Mindagap`](https://github.com/ViriatoII/MindaGap)) 41 | - Assign spots to segmented cells 42 | 43 | Quality control 44 | 45 | - Create quality-control metrics specific to this pipeline 46 | - Provide them to [`MultiQC`](http://multiqc.info/) to create a report 47 | 48 | ## Usage 49 | 50 | :::note 51 | If you are new to Nextflow and nf-core, please refer to [this page](https://nf-co.re/docs/usage/installation) on how 52 | to set up Nextflow. Make sure to [test your setup](https://nf-co.re/docs/usage/introduction#how-to-run-a-pipeline) 53 | with `-profile test` before running the workflow on actual data. 54 | ::: 55 | 56 | First, prepare a samplesheet with your input data that looks as follows: 57 | 58 | `samplesheet.csv`: 59 | 60 | ```csv 61 | sample,nuclear_image,spot_locations,membrane_image 62 | sample0,sample0_DAPI.tiff,sample0_spots.txt,sample0_WGA.tiff 63 | ``` 64 | 65 | Each row represents an FOV (field-of-view). Columns represent the sample ID (all must be unique), the path to the respective nuclear image, the spot table, and optionally the path to the respective membrane image (or any additional image to improve segmentation). 66 | 67 | Now, you can run the pipeline using all default values with: 68 | 69 | ```bash 70 | nextflow run nf-core/molkart \ 71 | -profile <docker/singularity/.../institute> \ 72 | --input samplesheet.csv \ 73 | --outdir <OUTDIR> 74 | ``` 75 | 76 | > [!WARNING] 77 | > Please provide pipeline parameters via the CLI or Nextflow `-params-file` option. Custom config files including those provided by the `-c` Nextflow option can be used to provide any configuration _**except for parameters**_; see [docs](https://nf-co.re/docs/usage/getting_started/configuration#custom-configuration-files). 78 | 79 | For more details and further functionality, please refer to the [usage documentation](https://nf-co.re/molkart/usage) and the [parameter documentation](https://nf-co.re/molkart/parameters). 80 | 81 | ## Pipeline output 82 | 83 | The pipeline outputs a matched cell-by-transcript table based on deduplicated spots and segmented cells, as well as preprocessing and segmentation intermediaries. 84 | To see the results of an example test run with a full-size dataset, refer to the [results](https://nf-co.re/molkart/results) tab on the nf-core website pipeline page. 85 | For more details about the output files and reports, please refer to the 86 | [output documentation](https://nf-co.re/molkart/output). 87 | 88 | ## Credits 89 | 90 | nf-core/molkart was originally written by @kbestak, @FloWuenne. 91 | 92 | We thank [Maxime U Garcia](https://github.com/maxulysse) for his assistance and support in the development of this pipeline.
93 |
94 | ## Contributions and Support
95 |
96 | If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).
97 |
98 | For further information or help, don't hesitate to get in touch on the [Slack `#molkart` channel](https://nfcore.slack.com/channels/molkart) (you can join with [this invite](https://nf-co.re/join/slack)).
99 |
100 | ## Citations
101 |
102 | If you use nf-core/molkart for your analysis, please cite it using the following DOI: [10.5281/zenodo.10650749](https://doi.org/10.5281/zenodo.10650749)
103 |
104 | An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.
105 |
106 | You can cite the `nf-core` publication as follows:
107 |
108 | > **The nf-core framework for community-curated bioinformatics pipelines.**
109 | >
110 | > Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.
111 | >
112 | > _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).
113 |
-------------------------------------------------------------------------------- /assets/adaptivecard.json: --------------------------------------------------------------------------------
1 | {
2 | "type": "message",
3 | "attachments": [
4 | {
5 | "contentType": "application/vnd.microsoft.card.adaptive",
6 | "contentUrl": null,
7 | "content": {
8 | "\$schema": "http://adaptivecards.io/schemas/adaptive-card.json",
9 | "msteams": {
10 | "width": "Full"
11 | },
12 | "type": "AdaptiveCard",
13 | "version": "1.2",
14 | "body": [
15 | {
16 | "type": "TextBlock",
17 | "size": "Large",
18 | "weight": "Bolder",
19 | "color": "<% if (success) { %>Good<% } else { %>Attention<%} %>",
20 | "text": "nf-core/molkart v${version} - ${runName}",
21 | "wrap": true
22 | },
23 | {
24 | "type": "TextBlock",
25 | "spacing": "None",
26 | "text": "Completed at ${dateComplete} (duration: ${duration})",
27 | "isSubtle": true,
28 | "wrap": true
29 | },
30 | {
31 | "type": "TextBlock",
32 | "text": "<% if (success) { %>Pipeline completed successfully!<% } else { %>Pipeline completed with errors. The full error message was: ${errorReport}.<% } %>",
33 | "wrap": true
34 | },
35 | {
36 | "type": "TextBlock",
37 | "text": "The command used to launch the workflow was as follows:",
38 | "wrap": true
39 | },
40 | {
41 | "type": "TextBlock",
42 | "text": "${commandLine}",
43 | "isSubtle": true,
44 | "wrap": true
45 | }
46 | ],
47 | "actions": [
48 | {
49 | "type": "Action.ShowCard",
50 | "title": "Pipeline Configuration",
51 | "card": {
52 | "type": "AdaptiveCard",
53 | "\$schema": "http://adaptivecards.io/schemas/adaptive-card.json",
54 | "body": [
55 | {
56 | "type": "FactSet",
57 | "facts": [<% out << summary.collect{ k,v -> "{\"title\": \"$k\", \"value\" : \"$v\"}"}.join(",\n") %>
58 | ]
59 | }
60 | ]
61 | }
62 | }
63 | ]
64 | }
65 | }
66 | ]
67 | }
68 |
-------------------------------------------------------------------------------- /assets/email_template.html: --------------------------------------------------------------------------------
1 | <html>
2 | <head>
3 | <meta charset="utf-8" />
4 | <title>nf-core/molkart Pipeline Report</title>
5 | </head>
6 | <body>
7 | <div>
8 | <img src="cid:nfcorepipelinelogo" alt="nf-core/molkart logo" />
9 | <h1>nf-core/molkart ${version}</h1>
10 | <h2>Run Name: $runName</h2>
11 |
12 | <% if (!success){
13 | out << """
14 | <div>
15 | <h4>nf-core/molkart execution completed unsuccessfully!</h4>
16 | <p>The exit status of the task that caused the workflow execution to fail was: <code>$exitStatus</code>.</p>
17 | <p>The full error message was:</p>
18 | <pre>${errorReport}</pre>
19 | </div>
20 | """
21 | } else {
22 | out << """
23 | <div>
24 | nf-core/molkart execution completed successfully!
25 | </div>
26 | """
27 | }
28 | %>
29 |
30 | <p>The workflow was completed at $dateComplete (duration: $duration)</p>
31 |
32 | <p>The command used to launch the workflow was as follows:</p>
33 | <pre>$commandLine</pre>
34 |
35 | <h3>Pipeline Configuration:</h3>
36 | <table>
37 | <tbody>
38 | <% out << summary.collect{ k,v -> "<tr><th>$k</th><td><pre>$v</pre></td></tr>" }.join("\n") %>
39 | </tbody>
40 | </table>
41 |
42 | <p>nf-core/molkart</p>
43 | <p><a href="https://github.com/nf-core/molkart">https://github.com/nf-core/molkart</a></p>
44 |
45 | </div>
46 | </body>
47 | </html>
48 |
-------------------------------------------------------------------------------- /assets/email_template.txt: --------------------------------------------------------------------------------
1 | ----------------------------------------------------
2 |                                         ,--./,-.
3 |         ___     __   __   __   ___     /,-._.--~\\
4 |   |\\ | |__  __ /  ` /  \\ |__) |__         }  {
5 |   | \\| |       \\__, \\__/ |  \\ |___     \\`-._,-`-,
6 |                                         `._,._,'
7 |   nf-core/molkart ${version}
8 | ----------------------------------------------------
9 | Run Name: $runName
10 |
11 | <% if (success){
12 |     out << "## nf-core/molkart execution completed successfully! ##"
13 | } else {
14 |     out << """####################################################
15 | ## nf-core/molkart execution completed unsuccessfully! ##
16 | ####################################################
17 | The exit status of the task that caused the workflow execution to fail was: $exitStatus.
18 | The full error message was:
19 |
20 | ${errorReport}
21 | """
22 | } %>
23 |
24 |
25 | The workflow was completed at $dateComplete (duration: $duration)
26 |
27 | The command used to launch the workflow was as follows:
28 |
29 |   $commandLine
30 |
31 |
32 |
33 | Pipeline Configuration:
34 | -----------------------
35 | <% out << summary.collect{ k,v -> " - $k: $v" }.join("\n") %>
36 |
37 | --
38 | nf-core/molkart
39 | https://github.com/nf-core/molkart
40 |
-------------------------------------------------------------------------------- /assets/methods_description_template.yml: --------------------------------------------------------------------------------
1 | id: "nf-core-molkart-methods-description"
2 | description: "Suggested text and references to use when describing pipeline usage within the methods section of a publication."
3 | section_name: "nf-core/molkart Methods Description"
4 | section_href: "https://github.com/nf-core/molkart"
5 | plot_type: "html"
6 | ## You inject any metadata in the Nextflow '${workflow}' object
7 | data: |
8 |   <h4>Methods</h4>
9 |   <p>Data was processed using nf-core/molkart v${workflow.manifest.version} ${doi_text} of the nf-core collection of workflows (Ewels et al., 2020), utilising reproducible software environments from the Bioconda (Grüning et al., 2018) and Biocontainers (da Veiga Leprevost et al., 2017) projects.</p>
10 |   <p>The pipeline was executed with Nextflow v${workflow.nextflow.version} (Di Tommaso et al., 2017) with the following command:</p>
11 |   <pre><code>${workflow.commandLine}</code></pre>
12 |   <p>${tool_citations}</p>
13 |   <h4>References</h4>
14 |   <ul>
15 |     <li>Di Tommaso, P., Chatzou, M., Floden, E. W., Barja, P. P., Palumbo, E., & Notredame, C. (2017). Nextflow enables reproducible computational workflows. Nature Biotechnology, 35(4), 316-319. doi: 10.1038/nbt.3820</li>
16 |     <li>Ewels, P. A., Peltzer, A., Fillinger, S., Patel, H., Alneberg, J., Wilm, A., Garcia, M. U., Di Tommaso, P., & Nahnsen, S. (2020). The nf-core framework for community-curated bioinformatics pipelines. Nature Biotechnology, 38(3), 276-278. doi: 10.1038/s41587-020-0439-x</li>
17 |     <li>Grüning, B., Dale, R., Sjödin, A., Chapman, B. A., Rowe, J., Tomkins-Tinch, C. H., Valieris, R., Köster, J., & Bioconda Team. (2018). Bioconda: sustainable and comprehensive software distribution for the life sciences. Nature Methods, 15(7), 475–476. doi: 10.1038/s41592-018-0046-7</li>
18 |     <li>da Veiga Leprevost, F., Grüning, B. A., Alves Aflitos, S., Röst, H. L., Uszkoreit, J., Barsnes, H., Vaudel, M., Moreno, P., Gatto, L., Weber, J., Bai, M., Jimenez, R. C., Sachsenberg, T., Pfeuffer, J., Vera Alvarez, R., Griss, J., Nesvizhskii, A. I., & Perez-Riverol, Y. (2017). BioContainers: an open-source and community-driven framework for software standardization. Bioinformatics (Oxford, England), 33(16), 2580–2582. doi: 10.1093/bioinformatics/btx192</li>
19 |     ${tool_bibliography}
20 |   </ul>
21 |   <div>
22 |     <em>Notes:</em>
23 |     <ul>
24 |       ${nodoi_text}
25 |       <li>The command above does not include parameters contained in any configs or profiles that may have been used. Ensure the config file is also uploaded with your publication!</li>
26 |       <li>You should also cite all software used within this run. Check the "Software Versions" of this report to get version information.</li>
27 |     </ul>
28 |   </div>
29 |
-------------------------------------------------------------------------------- /assets/multiqc_config.yml: --------------------------------------------------------------------------------
1 | custom_logo_url: https://github.com/nf-core/molkart/
2 | custom_logo_title: "nf-core/molkart"
3 |
4 | report_comment: >
5 |   This report has been generated by the <a href="https://github.com/nf-core/molkart" target="_blank">nf-core/molkart</a>
6 |   analysis pipeline. For information about how
7 |   to interpret these results, please see the
8 |   <a href="https://nf-co.re/molkart/output" target="_blank">documentation</a>.
9 | report_section_order:
10 |   segmentation_stats:
11 |     order: 800
12 |   my_custom_content_image:
13 |     order: 800
14 |   "nf-core-molkart-methods-description":
15 |     order: -1000
16 |   software_versions:
17 |     order: -1001
18 |   "nf-core-molkart-summary":
19 |     order: -1002
20 |
21 | export_plots: true
22 |
23 | run_modules:
24 |   - custom_content
25 |
26 | custom_data:
27 |   my_custom_content_image:
28 |     section_name: "Crop selection overview"
29 |   segmentation_stats:
30 |     file_format: "csv"
31 |     plot_type: "table"
32 |     section_name: "QC statistics from segmentation"
33 |     headers:
34 |       sample_id:
35 |         title: Sample ID
36 |         description: "Sample"
37 |       segmentation_method:
38 |         title: Segmentation method
39 |         description: "Segmentation method"
40 |       total_cells:
41 |         title: Total number of cells
42 |         description: "Total # cells"
43 |       avg_area:
44 |         title: Average cell area
45 |         description: "Average area per cell"
46 |       total_spots:
47 |         title: Total number of spots
48 |         description: "Total number of spots detected"
49 |       spot_assign_per_cell:
50 |         title: Average spots assigned per cell
51 |         description: "Average number of spots assigned per cell"
52 |       spot_assign_total:
53 |         title: Total spots assigned
54 |         description: "Total number of spots assigned"
55 |       spot_assign_percent:
56 |         title: Percentage of spots assigned to cells
57 |         description: "% of spots assigned to cells"
58 |       duplicated_total:
59 |         title: Total number of duplicated spots near grid lines
60 |         description: "Total number of duplicated spots"
61 |       labels_total:
62 |         title: Total number of cells before filtering
63 |         description: "Total number of segmented labels"
64 |       labels_below_thresh:
65 |         title: Number of removed small cells
66 |         description: "Total number of labels below min_area"
67 |       labels_above_thresh:
68 |         title: Number of removed large cells
69 |         description: "Total number of labels above max_area"
70 | sp:
71 |   my_custom_content_image:
72 |     fn: "*overview.png"
73 |   segmentation_stats:
74 |     fn: "*spot_QC.csv"
75 |     shared: true
76 | ignore_images: false
77 |
78 | disable_version_detection: true
79 |
-------------------------------------------------------------------------------- /assets/nf-core-molkart_logo_light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nf-core/molkart/d33d298b06ad8b4463a635bf02bb9f4f351e724b/assets/nf-core-molkart_logo_light.png -------------------------------------------------------------------------------- /assets/samplesheet.csv: --------------------------------------------------------------------------------
1 | sample,nuclear_image,spot_table
2 | sample_fov1,/path/to/dapi/file/sample_fov1.DAPI.tiff,/path/to/spots/file/sample_fov1.spots.txt
3 | sample_fov2,/path/to/dapi/file/sample_fov2.DAPI.tiff,/path/to/spots/file/sample_fov2.spots.txt
4 |
-------------------------------------------------------------------------------- /assets/schema_input.json: --------------------------------------------------------------------------------
1 | {
2 | "$schema": "https://json-schema.org/draft/2020-12/schema",
3 | "$id": "https://raw.githubusercontent.com/nf-core/molkart/master/assets/schema_input.json",
4 | "title": "nf-core/molkart pipeline - params.input schema",
5 | "description": "Schema for the file provided with params.input",
6 | "type": "array",
7 | "items": {
8 | "type": "object",
9 | "properties": {
10 | "sample": {
11 | "type": "string",
12 | "pattern": "^\\S+$",
13 | "errorMessage": "Sample name must be provided and cannot contain spaces"
14 | },
15 | "nuclear_image": {
16 | "type": "string",
17 | "pattern": "^\\S+\\.(tif|tiff)$",
18 | "format": "file-path",
19 | "errorMessage": "Nuclear image must be provided, cannot contain spaces and must have extension '.tif' or '.tiff'"
20 | },
21 | "spot_table": {
22 | "type": "string",
23 | "pattern": "^\\S+\\.(txt|tsv)$",
24 | "format": "file-path",
25 | "errorMessage": "Spot table must be provided as a tab-separated file with columns x, y, z and gene; it cannot contain spaces and must have extension '.txt' or '.tsv'"
26 | },
27 | "membrane_image": {
28 | "errorMessage": "Membrane image is optional; if provided, it cannot contain spaces and must have extension '.tif' or '.tiff'",
29 | "anyOf": [
30 | {
31 | "type": "string",
32 | "pattern": "^\\S+\\.(tif|tiff)$",
33 | "format": "file-path"
34 | },
35 | {
36 | "type": "string",
37 | "maxLength": 0
38 | }
39 | ]
40 | }
41 | },
42 | "required": ["sample", "nuclear_image", "spot_table"]
43 | }
44 | }
45 |
-------------------------------------------------------------------------------- /assets/sendmail_template.txt: --------------------------------------------------------------------------------
1 | To: $email
2 | Subject: $subject
3 | Mime-Version: 1.0
4 | Content-Type: multipart/related;boundary="nfcoremimeboundary"
5 |
6 | --nfcoremimeboundary
7 | Content-Type: text/html; charset=utf-8
8 |
9 | $email_html
10 |
11 | --nfcoremimeboundary
12 | Content-Type: image/png;name="nf-core-molkart_logo.png"
13 | Content-Transfer-Encoding: base64
14 | Content-ID: <nfcorepipelinelogo>
15 | Content-Disposition: inline; filename="nf-core-molkart_logo_light.png"
16 |
17 | <% out << new File("$projectDir/assets/nf-core-molkart_logo_light.png").
18 |     bytes.
19 |     encodeBase64().
20 |     toString().
21 |     tokenize( '\n' )*.
22 |     toList()*.
23 |     collate( 76 )*.
24 |     collect { it.join() }.
25 |     flatten().
26 |     join( '\n' ) %>
27 |
28 | <%
29 | if (mqcFile){
30 | def mqcFileObj = new File("$mqcFile")
31 | if (mqcFileObj.length() < mqcMaxSize){
32 | out << """
33 | --nfcoremimeboundary
34 | Content-Type: text/html; name=\"multiqc_report\"
35 | Content-Transfer-Encoding: base64
36 | Content-ID: <mqcreport>
37 | Content-Disposition: attachment; filename=\"${mqcFileObj.getName()}\"
38 |
39 | ${mqcFileObj.
40 |     bytes.
41 |     encodeBase64().
42 |     toString().
43 |     tokenize( '\n' )*.
44 |     toList()*.
45 |     collate( 76 )*.
46 |     collect { it.join() }.
47 |     flatten().
48 | join( '\n' )} 49 | """ 50 | }} 51 | %> 52 | 53 | --nfcoremimeboundary-- 54 | -------------------------------------------------------------------------------- /assets/slackreport.json: -------------------------------------------------------------------------------- 1 | { 2 | "attachments": [ 3 | { 4 | "fallback": "Plain-text summary of the attachment.", 5 | "color": "<% if (success) { %>good<% } else { %>danger<%} %>", 6 | "author_name": "nf-core/molkart ${version} - ${runName}", 7 | "author_icon": "https://www.nextflow.io/docs/latest/_static/favicon.ico", 8 | "text": "<% if (success) { %>Pipeline completed successfully!<% } else { %>Pipeline completed with errors<% } %>", 9 | "fields": [ 10 | { 11 | "title": "Command used to launch the workflow", 12 | "value": "```${commandLine}```", 13 | "short": false 14 | } 15 | <% 16 | if (!success) { %> 17 | , 18 | { 19 | "title": "Full error message", 20 | "value": "```${errorReport}```", 21 | "short": false 22 | }, 23 | { 24 | "title": "Pipeline configuration", 25 | "value": "<% out << summary.collect{ k,v -> k == "hook_url" ? "_${k}_: (_hidden_)" : ( ( v.class.toString().contains('Path') || ( v.class.toString().contains('String') && v.contains('/') ) ) ? "_${k}_: `${v}`" : (v.class.toString().contains('DateTime') ? ("_${k}_: " + v.format(java.time.format.DateTimeFormatter.ofLocalizedDateTime(java.time.format.FormatStyle.MEDIUM))) : "_${k}_: ${v}") ) }.join(",\n") %>", 26 | "short": false 27 | } 28 | <% } 29 | %> 30 | ], 31 | "footer": "Completed at <% out << dateComplete.format(java.time.format.DateTimeFormatter.ofLocalizedDateTime(java.time.format.FormatStyle.MEDIUM)) %> (duration: ${duration})" 32 | } 33 | ] 34 | } 35 | -------------------------------------------------------------------------------- /bin/collect_QC.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | #### Written by Kresimir Bestak and Florian Wuennemann and released under the MIT license. 
3 | #### This script takes regionprops_table output from mcquant and the raw spot tables from Resolve Biosciences as input
4 | #### and calculates some QC metrics for masks and spot assignments
5 | #### If png files are provided, it combines them into one
6 |
7 | import argparse
8 | import pandas as pd
9 | from PIL import Image, ImageDraw, ImageFont
10 | import os
11 | import numpy as np
12 |
13 |
14 | def combine_png_files(input_paths, output_path):
15 |     print(input_paths)
16 |     images = []
17 |     for file_path in input_paths:
18 |         img = Image.open(file_path)
19 |         image_name = os.path.basename(file_path).replace(".ome", "").replace(".crop", "_crop")
20 |         draw = ImageDraw.Draw(img)
21 |         font_size = 50
22 |         font = ImageFont.load_default(font_size)
23 |         draw.text((100, 50), image_name, fill="black", font=font)
24 |         images.append(img)
25 |
26 |     width, height = images[0].size
27 |     combined_image = Image.new("RGB", (width, len(images) * height))
28 |     for i, img in enumerate(images):
29 |         combined_image.paste(img, (0, i * height))
30 |     combined_image.save(os.path.join(output_path, "crop_overview.png"))
31 |
32 |
33 | def summarize_spots(spot_table):
34 |     ## Calculate number of spots per gene
35 |     tx_per_gene = spot_table.groupby("gene").count().reset_index()
36 |
37 |     ## Calculate the total number of spots in spot_table
38 |     total_spots = spot_table.shape[0]
39 |
40 |     ## Get list of genes
41 |     genes = spot_table["gene"].unique()
42 |
43 |     return (tx_per_gene, total_spots, genes)
44 |
45 |
46 | def summarize_segmasks(cellxgene_table, spots_summary):
47 |     ## Calculate the total number of cells (rows) in cellxgene_table
48 |     total_cells = cellxgene_table.shape[0]
49 |
50 |     ## Calculate the average segmentation area from column Area in cellxgene_table
51 |     avg_area = round(cellxgene_table["Area"].mean(), 2)
52 |
53 |     ## Calculate the % of spots assigned
54 |     ## Sum the per-gene count columns of cellxgene_table to get the number of assigned spots
55 |     spot_assign = cellxgene_table[spots_summary[2]].sum(axis=1)
56 |     spot_assign_total = int(sum(spot_assign))
57 |     spot_assign_per_cell = spot_assign_total / total_cells if total_cells else 0
58 |     spot_assign_per_cell = round(spot_assign_per_cell, 2)
59 |     # spot_assign_per_cell = spot_assign_total / total_cells
60 |     spot_assign_percent = spot_assign_total / spots_summary[1] * 100
61 |     spot_assign_percent = round(spot_assign_percent, 2)
62 |
63 |     return (total_cells, avg_area, spot_assign_per_cell, spot_assign_total, spot_assign_percent)
64 |
65 |
66 | if __name__ == "__main__":
67 |     # Argument parser with input options (cellxgene_table, spots) and output options (outdir, sample_id)
68 |     parser = argparse.ArgumentParser()
69 |     parser.add_argument("-i", "--cellxgene", help="cellxgene regionprops_table.")
70 |     parser.add_argument("-s", "--spots", help="Resolve Biosciences spot table.")
71 |     parser.add_argument("-o", "--outdir", help="Output directory.")
72 |     parser.add_argument("-d", "--sample_id", help="Sample ID.")
73 |     parser.add_argument("-g", "--segmentation_method", help="Segmentation method used.")
74 |     parser.add_argument("--filterqc", required=False, help="QC from mask filter step")
75 |     parser.add_argument("--png_overview", nargs="+", help="Crop overview image paths")
76 |     parser.add_argument("--version", action="version", version="0.1.0")
77 |
78 |     args = parser.parse_args()
79 |
80 |     if args.png_overview is not None:
81 |         combine_png_files(args.png_overview, args.outdir)
82 |
83 |     else:
84 |         ## Read in cellxgene_table table
85 |
cellxgene_table = pd.read_csv(args.cellxgene, sep=",") 86 | 87 | ## Read in spot table 88 | spots = pd.read_table(args.spots, sep="\t", names=["x", "y", "z", "gene"]) 89 | # below code had to be added to account for the spots.txt inputs if mindagap is skipped 90 | if (([val for val in spots.index.values] == [val for val in range(len(spots.index.values))]) == False): 91 | spots["gene"] = spots["z"] 92 | spots["z"] = spots["y"] 93 | spots["y"] = spots["x"] 94 | spots["x"] = spots.index 95 | spots.index = range(len(spots)) 96 | duplicated = sum(spots.gene.str.contains("Duplicated")) 97 | spots = spots[~spots.gene.str.contains("Duplicated")] 98 | 99 | ## Pass on filterqc values 100 | filterqc = pd.read_csv( 101 | args.filterqc, 102 | names=["below_min_area", "below_percentage", "above_max_area", "above_percentage", "total_labels"], 103 | header=None, 104 | ) 105 | 106 | ## Summarize spots table 107 | summary_spots = summarize_spots(spots) 108 | summary_segmentation = summarize_segmasks(cellxgene_table, summary_spots) 109 | 110 | ## Create pandas data frame with one row per parameter and write each value in summary_segmentation to a new row in the data frame 111 | summary_df = pd.DataFrame( 112 | columns=[ 113 | "sample_id", 114 | "segmentation_method", 115 | "total_cells", 116 | "avg_area", 117 | "total_spots", 118 | "spot_assign_per_cell", 119 | "spot_assign_total", 120 | "spot_assign_percent", 121 | "duplicated_total", 122 | "labels_total", 123 | "labels_below_thresh", 124 | "labels_above_thresh", 125 | ] 126 | ) 127 | summary_df.loc[0] = [ 128 | ##args.sample_id, 129 | args.sample_id + "_" + args.segmentation_method, 130 | args.segmentation_method, 131 | summary_segmentation[0], 132 | summary_segmentation[1], 133 | summary_spots[1], 134 | summary_segmentation[2], 135 | summary_segmentation[3], 136 | summary_segmentation[4], 137 | duplicated, 138 | filterqc.total_labels[1], 139 | filterqc.below_min_area[1], 140 | filterqc.above_max_area[1], 141 | ] 142 | print(args.sample_id) 143 | # Write summary_df to a csv file 144 | summary_df.to_csv( 145 | f"{args.outdir}/{args.sample_id}.{args.segmentation_method}.spot_QC.csv", header=True, index=False 146 | ) 147 | -------------------------------------------------------------------------------- /bin/create_anndata.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | ## Written by Kresimir Bestak and Florian Wuennemann and released under the MIT license. 3 | import pandas as pd 4 | import numpy as np 5 | from anndata import AnnData 6 | import argparse 7 | from argparse import ArgumentParser as AP 8 | from os.path import abspath 9 | import time 10 | from scipy.sparse import csr_matrix 11 | 12 | 13 | def get_args(): 14 | # Script description 15 | description = """Anndata object creation""" 16 | 17 | # Add parser 18 | parser = AP(description=description, formatter_class=argparse.RawDescriptionHelpFormatter) 19 | 20 | # Sections 21 | inputs = parser.add_argument_group(title="Required Input", description="Path to required input file") 22 | inputs.add_argument("-i", "--input", type=str, help="Path to the spot2cell csv file.") 23 | inputs.add_argument("-s", "--spatial_cols", nargs="+", help="Column names for location data.") 24 | inputs.add_argument( 25 | "-o", "--output", dest="output", action="store", required=True, help="Path to output anndata object." 
26 | ) 27 | inputs.add_argument("--version", action="version", version="0.1.0") 28 | arg = parser.parse_args() 29 | arg.input = abspath(arg.input) 30 | arg.output = abspath(arg.output) 31 | return arg 32 | 33 | 34 | def create_spatial_anndata(input, spatial_cols): 35 | df = pd.read_csv(input) 36 | spatial_coords = np.array(df[args.spatial_cols].values.tolist()) 37 | # Find the index of 'Y_centroid' column 38 | y_centroid_index = df.columns.get_loc("X_centroid") 39 | # Create a list of all columns from 'Y_centroid' to the end 40 | metadata_cols = df.columns[y_centroid_index:] 41 | # Extract the excluded columns as metadata 42 | metadata = df[metadata_cols] 43 | 44 | count_table = csr_matrix(df.drop(list(metadata_cols), axis=1).values.tolist()) 45 | adata = AnnData(count_table, obsm={"spatial": spatial_coords}) 46 | # Add the metadata to adata.obs 47 | for col in metadata.columns: 48 | adata.obs[col] = metadata[col].values 49 | adata.obs_names = [f"Cell_{i:d}" for i in range(adata.n_obs)] 50 | return adata 51 | 52 | 53 | def main(args): 54 | adata = create_spatial_anndata(args.input, args.spatial_cols) 55 | adata.write(args.output) 56 | 57 | 58 | if __name__ == "__main__": 59 | args = get_args() 60 | st = time.time() 61 | main(args) 62 | rt = time.time() - st 63 | print(f"Script finished in {rt // 60:.0f}m {rt % 60:.0f}s") 64 | -------------------------------------------------------------------------------- /bin/crop_tiff.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | ## Written by Kresimir Bestak and Florian Wuennemann and released under the MIT license 3 | import ast 4 | import tifffile as tiff 5 | import os 6 | import argparse 7 | import matplotlib.pyplot as plt 8 | import numpy as np 9 | 10 | 11 | # Create a function to create crops from a tiff image and a dictionary of crop coordinates 12 | def create_crops(tiff_image, crop_dict): 13 | for index, (crop_name, crop) in enumerate(crop_dict.items()): 14 | crop_image = tiff_image[:, crop[0][0] : crop[0][1], crop[1][0] : crop[1][1]] 15 | basename = os.path.basename(args.input) 16 | basename = os.path.splitext(basename)[0] 17 | tiff.imsave(f"./{basename}_crop{index}.tiff", crop_image) 18 | ## Create a plot with all crop regions highlighted on the full image for easier selection 19 | # Create a maximum projection of the channels in tiff_image 20 | tiff_image_max = np.max(tiff_image, axis=0) 21 | plt.imshow(tiff_image_max, cmap="gray") 22 | plt.plot( 23 | [crop[1][0], crop[1][1], crop[1][1], crop[1][0], crop[1][0]], 24 | [crop[0][0], crop[0][0], crop[0][1], crop[0][1], crop[0][0]], 25 | "red", 26 | linewidth=1, 27 | ) 28 | plt.text( 29 | crop[1][0], crop[0][0], str(index), color="white" 30 | ) # make the text red and add a label to each box with index of the crop 31 | plt.savefig(f"{basename}.crop_overview.png", dpi=300) 32 | 33 | 34 | ## Run the script 35 | if __name__ == "__main__": 36 | # Add argument parser with arguments for input tiffile, crop_summary input file and output tiffile 37 | parser = argparse.ArgumentParser() 38 | parser.add_argument("-i", "--input", help="Input tiffile.") 39 | parser.add_argument("-c", "--crop_summary", help="Crop summary file.") 40 | parser.add_argument("--version", action="version", version="0.1.0") 41 | args = parser.parse_args() 42 | 43 | # reading the crop information from the file 44 | with open(args.crop_summary) as f: 45 | crops = f.read() 46 | # reconstructing the data as a dictionary 47 | crops = ast.literal_eval(crops) 48 | ## Read 
in tiff image 49 | tiff_image = tiff.imread(args.input) 50 | if len(tiff_image.shape) == 2: 51 | tiff_image = np.expand_dims(tiff_image, axis=0) 52 | 53 | create_crops(tiff_image, crops) 54 | -------------------------------------------------------------------------------- /bin/maskfilter.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | ## Written by Kresimir Bestak and Florian Wuennemann and released under the MIT license 3 | import time 4 | import argparse 5 | from argparse import ArgumentParser as AP 6 | from os.path import abspath 7 | import argparse 8 | import numpy as np 9 | from skimage.measure import label, regionprops 10 | from skimage.io import imread, imsave 11 | from os.path import abspath 12 | from argparse import ArgumentParser as AP 13 | import time 14 | import pandas as pd 15 | 16 | 17 | def get_args(): 18 | # Script description 19 | description = """Segmentation mask filtering""" 20 | 21 | # Add parser 22 | parser = AP(description=description, formatter_class=argparse.RawDescriptionHelpFormatter) 23 | 24 | # Sections 25 | inputs = parser.add_argument_group(title="Required Input", description="Path to required input file") 26 | inputs.add_argument("-r", "--input", dest="input", action="store", required=True, help="File path to input image.") 27 | inputs.add_argument("-o", "--output", dest="output", action="store", required=True, help="Path to output image.") 28 | inputs.add_argument( 29 | "--output_qc", dest="output_qc", action="store", required=False, help="Path to output qc csv file." 30 | ) 31 | inputs.add_argument( 32 | "--min_area", 33 | dest="min_area", 34 | action="store", 35 | type=int, 36 | default=None, 37 | help="Lower area (in px) threshold for cell removal", 38 | ) 39 | inputs.add_argument( 40 | "--max_area", 41 | dest="max_area", 42 | action="store", 43 | type=int, 44 | default=None, 45 | help="Upper area (in px) threshold for cell removal", 46 | ) 47 | inputs.add_argument("--version", action="version", version="0.1.0") 48 | arg = parser.parse_args() 49 | 50 | # Standardize paths 51 | arg.input = abspath(arg.input) 52 | arg.output = abspath(arg.output) 53 | if arg.output_qc is None: 54 | arg.output_qc = abspath(arg.output.replace(".tif", ".csv")) 55 | return arg 56 | 57 | 58 | def filter_areas(mask, min_area=None, max_area=None): 59 | labeled_mask = label(mask, background=0) 60 | measure_tmp = regionprops(labeled_mask) 61 | num_cells = len(measure_tmp) 62 | # Create a mapping between label and area 63 | label_area_map = {prop.label: prop.area for prop in measure_tmp} 64 | 65 | if min_area and max_area: 66 | small_valid_labels = np.array([label for label, area in label_area_map.items() if area >= min_area]) 67 | large_valid_labels = np.array([label for label, area in label_area_map.items() if area <= max_area]) 68 | valid_labels = np.intersect1d(small_valid_labels, large_valid_labels) 69 | retained_masks = np.isin(labeled_mask, valid_labels) * labeled_mask 70 | small_labels = num_cells - len(small_valid_labels) 71 | large_labels = num_cells - len(large_valid_labels) 72 | relabeled_mask = label(retained_masks, background=0) 73 | elif min_area: 74 | valid_labels = np.array([label for label, area in label_area_map.items() if area >= min_area]) 75 | retained_masks = np.isin(labeled_mask, valid_labels) * labeled_mask 76 | large_labels = 0 77 | small_labels = num_cells - len(valid_labels) 78 | relabeled_mask = label(retained_masks, background=0) 79 | elif max_area: 80 | valid_labels = np.array([label 
for label, area in label_area_map.items() if area <= max_area]) 81 | retained_masks = np.isin(labeled_mask, valid_labels) * labeled_mask 82 | large_labels = num_cells - len(valid_labels) 83 | small_labels = 0 84 | relabeled_mask = label(retained_masks, background=0) 85 | else: 86 | small_labels = 0 87 | large_labels = 0 88 | relabeled_mask = labeled_mask 89 | 90 | return relabeled_mask, small_labels, large_labels, num_cells 91 | 92 | 93 | def main(args): 94 | print(f"Head directory = {args.input}") 95 | 96 | # Example usage 97 | in_path = args.input 98 | output = args.output 99 | min_area = args.min_area 100 | max_area = args.max_area 101 | 102 | mask = imread(in_path) 103 | mask, small, big, total = filter_areas(mask, min_area=min_area, max_area=max_area) 104 | imsave(output, mask.astype("int32"), check_contrast=False) 105 | print(f"Filtered mask saved to {output}") 106 | 107 | qc_df = pd.DataFrame( 108 | { 109 | "below_min_area": [small], 110 | "below_percentage": [small / total], 111 | "above_max_area": [big], 112 | "above_percentage": [big / total], 113 | "total_labels": [total], 114 | }, 115 | index=None, 116 | ) 117 | qc_df.to_csv(output.replace(".tif", ".csv"), index=False) 118 | print() 119 | 120 | 121 | if __name__ == "__main__": 122 | # Read in arguments 123 | args = get_args() 124 | 125 | # Run script 126 | st = time.time() 127 | main(args) 128 | rt = time.time() - st 129 | print(f"Script finished in {rt // 60:.0f}m {rt % 60:.0f}s") 130 | -------------------------------------------------------------------------------- /bin/spot2cell.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | ## Written by Kresimir Bestak and Florian Wuennemann and released under the MIT license 3 | 4 | ## Import packages 5 | import pandas as pd 6 | import numpy as np 7 | from skimage.measure import regionprops_table 8 | import tifffile as tiff 9 | import argparse 10 | import os 11 | 12 | 13 | def assign_spots2cell(spot_table, cell_mask): 14 | # Initialize a dictionary to hold the counts 15 | gene_counts = {} 16 | 17 | # Calculate cell properties for cell_mask using regionprops_table 18 | cell_props = regionprops_table( 19 | cell_mask, 20 | properties=[ 21 | "label", 22 | "centroid", 23 | "area", 24 | "major_axis_length", 25 | "minor_axis_length", 26 | "eccentricity", 27 | "solidity", 28 | "extent", 29 | "orientation", 30 | ], 31 | ) 32 | 33 | # Turn cell props into a pandas DataFrame and add a Cell_ID column 34 | name_map = { 35 | "CellID": "label", 36 | "X_centroid": "centroid-1", 37 | "Y_centroid": "centroid-0", 38 | "Area": "area", 39 | "MajorAxisLength": "major_axis_length", 40 | "MinorAxisLength": "minor_axis_length", 41 | "Eccentricity": "eccentricity", 42 | "Solidity": "solidity", 43 | "Extent": "extent", 44 | "Orientation": "orientation", 45 | } 46 | 47 | for new_name, old_name in name_map.items(): 48 | cell_props[new_name] = cell_props[old_name] 49 | 50 | for old_name in set(name_map.values()): 51 | del cell_props[old_name] 52 | 53 | cell_props = pd.DataFrame(cell_props) 54 | 55 | # Exclude any rows that contain Duplicated in the gene column from spot_table 56 | spot_table = spot_table[~spot_table["gene"].str.contains("Duplicated")] 57 | 58 | # Iterate over each row in the grouped DataFrame 59 | for index, row in spot_table.iterrows(): 60 | # Get the x and y positions and gene 61 | x = int(row["x"]) 62 | y = int(row["y"]) 63 | gene = row["gene"] 64 | 65 | # Get the cell ID from the labeled mask 66 | cell_id = cell_mask[y, x] 67 | 68 
| # If the cell ID is not in the dictionary, add it 69 | if cell_id not in gene_counts: 70 | gene_counts[cell_id] = {} 71 | if gene not in gene_counts[cell_id]: 72 | gene_counts[cell_id][gene] = 1 73 | else: 74 | gene_counts[cell_id][gene] += 1 75 | else: 76 | if gene not in gene_counts[cell_id]: 77 | gene_counts[cell_id][gene] = 1 78 | else: 79 | # Add the count for this gene in this cell ID 80 | gene_counts[cell_id][gene] += 1 81 | 82 | # Convert the dictionary of counts into a DataFrame 83 | gene_counts_df = pd.DataFrame(gene_counts).T 84 | 85 | # Add a column to gene_counts_df for the Cell_ID, make it the first column of the table 86 | gene_counts_df["CellID"] = gene_counts_df.index 87 | 88 | # Add the regionprops data from cell_props for each cell ID to gene_counts_df add NA when cell_ID exists in cell_props but not in gene_counts_df 89 | gene_counts_df = gene_counts_df.merge(cell_props, on="CellID", how="outer") 90 | 91 | # Convert NaN values to 0 92 | gene_counts_df = gene_counts_df.fillna(0) 93 | 94 | # Sort by Cell_ID in ascending order 95 | gene_counts_df = gene_counts_df.sort_values(by=["CellID"]) 96 | 97 | # Make Cell_ID the first column in gene_counts_df 98 | gene_counts_df = gene_counts_df.set_index("CellID").reset_index() 99 | 100 | gene_counts_df[spot_table.gene.unique()] = gene_counts_df[spot_table.gene.unique()].astype(int) 101 | 102 | # Filter out cell_ID = 0 into it's own dataframe called background 103 | background = gene_counts_df[gene_counts_df["CellID"] == 0] 104 | gene_counts_df = gene_counts_df[gene_counts_df["CellID"] != 0] 105 | 106 | # Return both gene_counts_df and background 107 | return gene_counts_df, background 108 | 109 | 110 | if __name__ == "__main__": 111 | # Add a python argument parser with options for input, output and image size in x and y 112 | parser = argparse.ArgumentParser() 113 | parser.add_argument("-s", "--spot_table", help="Spot table to project.") 114 | parser.add_argument("-c", "--cell_mask", help="Sample ID.") 115 | parser.add_argument("--output", type=str, help="Output path") 116 | parser.add_argument("--version", action="version", version="0.1.0") 117 | 118 | args = parser.parse_args() 119 | 120 | ## Read in spot table 121 | spot_data = pd.read_csv( 122 | args.spot_table, names=["x", "y", "z", "gene", "empty"], sep="\t", header=None, index_col=None 123 | ) 124 | 125 | cell_mask = tiff.imread(args.cell_mask) 126 | 127 | gene_counts_df, background = assign_spots2cell(spot_data, cell_mask) 128 | 129 | gene_counts_df.to_csv(args.output, sep=",", header=True, index=False) 130 | -------------------------------------------------------------------------------- /bin/stack.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | ## Written by Kresimir Bestak and Florian Wuennemann and released under the MIT license 3 | import numpy as np 4 | import argparse 5 | import tifffile 6 | import dask.array as da 7 | from argparse import ArgumentParser as AP 8 | import palom.pyramid 9 | import palom.reader 10 | import copy 11 | import math 12 | import time 13 | 14 | 15 | def get_args(): 16 | parser = AP(description="Stack a list of images into a single image stack using Dask") 17 | parser.add_argument("-i", "--input", nargs="+", help="List of images to stack") 18 | parser.add_argument("-o", "--output", dest="output", type=str) 19 | parser.add_argument("--pixel_size", dest="pixel_size", type=float, default=0.138) 20 | parser.add_argument("--tile_size", dest="tilesize", type=int, default=1072) 21 | 
parser.add_argument("--version", action="version", version="0.1.0") 22 | return parser.parse_args() 23 | 24 | 25 | def num_levels_patch(self, base_shape): 26 | factor = max(base_shape) / self.max_pyramid_img_size 27 | return math.ceil(math.log(max(1, factor), self.downscale_factor)) + 1 28 | 29 | 30 | def main(args): 31 | img = palom.reader.OmePyramidReader(args.input[0]) 32 | mosaic = img.pyramid[0] 33 | mosaic_out = copy.copy(mosaic) 34 | 35 | for i in range(1, len(args.input)): 36 | img = palom.reader.OmePyramidReader(args.input[i]) 37 | mosaic = img.pyramid[0] 38 | mosaic_out = da.concatenate([mosaic_out, copy.copy(mosaic)], axis=0) 39 | 40 | palom.pyramid.PyramidSetting.num_levels = num_levels_patch 41 | palom.pyramid.write_pyramid( 42 | [mosaic_out], args.output, channel_names=["stack"], downscale_factor=2, pixel_size=0.138, tile_size=368 43 | ) 44 | 45 | 46 | if __name__ == "__main__": 47 | # Read in arguments 48 | args = get_args() 49 | 50 | # Run script 51 | st = time.time() 52 | main(args) 53 | rt = time.time() - st 54 | print(f"Script finished in {rt // 60:.0f}m {rt % 60:.0f}s") 55 | -------------------------------------------------------------------------------- /conf/base.config: -------------------------------------------------------------------------------- 1 | /* 2 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 3 | nf-core/molkart Nextflow base config file 4 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 5 | A 'blank slate' config file, appropriate for general use on most high performance 6 | compute environments. Assumes that all software is installed and available on 7 | the PATH. Runs in `local` mode - all jobs will be run on the logged in environment. 8 | ---------------------------------------------------------------------------------------- 9 | */ 10 | 11 | process { 12 | cpus = { 1 * task.attempt } 13 | memory = { 6.GB * task.attempt } 14 | time = { 4.h * task.attempt } 15 | 16 | errorStrategy = { task.exitStatus in ((130..145) + 104) ? 'retry' : 'finish' } 17 | maxRetries = 1 18 | maxErrors = '-1' 19 | 20 | // Process-specific resource requirements 21 | // NOTE - Please try and reuse the labels below as much as possible. 22 | // These labels are used and recognised by default in DSL2 files hosted on nf-core/modules. 23 | // If possible, it would be nice to keep the same label naming convention when 24 | // adding in your local modules too. 
25 |     withLabel:process_single {
26 |         cpus   = { 1 }
27 |         memory = { 6.GB * task.attempt }
28 |         time   = { 4.h  * task.attempt }
29 |     }
30 |     withLabel:process_low {
31 |         cpus   = { 2     * task.attempt }
32 |         memory = { 12.GB * task.attempt }
33 |         time   = { 4.h   * task.attempt }
34 |     }
35 |     withLabel:process_medium {
36 |         cpus   = { 6     * task.attempt }
37 |         memory = { 36.GB * task.attempt }
38 |         time   = { 8.h   * task.attempt }
39 |     }
40 |     withLabel:process_high {
41 |         cpus   = { 12    * task.attempt }
42 |         memory = { 72.GB * task.attempt }
43 |         time   = { 16.h  * task.attempt }
44 |     }
45 |     withLabel:process_long {
46 |         time = { 20.h * task.attempt }
47 |     }
48 |     withLabel:process_high_memory {
49 |         memory = { 200.GB * task.attempt }
50 |     }
51 |     withLabel:error_ignore {
52 |         errorStrategy = 'ignore'
53 |     }
54 |     withLabel:error_retry {
55 |         errorStrategy = 'retry'
56 |         maxRetries    = 3
57 |     }
58 |     withName:'ILASTIK_PIXELCLASSIFICATION|ILASTIK_MULTICUT' {
59 |         label = "process_high"
60 |     }
61 | }
-------------------------------------------------------------------------------- /conf/test.config: --------------------------------------------------------------------------------
1 | /*
2 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
3 |     Nextflow config file for running minimal tests
4 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
5 |     Defines input files and everything required to run a fast and simple pipeline test.
6 |
7 |     Use as follows:
8 |         nextflow run nf-core/molkart -profile test,<docker/singularity> --outdir <OUTDIR>
9 |
10 | ----------------------------------------------------------------------------------------
11 | */
12 |
13 | process {
14 |     resourceLimits = [
15 |         cpus: 4,
16 |         memory: '15.GB',
17 |         time: '1.h'
18 |     ]
19 | }
20 |
21 | params {
22 |     config_profile_name        = 'Test profile'
23 |     config_profile_description = 'Minimal test dataset to check pipeline function'
24 |
25 |     // Input data
26 |     input = 'https://raw.githubusercontent.com/nf-core/test-datasets/molkart/test_data/samplesheets/samplesheet_membrane.csv'
27 |     mindagap_tilesize   = 90
28 |     mindagap_boxsize    = 7
29 |     mindagap_loopnum    = 100
30 |     clahe_pyramid_tile  = 368
31 |     segmentation_method = "mesmer,cellpose,stardist"
32 |
33 |     // Only used if additionally params.create_training_subset is used
34 |     crop_size_x = 30
35 |     crop_size_y = 30
36 |     crop_amount = 2
37 | }
38 |
-------------------------------------------------------------------------------- /conf/test_full.config: --------------------------------------------------------------------------------
1 | /*
2 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
3 |     Nextflow config file for running full-size tests
4 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
5 |     Defines input files and everything required to run a full-size pipeline test.
6 |
7 |     Use as follows:
8 |         nextflow run nf-core/molkart -profile test_full,<docker/singularity> --outdir <OUTDIR>
9 |
10 | ----------------------------------------------------------------------------------------
11 | */
12 |
13 | params {
14 |     config_profile_name        = 'Full test profile'
15 |     config_profile_description = 'Full test dataset to check pipeline function'
16 |
17 |     input = params.pipelines_testdata_base_path + 'molkart/test_data/samplesheets/samplesheet_full_test.csv'
18 |
19 |     segmentation_method       = "mesmer,cellpose,stardist"
20 |     mindagap_boxsize          = 3
21 |     mindagap_loopnum          = 40
22 |     cellpose_pretrained_model = "nuclei"
23 |     stardist_n_tiles_x        = 20
24 |     stardist_n_tiles_y        = 20
25 | }
-------------------------------------------------------------------------------- /docs/README.md: --------------------------------------------------------------------------------
1 | # nf-core/molkart: Documentation
2 |
3 | The nf-core/molkart documentation is split into the following pages:
4 |
5 | - [Usage](usage.md)
6 |   - An overview of how the pipeline works, how to run it and a description of all of the different command-line flags.
7 | - [Output](output.md)
8 |   - An overview of the different results produced by the pipeline and how to interpret them.
9 |
10 | You can find a lot more documentation about installing, configuring and running nf-core pipelines on the website: [https://nf-co.re](https://nf-co.re)
11 |
-------------------------------------------------------------------------------- /docs/images/molkart_workflow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nf-core/molkart/d33d298b06ad8b4463a635bf02bb9f4f351e724b/docs/images/molkart_workflow.png -------------------------------------------------------------------------------- /docs/images/nf-core-molkart_logo_dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nf-core/molkart/d33d298b06ad8b4463a635bf02bb9f4f351e724b/docs/images/nf-core-molkart_logo_dark.png -------------------------------------------------------------------------------- /docs/images/nf-core-molkart_logo_light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nf-core/molkart/d33d298b06ad8b4463a635bf02bb9f4f351e724b/docs/images/nf-core-molkart_logo_light.png -------------------------------------------------------------------------------- /main.nf: --------------------------------------------------------------------------------
1 | #!/usr/bin/env nextflow
2 | /*
3 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
4 |     nf-core/molkart
5 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
6 |     Github : https://github.com/nf-core/molkart
7 |     Website: https://nf-co.re/molkart
8 |     Slack  : https://nfcore.slack.com/channels/molkart
9 | ----------------------------------------------------------------------------------------
10 | */
11 |
12 | /*
13 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
14 |     IMPORT FUNCTIONS / MODULES / SUBWORKFLOWS / WORKFLOWS
15 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
16 | */
17 |
18 | include { MOLKART                 } from './workflows/molkart'
19 | include { PIPELINE_INITIALISATION } from './subworkflows/local/utils_nfcore_molkart_pipeline'
20 | include { PIPELINE_COMPLETION     } from
'./subworkflows/local/utils_nfcore_molkart_pipeline' 21 | 22 | /* 23 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 24 | NAMED WORKFLOWS FOR PIPELINE 25 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 26 | */ 27 | 28 | // 29 | // WORKFLOW: Run main analysis pipeline depending on type of input 30 | // 31 | workflow NFCORE_MOLKART { 32 | 33 | take: 34 | samplesheet // channel: samplesheet read in from --input 35 | 36 | main: 37 | 38 | // 39 | // WORKFLOW: Run pipeline 40 | // 41 | MOLKART ( 42 | samplesheet 43 | ) 44 | emit: 45 | multiqc_report = MOLKART.out.multiqc_report // channel: /path/to/multiqc_report.html 46 | } 47 | /* 48 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 49 | RUN MAIN WORKFLOW 50 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 51 | */ 52 | 53 | workflow { 54 | 55 | main: 56 | // 57 | // SUBWORKFLOW: Run initialisation tasks 58 | // 59 | PIPELINE_INITIALISATION ( 60 | params.version, 61 | params.validate_params, 62 | params.monochrome_logs, 63 | args, 64 | params.outdir, 65 | params.input 66 | ) 67 | 68 | // 69 | // WORKFLOW: Run main workflow 70 | // 71 | NFCORE_MOLKART ( 72 | PIPELINE_INITIALISATION.out.samplesheet 73 | ) 74 | // 75 | // SUBWORKFLOW: Run completion tasks 76 | // 77 | PIPELINE_COMPLETION ( 78 | params.email, 79 | params.email_on_fail, 80 | params.plaintext_email, 81 | params.outdir, 82 | params.monochrome_logs, 83 | params.hook_url, 84 | NFCORE_MOLKART.out.multiqc_report 85 | ) 86 | } 87 | 88 | /* 89 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 90 | THE END 91 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 92 | */ 93 | -------------------------------------------------------------------------------- /modules.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "nf-core/molkart", 3 | "homePage": "https://github.com/nf-core/molkart", 4 | "repos": { 5 | "https://github.com/nf-core/modules.git": { 6 | "modules": { 7 | "nf-core": { 8 | "cellpose": { 9 | "branch": "master", 10 | "git_sha": "5bca12d66725b17a486edc24d1d2381d4170da25", 11 | "installed_by": ["modules"] 12 | }, 13 | "deepcell/mesmer": { 14 | "branch": "master", 15 | "git_sha": "666652151335353eef2fcd58880bcef5bc2928e1", 16 | "installed_by": ["modules"] 17 | }, 18 | "ilastik/multicut": { 19 | "branch": "master", 20 | "git_sha": "a4d916a39231897a49c903089a548704c09b8ef3", 21 | "installed_by": ["modules"] 22 | }, 23 | "ilastik/pixelclassification": { 24 | "branch": "master", 25 | "git_sha": "a4d916a39231897a49c903089a548704c09b8ef3", 26 | "installed_by": ["modules"] 27 | }, 28 | "mindagap/duplicatefinder": { 29 | "branch": "master", 30 | "git_sha": "81880787133db07d9b4c1febd152c090eb8325dc", 31 | "installed_by": ["modules"] 32 | }, 33 | "mindagap/mindagap": { 34 | "branch": "master", 35 | "git_sha": "81880787133db07d9b4c1febd152c090eb8325dc", 36 | "installed_by": ["modules"] 37 | }, 38 | "multiqc": { 39 | "branch": "master", 40 | "git_sha": "81880787133db07d9b4c1febd152c090eb8325dc", 41 | "installed_by": ["modules"] 42 | }, 43 | "stardist": { 44 | "branch": "master", 45 | "git_sha": "666652151335353eef2fcd58880bcef5bc2928e1", 46 | "installed_by": ["modules"] 47 | } 48 | } 49 | }, 50 | "subworkflows": { 51 | "nf-core": { 52 | "utils_nextflow_pipeline": { 53 | "branch": "master", 54 | 
"git_sha": "c2b22d85f30a706a3073387f30380704fcae013b", 55 | "installed_by": ["subworkflows"] 56 | }, 57 | "utils_nfcore_pipeline": { 58 | "branch": "master", 59 | "git_sha": "51ae5406a030d4da1e49e4dab49756844fdd6c7a", 60 | "installed_by": ["subworkflows"] 61 | }, 62 | "utils_nfschema_plugin": { 63 | "branch": "master", 64 | "git_sha": "2fd2cd6d0e7b273747f32e465fdc6bcc3ae0814e", 65 | "installed_by": ["subworkflows"] 66 | } 67 | } 68 | } 69 | } 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /modules/local/clahe/main.nf: -------------------------------------------------------------------------------- 1 | process CLAHE{ 2 | tag "$meta.id" 3 | label 'process_medium' 4 | 5 | container 'ghcr.io/schapirolabor/molkart-local:v0.0.4' 6 | 7 | input: 8 | tuple val(meta), path(image) 9 | 10 | output: 11 | tuple val(meta), path("*.tiff") , emit: img_clahe 12 | path "versions.yml" , emit: versions 13 | 14 | when: 15 | task.ext.when == null || task.ext.when 16 | 17 | script: 18 | def args = task.ext.args ?: '' 19 | def prefix = task.ext.prefix ?: "${meta.id}" 20 | """ 21 | apply_clahe.dask.py \\ 22 | --input ${image} \\ 23 | --output ${prefix}.tiff \\ 24 | $args 25 | 26 | cat <<-END_VERSIONS > versions.yml 27 | "${task.process}": 28 | molkart_clahe: \$(apply_clahe.dask.py --version) 29 | END_VERSIONS 30 | """ 31 | 32 | } 33 | -------------------------------------------------------------------------------- /modules/local/createanndata/main.nf: -------------------------------------------------------------------------------- 1 | process CREATE_ANNDATA { 2 | tag "$meta.id" 3 | label 'process_low' 4 | 5 | container 'ghcr.io/schapirolabor/molkart-local:v0.0.4' 6 | 7 | input: 8 | tuple val(meta), path(spot2cell) 9 | 10 | output: 11 | tuple val(meta), path("*.adata") , emit: stack 12 | path "versions.yml" , emit: versions 13 | 14 | when: 15 | task.ext.when == null || task.ext.when 16 | 17 | script: 18 | def args = task.ext.args ?: '' 19 | def prefix = task.ext.prefix ?: "${meta.id}" 20 | """ 21 | create_anndata.py \\ 22 | --input ${spot2cell} \\ 23 | --spatial_cols X_centroid Y_centroid \\ 24 | --output ${prefix}.adata \\ 25 | ${args} 26 | 27 | cat <<-END_VERSIONS > versions.yml 28 | "${task.process}": 29 | molkart_createanndata: \$(create_anndata.py --version) 30 | END_VERSIONS 31 | """ 32 | } 33 | -------------------------------------------------------------------------------- /modules/local/createstack/main.nf: -------------------------------------------------------------------------------- 1 | process CREATE_STACK { 2 | tag "$meta.id" 3 | label 'process_low' 4 | 5 | container 'ghcr.io/schapirolabor/molkart-local:v0.0.4' 6 | 7 | input: 8 | tuple val(meta), path(image) 9 | 10 | output: 11 | tuple val(meta), path("*.ome.tif") , emit: stack 12 | path "versions.yml" , emit: versions 13 | 14 | when: 15 | task.ext.when == null || task.ext.when 16 | 17 | script: 18 | def args = task.ext.args ?: '' 19 | def prefix = task.ext.prefix ?: "${meta.id}" 20 | """ 21 | stack.py \\ 22 | --input ${image} \\ 23 | --output ${prefix}.ome.tif \\ 24 | ${args} 25 | 26 | cat <<-END_VERSIONS > versions.yml 27 | "${task.process}": 28 | molkart_stack: \$(stack.py --version) 29 | END_VERSIONS 30 | """ 31 | } 32 | -------------------------------------------------------------------------------- /modules/local/crophdf5/main.nf: -------------------------------------------------------------------------------- 1 | process CROPHDF5 { 2 | tag "$meta.id" 3 | label 'process_single' 4 | 5 | 
container 'ghcr.io/schapirolabor/molkart-local:v0.0.4' 6 | 7 | input: 8 | tuple val(meta), path(image_stack), val(num_channels) 9 | 10 | output: 11 | tuple val(meta), path("*.hdf5"), emit: ilastik_training 12 | tuple val(meta), path("*.txt") , emit: crop_summary 13 | path "versions.yml" , emit: versions 14 | 15 | when: 16 | task.ext.when == null || task.ext.when 17 | 18 | script: 19 | def args = task.ext.args ?: '' 20 | """ 21 | crop_hdf5.py \\ 22 | --input ${image_stack} \\ 23 | --output . \\ 24 | --num_channels ${num_channels} \\ 25 | ${args} 26 | 27 | cat <<-END_VERSIONS > versions.yml 28 | "${task.process}": 29 | molkart_crophdf5: \$(crop_hdf5.py --version) 30 | END_VERSIONS 31 | """ 32 | } 33 | -------------------------------------------------------------------------------- /modules/local/croptiff/main.nf: -------------------------------------------------------------------------------- 1 | process CROPTIFF { 2 | tag "$meta.id" 3 | label 'process_single' 4 | 5 | container 'ghcr.io/schapirolabor/molkart-local:v0.0.4' 6 | 7 | input: 8 | tuple val(meta), path(image_stack), path(crop_summary) 9 | 10 | output: 11 | tuple val(meta), path("*.tiff"), emit: crop_tiff 12 | tuple val(meta), path("*.png") , emit: overview 13 | path "versions.yml" , emit: versions 14 | 15 | when: 16 | task.ext.when == null || task.ext.when 17 | 18 | script: 19 | def args = task.ext.args ?: '' 20 | """ 21 | crop_tiff.py \\ 22 | --input ${image_stack} \\ 23 | --crop_summary ${crop_summary} \\ 24 | ${args} 25 | 26 | cat <<-END_VERSIONS > versions.yml 27 | "${task.process}": 28 | molkart_croptiff: \$(crop_tiff.py --version) 29 | END_VERSIONS 30 | """ 31 | } 32 | -------------------------------------------------------------------------------- /modules/local/maskfilter/main.nf: -------------------------------------------------------------------------------- 1 | process MASKFILTER { 2 | tag "$meta.id" 3 | label 'process_medium' 4 | 5 | container 'ghcr.io/schapirolabor/molkart-local:v0.0.4' 6 | 7 | input: 8 | tuple val(meta), path(mask) 9 | 10 | output: 11 | tuple val(meta), path("*.tif"), emit: filtered_mask 12 | tuple val(meta), path("*.csv"), emit: filtered_qc 13 | path "versions.yml" , emit: versions 14 | 15 | when: 16 | task.ext.when == null || task.ext.when 17 | 18 | script: 19 | def args = task.ext.args ?: '' 20 | def prefix = task.ext.prefix ?: "${meta.id}" 21 | 22 | """ 23 | maskfilter.py \\ 24 | --input ${mask} \\ 25 | --output ${prefix}.tif \\ 26 | --output_qc ${prefix}.csv \\ 27 | $args 28 | 29 | cat <<-END_VERSIONS > versions.yml 30 | "${task.process}": 31 | molkart_maskfilter: \$(maskfilter.py --version) 32 | END_VERSIONS 33 | """ 34 | 35 | stub: 36 | def prefix = task.ext.prefix ?: "${meta.id}" 37 | 38 | """ 39 | touch ${prefix}.tif 40 | 41 | cat <<-END_VERSIONS > versions.yml 42 | "${task.process}": 43 | molkart_maskfilter: \$(maskfilter.py --version) 44 | END_VERSIONS 45 | """ 46 | } 47 | -------------------------------------------------------------------------------- /modules/local/molkartqc/main.nf: -------------------------------------------------------------------------------- 1 | process MOLKARTQC{ 2 | tag "$meta.id" 3 | label 'process_single' 4 | 5 | container 'ghcr.io/schapirolabor/molkart-local:v0.0.4' 6 | 7 | input: 8 | tuple val(meta), path(spot_table), path(cellxgene_table), val(segmethod), path(filterqc) 9 | 10 | output: 11 | tuple val(meta), path("*.csv"), emit: qc 12 | path "versions.yml" , emit: versions 13 | 14 | when: 15 | task.ext.when == null || task.ext.when 16 | 17 | script: 18 | def 
args = task.ext.args ?: '' 19 | def prefix = task.ext.prefix ?: "${meta.id}" 20 | 21 | """ 22 | collect_QC.py \\ 23 | --cellxgene ${cellxgene_table} \\ 24 | --spots ${spot_table} \\ 25 | --sample_id ${prefix} \\ 26 | --segmentation_method ${segmethod} \\ 27 | --filterqc ${filterqc} \\ 28 | --outdir . \\ 29 | ${args} 30 | 31 | cat <<-END_VERSIONS > versions.yml 32 | "${task.process}": 33 | molkartqc: \$(collect_QC.py --version) 34 | END_VERSIONS 35 | """ 36 | 37 | stub: 38 | def prefix = task.ext.prefix ?: "${meta.id}" 39 | 40 | """ 41 | touch ${prefix}.csv 42 | 43 | cat <<-END_VERSIONS > versions.yml 44 | "${task.process}": 45 | molkartqc: \$(collect_QC.py --version) 46 | END_VERSIONS 47 | """ 48 | } 49 | -------------------------------------------------------------------------------- /modules/local/molkartqcpng/main.nf: -------------------------------------------------------------------------------- 1 | process MOLKARTQCPNG { 2 | label 'process_single' 3 | 4 | container 'ghcr.io/schapirolabor/molkart-local:v0.0.4' 5 | 6 | input: 7 | path(png) 8 | 9 | output: 10 | path("*.png") , emit: png_overview 11 | path "versions.yml", emit: versions 12 | 13 | when: 14 | task.ext.when == null || task.ext.when 15 | 16 | script: 17 | def args = task.ext.args ?: '' 18 | 19 | """ 20 | collect_QC.py \\ 21 | --png_overview ${png} \\ 22 | --outdir . \\ 23 | ${args} 24 | 25 | cat <<-END_VERSIONS > versions.yml 26 | "${task.process}": 27 | molkartqc: \$(collect_QC.py --version) 28 | END_VERSIONS 29 | """ 30 | 31 | stub: 32 | def prefix = task.ext.prefix ?: ( png.name.toString().tokenize('.')[0] ) 33 | """ 34 | touch ${prefix}.png 35 | 36 | cat <<-END_VERSIONS > versions.yml 37 | "${task.process}": 38 | molkartqc: \$(collect_QC.py --version) 39 | END_VERSIONS 40 | """ 41 | } 42 | -------------------------------------------------------------------------------- /modules/local/spot2cell/main.nf: -------------------------------------------------------------------------------- 1 | process SPOT2CELL{ 2 | debug true 3 | tag "$meta.id" 4 | label 'process_single' 5 | 6 | container 'ghcr.io/schapirolabor/molkart-local:v0.0.4' 7 | 8 | input: 9 | tuple val(meta), path(spot_table), path(cell_mask) 10 | 11 | output: 12 | tuple val(meta), path("*.csv"), emit: cellxgene_table 13 | path "versions.yml" , emit: versions 14 | 15 | when: 16 | task.ext.when == null || task.ext.when 17 | 18 | script: 19 | def args = task.ext.args ?: '' 20 | def prefix = task.ext.prefix ?: "${meta.id}" 21 | 22 | """ 23 | spot2cell.py \\ 24 | --spot_table ${spot_table} \\ 25 | --cell_mask ${cell_mask} \\ 26 | --output ${prefix}.csv \\ 27 | $args 28 | 29 | cat <<-END_VERSIONS > versions.yml 30 | "${task.process}": 31 | molkart_spot2cell: \$(spot2cell.py --version) 32 | END_VERSIONS 33 | """ 34 | } 35 | -------------------------------------------------------------------------------- /modules/local/tiffh5convert/main.nf: -------------------------------------------------------------------------------- 1 | process TIFFH5CONVERT { 2 | tag "$meta.id" 3 | label 'process_single' 4 | 5 | container "ghcr.io/schapirolabor/molkart-local:v0.0.4" 6 | 7 | input: 8 | tuple val(meta), path(image), val(num_channels) 9 | 10 | output: 11 | tuple val(meta), path("*.hdf5"), emit: hdf5 12 | path "versions.yml" , emit: versions 13 | 14 | when: 15 | task.ext.when == null || task.ext.when 16 | 17 | script: 18 | def args = task.ext.args ?: '' 19 | """ 20 | crop_hdf5.py \\ 21 | --input ${image} \\ 22 | --output . 
\\ 23 | --num_channels ${num_channels} \\ 24 | ${args} 25 | 26 | cat <<-END_VERSIONS > versions.yml 27 | "${task.process}": 28 | molkart_crophdf5: \$(crop_hdf5.py --version) 29 | END_VERSIONS 30 | """ 31 | } 32 | -------------------------------------------------------------------------------- /modules/nf-core/cellpose/main.nf: -------------------------------------------------------------------------------- 1 | process CELLPOSE { 2 | tag "$meta.id" 3 | label 'process_medium' 4 | 5 | container "docker.io/biocontainers/cellpose:3.0.1_cv1" 6 | 7 | input: 8 | tuple val(meta), path(image) 9 | path(model) 10 | 11 | output: 12 | tuple val(meta), path("*masks.tif") , emit: mask 13 | tuple val(meta), path("*flows.tif") , emit: flows, optional: true 14 | path "versions.yml" , emit: versions 15 | 16 | when: 17 | task.ext.when == null || task.ext.when 18 | 19 | script: 20 | // Exit if running this module with -profile conda / -profile mamba 21 | if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { 22 | error "The CELLPOSE module does not support Conda. Please use Docker / Singularity / Podman instead." 23 | } 24 | def args = task.ext.args ?: '' 25 | def prefix = task.ext.prefix ?: "${meta.id}" 26 | def model_command = model ? "--pretrained_model $model" : "" 27 | """ 28 | export OMP_NUM_THREADS=${task.cpus} 29 | export MKL_NUM_THREADS=${task.cpus} 30 | cellpose \\ 31 | --image_path $image \\ 32 | --save_tif \\ 33 | $model_command \\ 34 | $args 35 | 36 | cat <<-END_VERSIONS > versions.yml 37 | "${task.process}": 38 | cellpose: \$(cellpose --version | awk 'NR==2 {print \$3}') 39 | END_VERSIONS 40 | """ 41 | stub: 42 | // Exit if running this module with -profile conda / -profile mamba 43 | if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { 44 | error "The CELLPOSE module does not support Conda. Please use Docker / Singularity / Podman instead." 45 | } 46 | def prefix = task.ext.prefix ?: "${meta.id}" 47 | def name = image.name 48 | def base = name.lastIndexOf('.') != -1 ? name[0..name.lastIndexOf('.') - 1] : name 49 | """ 50 | touch ${base}_cp_masks.tif 51 | 52 | cat <<-END_VERSIONS > versions.yml 53 | "${task.process}": 54 | cellpose: \$(cellpose --version | awk 'NR==2 {print \$3}') 55 | END_VERSIONS 56 | """ 57 | 58 | } 59 | -------------------------------------------------------------------------------- /modules/nf-core/cellpose/meta.yml: -------------------------------------------------------------------------------- 1 | name: "cellpose" 2 | description: cellpose segments cells in images 3 | keywords: 4 | - segmentation 5 | - image 6 | - cellpose 7 | tools: 8 | - "cellpose": 9 | description: "cellpose is an anatomical segmentation algorithm written in Python 10 | 3 by Carsen Stringer and Marius Pachitariu" 11 | homepage: "https://github.com/MouseLand/cellpose" 12 | documentation: "https://cellpose.readthedocs.io/en/latest/command.html" 13 | tool_dev_url: "https://github.com/MouseLand/cellpose" 14 | doi: 10.1038/s41592-022-01663-4 15 | licence: ["BSD 3-Clause"] 16 | identifier: biotools:cellpose 17 | input: 18 | - - meta: 19 | type: map 20 | description: | 21 | Groovy Map containing sample information 22 | (sample id) 23 | - image: 24 | type: file 25 | description: tif file ready for segmentation 26 | pattern: "*.{tif,tiff}" 27 | - - model: 28 | type: file 29 | description: Optional input file. Cellpose 2 model trained by user using human-in-the-loop 30 | approach.
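# NOTE (illustrative only): a minimal sketch of wiring this module into a workflow,
# assuming a hypothetical `ch_images` channel of [ meta, image ] tuples; pass `[]`
# as the second input to fall back to a built-in model, as the module tests do:
#
#   include { CELLPOSE } from './modules/nf-core/cellpose/main'
#
#   workflow {
#       ch_images = Channel.of( [ [ id:'sample1' ], file('sample1.tif') ] )
#       CELLPOSE ( ch_images, [] )
#       CELLPOSE.out.mask.view()
#   }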
31 | output: 32 | - mask: 33 | - meta: 34 | type: map 35 | description: | 36 | Groovy Map containing sample information 37 | [sample id] 38 | - "*masks.tif": 39 | type: file 40 | description: labelled mask output from cellpose in tif format 41 | pattern: "*.{tif, tiff}" 42 | - flows: 43 | - meta: 44 | type: map 45 | description: | 46 | Groovy Map containing sample information 47 | [sample id] 48 | - "*flows.tif": 49 | type: file 50 | description: cell flow output from cellpose 51 | pattern: "*.{tif}" 52 | - versions: 53 | - versions.yml: 54 | type: file 55 | description: File containing software versions 56 | pattern: "versions.yml" 57 | authors: 58 | - "@josenimo" 59 | - "@FloWuenne" 60 | maintainers: 61 | - "@josenimo" 62 | - "@FloWuenne" 63 | - "@kbestak" 64 | -------------------------------------------------------------------------------- /modules/nf-core/cellpose/tests/main.nf.test: -------------------------------------------------------------------------------- 1 | nextflow_process { 2 | 3 | name "Test Process CELLPOSE" 4 | script "../main.nf" 5 | process "CELLPOSE" 6 | 7 | tag "modules" 8 | tag "modules_nfcore" 9 | tag "cellpose" 10 | 11 | test("cellpose - with flows, no model") { 12 | 13 | when { 14 | config "./nextflow_wflows.config" 15 | process { 16 | """ 17 | input[0] = [ 18 | [ id:'test' ], 19 | file(params.modules_testdata_base_path + 'imaging/segmentation/cycif_tonsil_registered.ome.tif', checkIfExists: true) 20 | ] 21 | input[1] = [] 22 | """ 23 | } 24 | } 25 | 26 | then { 27 | assertAll ( 28 | { assert process.success }, 29 | { assert snapshot(process.out.mask).match("mask") }, 30 | { assert snapshot(process.out.flows).match("flows") }, 31 | { assert snapshot(process.out.versions).match("versions") } 32 | ) 33 | } 34 | 35 | } 36 | 37 | 38 | test("cellpose - stub") { 39 | 40 | options "-stub" 41 | 42 | when { 43 | process { 44 | """ 45 | input[0] = [ 46 | [ id:'test' ], 47 | file(params.modules_testdata_base_path + 'imaging/segmentation/cycif_tonsil_registered.ome.tif', checkIfExists: true) 48 | ] 49 | input[1] = [] 50 | """ 51 | } 52 | } 53 | 54 | then { 55 | assertAll( 56 | { assert process.success }, 57 | { assert snapshot(process.out).match() } 58 | ) 59 | } 60 | 61 | } 62 | 63 | } 64 | -------------------------------------------------------------------------------- /modules/nf-core/cellpose/tests/main.nf.test.snap: -------------------------------------------------------------------------------- 1 | { 2 | "flows": { 3 | "content": [ 4 | [ 5 | [ 6 | { 7 | "id": "test" 8 | }, 9 | "cycif_tonsil_registered.ome_flows.tif:md5,de79a792d4bebd2f9753ceb47a0de5f7" 10 | ] 11 | ] 12 | ], 13 | "meta": { 14 | "nf-test": "0.8.4", 15 | "nextflow": "23.10.1" 16 | }, 17 | "timestamp": "2024-03-18T14:22:16.855256249" 18 | }, 19 | "versions": { 20 | "content": [ 21 | [ 22 | "versions.yml:md5,ce42208b574084f390cf58b4c19b5717" 23 | ] 24 | ], 25 | "meta": { 26 | "nf-test": "0.8.4", 27 | "nextflow": "23.10.1" 28 | }, 29 | "timestamp": "2024-03-18T14:22:16.875087557" 30 | }, 31 | "cellpose - stub": { 32 | "content": [ 33 | { 34 | "0": [ 35 | [ 36 | { 37 | "id": "test" 38 | }, 39 | "cycif_tonsil_registered.ome_cp_masks.tif:md5,d41d8cd98f00b204e9800998ecf8427e" 40 | ] 41 | ], 42 | "1": [ 43 | 44 | ], 45 | "2": [ 46 | "versions.yml:md5,ce42208b574084f390cf58b4c19b5717" 47 | ], 48 | "flows": [ 49 | 50 | ], 51 | "mask": [ 52 | [ 53 | { 54 | "id": "test" 55 | }, 56 | "cycif_tonsil_registered.ome_cp_masks.tif:md5,d41d8cd98f00b204e9800998ecf8427e" 57 | ] 58 | ], 59 | "versions": [ 60 | 
"versions.yml:md5,ce42208b574084f390cf58b4c19b5717" 61 | ] 62 | } 63 | ], 64 | "meta": { 65 | "nf-test": "0.8.4", 66 | "nextflow": "23.10.1" 67 | }, 68 | "timestamp": "2024-03-18T14:22:39.339792992" 69 | }, 70 | "mask": { 71 | "content": [ 72 | [ 73 | [ 74 | { 75 | "id": "test" 76 | }, 77 | "cycif_tonsil_registered.ome_cp_masks.tif:md5,001ad312413f18bc2615741bd3ad12cf" 78 | ] 79 | ] 80 | ], 81 | "meta": { 82 | "nf-test": "0.8.4", 83 | "nextflow": "23.10.1" 84 | }, 85 | "timestamp": "2024-03-18T14:22:16.8369758" 86 | } 87 | } -------------------------------------------------------------------------------- /modules/nf-core/cellpose/tests/nextflow_wflows.config: -------------------------------------------------------------------------------- 1 | process { 2 | withName: "CELLPOSE" { 3 | ext.args = '--pretrained_model nuclei --diameter 9 --channel_axis 0 --no_npy --save_flows' 4 | } 5 | } 6 | -------------------------------------------------------------------------------- /modules/nf-core/cellpose/tests/tags.yml: -------------------------------------------------------------------------------- 1 | cellpose: 2 | - "modules/nf-core/cellpose/**" 3 | -------------------------------------------------------------------------------- /modules/nf-core/deepcell/mesmer/main.nf: -------------------------------------------------------------------------------- 1 | process DEEPCELL_MESMER { 2 | tag "$meta.id" 3 | label 'process_low' 4 | 5 | container "nf-core/deepcell_mesmer:0.4.1_noentry" 6 | 7 | input: 8 | tuple val(meta) , path(img) 9 | tuple val(meta2), path(membrane_img) 10 | 11 | // Output a .tif image, don't touch versions 12 | output: 13 | tuple val(meta), path("*.tif"), emit: mask 14 | path "versions.yml" , emit: versions 15 | 16 | when: 17 | task.ext.when == null || task.ext.when 18 | 19 | script: 20 | def args = task.ext.args ?: '' 21 | def prefix = task.ext.prefix ?: "${meta.id}" 22 | def membrane_command = membrane_img ? "--membrane-image $membrane_img" : "" 23 | def VERSION = "0.4.1" 24 | 25 | """ 26 | python /usr/src/app/run_app.py mesmer \\ 27 | --squeeze \\ 28 | --nuclear-image $img \\ 29 | --output-directory . \\ 30 | --output-name ${prefix}.tif \\ 31 | $membrane_command \\ 32 | $args 33 | 34 | cat <<-END_VERSIONS > versions.yml 35 | "${task.process}": 36 | deepcell_mesmer: $VERSION 37 | END_VERSIONS 38 | """ 39 | } 40 | -------------------------------------------------------------------------------- /modules/nf-core/deepcell/mesmer/meta.yml: -------------------------------------------------------------------------------- 1 | name: "deepcell_mesmer" 2 | description: Deepcell/mesmer segmentation for whole-cell 3 | keywords: 4 | - imaging 5 | - spatial_omics 6 | - segmentation 7 | tools: 8 | - "mesmer": 9 | description: "Deep cell is a collection of tools to segment imaging data" 10 | homepage: "https://github.com/vanvalenlab/deepcell-tf" 11 | documentation: "https://github.com/vanvalenlab/intro-to-deepcell/tree/master/pretrained_models" 12 | tool_dev_url: "https://githu/b.com/vanvalenlab/deepcell-tf" 13 | doi: 10.1038/s41587-021-01094-0 14 | licence: ["APACHE2"] 15 | identifier: biotools:deepcell 16 | input: 17 | # Only when we have meta 18 | - - meta: 19 | type: map 20 | description: | 21 | Groovy Map containing sample information 22 | e.g. [ id:'test', single_end:false ] 23 | - img: 24 | type: file 25 | description: Multichannel image file 26 | pattern: "*.{tiff,tif,h5,hdf5}" 27 | - - meta2: 28 | type: map 29 | description: | 30 | Groovy Map containing sample information 31 | e.g. 
[ id:'test', single_end:false ] 32 | - membrane_img: 33 | type: file 34 | description: Optional membrane image to be provided separately. 35 | pattern: "*.{tiff,tif,h5,hdf5}" 36 | output: 37 | #Only when we have meta 38 | - mask: 39 | - meta: 40 | type: map 41 | description: | 42 | Groovy Map containing sample information 43 | e.g. [ id:'test', single_end:false ] 44 | - "*.tif": 45 | type: file 46 | description: File containing the mask. 47 | pattern: "*.{tif, tiff}" 48 | - versions: 49 | - versions.yml: 50 | type: file 51 | description: File containing software versions 52 | pattern: "versions.yml" 53 | authors: 54 | - "@migueLib" 55 | - "@chiarasch" 56 | maintainers: 57 | - "@migueLib" 58 | - "@chiarasch" 59 | -------------------------------------------------------------------------------- /modules/nf-core/deepcell/mesmer/tests/main.nf.test: -------------------------------------------------------------------------------- 1 | nextflow_process { 2 | 3 | name "Test Process DEEPCELL_MESMER" 4 | script "../main.nf" 5 | config "./nextflow.config" 6 | process "DEEPCELL_MESMER" 7 | 8 | tag "modules" 9 | tag "modules_nfcore" 10 | tag "deepcell" 11 | tag "deepcell/mesmer" 12 | 13 | test("mesmer - tif") { 14 | 15 | when { 16 | process { 17 | """ 18 | input[0] = [ 19 | [ id: 'test_img' ], 20 | file(params.modules_testdata_base_path + 'imaging/segmentation/cycif_tonsil_registered.ome.tif', checkIfExists: true) 21 | ] 22 | input[1] = [ 23 | [:], 24 | [] 25 | ] 26 | """ 27 | } 28 | } 29 | 30 | then { 31 | assertAll( 32 | { assert process.success }, 33 | { assert snapshot(process.out.mask).match("mask") }, 34 | { assert snapshot(process.out.versions).match("versions") } 35 | ) 36 | } 37 | 38 | } 39 | 40 | } 41 | -------------------------------------------------------------------------------- /modules/nf-core/deepcell/mesmer/tests/main.nf.test.snap: -------------------------------------------------------------------------------- 1 | { 2 | "versions": { 3 | "content": [ 4 | [ 5 | "versions.yml:md5,922bf813163d265f8a7f12fa09fc18c2" 6 | ] 7 | ], 8 | "meta": { 9 | "nf-test": "0.8.4", 10 | "nextflow": "23.10.1" 11 | }, 12 | "timestamp": "2024-03-18T13:44:19.214421951" 13 | }, 14 | "mask": { 15 | "content": [ 16 | [ 17 | [ 18 | { 19 | "id": "test_img" 20 | }, 21 | "mask.tif:md5,1550535389bd24d4ea4a8288502b0afa" 22 | ] 23 | ] 24 | ], 25 | "meta": { 26 | "nf-test": "0.8.4", 27 | "nextflow": "23.10.1" 28 | }, 29 | "timestamp": "2024-03-18T13:44:19.190927583" 30 | } 31 | } -------------------------------------------------------------------------------- /modules/nf-core/deepcell/mesmer/tests/nextflow.config: -------------------------------------------------------------------------------- 1 | process { 2 | 3 | withName: "DEEPCELL_MESMER" { 4 | ext.prefix = 'mask' 5 | ext.args = '--image-mpp=0.65 --compartment=whole-cell --nuclear-channel 0 --membrane-channel 1' 6 | } 7 | 8 | } 9 | -------------------------------------------------------------------------------- /modules/nf-core/deepcell/mesmer/tests/tags.yml: -------------------------------------------------------------------------------- 1 | deepcell/mesmer: 2 | - "modules/nf-core/deepcell/mesmer/**" 3 | -------------------------------------------------------------------------------- /modules/nf-core/ilastik/multicut/main.nf: -------------------------------------------------------------------------------- 1 | process ILASTIK_MULTICUT { 2 | tag "$meta.id" 3 | label 'process_low' 4 | 5 | container "docker.io/biocontainers/ilastik:1.4.0_cv1" 6 | 7 | input: 8 | tuple 
val(meta), path(h5) 9 | tuple val(meta2), path (ilp) 10 | tuple val(meta3), path (probs) 11 | 12 | output: 13 | tuple val(meta), path("*.tiff") , emit: out_tiff 14 | path "versions.yml" , emit: versions 15 | 16 | when: 17 | task.ext.when == null || task.ext.when 18 | 19 | script: 20 | // Exit if running this module with -profile conda / -profile mamba 21 | if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { 22 | error "ILASTIK_MULTICUT module does not support Conda. Please use Docker / Singularity / Podman instead." 23 | } 24 | def args = task.ext.args ?: '' 25 | def prefix = task.ext.prefix ?: "${meta.id}" 26 | 27 | """ 28 | /opt/ilastik-1.4.0-Linux/run_ilastik.sh \\ 29 | --headless \\ 30 | --readonly 1 \\ 31 | --project=$ilp \\ 32 | --raw_data=$h5 \\ 33 | --probabilities=$probs \\ 34 | --export_source="Multicut Segmentation" \\ 35 | --output_filename_format=${prefix}.tiff \\ 36 | $args 37 | 38 | cat <<-END_VERSIONS > versions.yml 39 | "${task.process}": 40 | ilastik: \$(/opt/ilastik-1.4.0-Linux/run_ilastik.sh --headless --version) 41 | END_VERSIONS 42 | """ 43 | 44 | stub: 45 | // Exit if running this module with -profile conda / -profile mamba 46 | if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { 47 | error "ILASTIK_MULTICUT module does not support Conda. Please use Docker / Singularity / Podman instead." 48 | } 49 | def prefix = task.ext.prefix ?: "${meta.id}" 50 | def VERSION = "1.4.0" // WARN: Version information not provided by tool on CLI. Please update this string when bumping container versions. 51 | """ 52 | touch ${prefix}.tiff 53 | cat <<-END_VERSIONS > versions.yml 54 | "${task.process}": 55 | ilastik: $VERSION 56 | END_VERSIONS 57 | """ 58 | } 59 | -------------------------------------------------------------------------------- /modules/nf-core/ilastik/multicut/meta.yml: -------------------------------------------------------------------------------- 1 | name: "ilastik_multicut" 2 | description: Ilastik is a tool that utilizes machine learning algorithms to classify 3 | pixels, segment, track and count cells in images. Ilastik contains a graphical user 4 | interface to interactively label pixels. However, this nextflow module will implement 5 | the --headless mode, to apply multicut segmentation using a pre-trained .ilp file 6 | on an input image. 7 | keywords: 8 | - multicut 9 | - segmentation 10 | - pixel classification 11 | tools: 12 | - "ilastik": 13 | description: "Ilastik is a user friendly tool that enables pixel classification, 14 | segmentation and analysis." 15 | homepage: "https://www.ilastik.org/" 16 | documentation: "https://www.ilastik.org/documentation/" 17 | tool_dev_url: "https://github.com/ilastik/ilastik" 18 | licence: ["GPL3"] 19 | identifier: biotools:ilastik 20 | input: 21 | - - meta: 22 | type: map 23 | description: | 24 | Groovy Map containing sample information 25 | e.g. [ id:'test', single_end:false ] 26 | - h5: 27 | type: file 28 | description: h5 file containing the image stack to classify 29 | pattern: "*.{h5,hdf5}" 30 | - - meta2: 31 | type: map 32 | description: | 33 | Groovy Map containing sample information 34 | e.g. [ id:'test', single_end:false ] 35 | - ilp: 36 | type: file 37 | description: Trained ilastik .ilp project file 38 | pattern: "*.{ilp}" 39 | - - meta3: 40 | type: map 41 | description: | 42 | Groovy Map containing sample information 43 | e.g.
[ id:'test', single_end:false ] 44 | - probs: 45 | type: file 46 | description: Probability map for boundary-based segmentation 47 | pattern: "*.{h5,hdf5}" 48 | output: 49 | - out_tiff: 50 | - meta: 51 | type: map 52 | description: | 53 | Groovy Map containing sample information 54 | e.g. [ id:'test', single_end:false ] 55 | - "*.tiff": 56 | type: file 57 | description: Multicut segmentation mask output. 58 | pattern: "*.{tiff}" 59 | - versions: 60 | - versions.yml: 61 | type: file 62 | description: File containing software versions 63 | pattern: "versions.yml" 64 | authors: 65 | - "@FloWuenne" 66 | maintainers: 67 | - "@FloWuenne" 68 | -------------------------------------------------------------------------------- /modules/nf-core/ilastik/pixelclassification/main.nf: -------------------------------------------------------------------------------- 1 | process ILASTIK_PIXELCLASSIFICATION { 2 | tag "$meta.id" 3 | label 'process_single' 4 | 5 | container "docker.io/biocontainers/ilastik:1.4.0_cv1" 6 | 7 | input: 8 | tuple val(meta), path(input_img) 9 | tuple val(meta2), path(ilp) 10 | 11 | output: 12 | tuple val(meta), path("*.${suffix}") , emit: output 13 | path "versions.yml" , emit: versions 14 | 15 | when: 16 | task.ext.when == null || task.ext.when 17 | 18 | script: 19 | // Exit if running this module with -profile conda / -profile mamba 20 | if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { 21 | error "ILASTIK_PIXELCLASSIFICATION module does not support Conda. Please use Docker / Singularity / Podman instead." 22 | } 23 | def args = task.ext.args ?: '' 24 | def prefix = task.ext.prefix ?: "${meta.id}" 25 | suffix = task.ext.suffix ?: "h5" 26 | 27 | """ 28 | /opt/ilastik-1.4.0-Linux/run_ilastik.sh \\ 29 | --headless \\ 30 | --readonly 1 \\ 31 | --project=$ilp \\ 32 | --output_filename_format=${prefix}.${suffix} \\ 33 | $args \\ 34 | $input_img 35 | 36 | cat <<-END_VERSIONS > versions.yml 37 | "${task.process}": 38 | ilastik: \$(/opt/ilastik-1.4.0-Linux/run_ilastik.sh --headless --version) 39 | END_VERSIONS 40 | """ 41 | 42 | stub: 43 | // Exit if running this module with -profile conda / -profile mamba 44 | if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { 45 | error "ILASTIK_PIXELCLASSIFICATION module does not support Conda. Please use Docker / Singularity / Podman instead." 46 | } 47 | def prefix = task.ext.prefix ?: "${meta.id}" 48 | suffix = task.ext.suffix ?: "h5" 49 | 50 | """ 51 | touch ${prefix}.${suffix} 52 | 53 | cat <<-END_VERSIONS > versions.yml 54 | "${task.process}": 55 | ilastik: \$(/opt/ilastik-1.4.0-Linux/run_ilastik.sh --headless --version) 56 | END_VERSIONS 57 | """ 58 | } 59 | -------------------------------------------------------------------------------- /modules/nf-core/ilastik/pixelclassification/meta.yml: -------------------------------------------------------------------------------- 1 | name: "ilastik_pixelclassification" 2 | description: Ilastik is a tool that utilizes machine learning algorithms to classify 3 | pixels, segment, track and count cells in images. Ilastik contains a graphical user 4 | interface to interactively label pixels. However, this nextflow module will implement 5 | the --headless mode, to apply pixel classification using a pre-trained .ilp file 6 | on an input image.
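# NOTE (illustrative only): the headless command line this module assembles, with
# placeholder file names; `task.ext.args` is appended before the input image:
#
#   /opt/ilastik-1.4.0-Linux/run_ilastik.sh \
#       --headless \
#       --readonly 1 \
#       --project=trained_pixel_classifier.ilp \
#       --output_filename_format=sample1.h5 \
#       input_image.h5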
7 | keywords: 8 | - pixel_classification 9 | - segmentation 10 | - probability_maps 11 | tools: 12 | - "ilastik": 13 | description: "Ilastik is a user friendly tool that enables pixel classification, 14 | segmentation and analysis." 15 | homepage: "https://www.ilastik.org/" 16 | documentation: "https://www.ilastik.org/documentation/" 17 | tool_dev_url: "https://github.com/ilastik/ilastik" 18 | licence: ["GPL3"] 19 | identifier: biotools:ilastik 20 | input: 21 | - - meta: 22 | type: map 23 | description: | 24 | Groovy Map containing sample information for h5 file 25 | e.g. [ id:'test', single_end:false ] 26 | - input_img: 27 | type: file 28 | description: Input img file containing image stack to classify 29 | - - meta2: 30 | type: map 31 | description: | 32 | Groovy Map containing sample information for ilp file 33 | e.g. [ id:'test', single_end:false ] 34 | - ilp: 35 | type: file 36 | description: Trained ilastik pixel classification .ilp project file 37 | pattern: "*.{ilp}" 38 | output: 39 | - output: 40 | - meta: 41 | type: map 42 | description: | 43 | Groovy Map containing sample information 44 | e.g. [ id:'test', single_end:false ] 45 | - "*.${suffix}": 46 | type: file 47 | description: Output file from ilastik pixel classification. 48 | - versions: 49 | - versions.yml: 50 | type: file 51 | description: File containing software versions 52 | pattern: "versions.yml" 53 | authors: 54 | - "@FloWuenne" 55 | maintainers: 56 | - "@FloWuenne" 57 | -------------------------------------------------------------------------------- /modules/nf-core/mindagap/duplicatefinder/environment.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/modules/environment-schema.json 3 | channels: 4 | - conda-forge 5 | - bioconda 6 | dependencies: 7 | - bioconda::mindagap=0.0.2 8 | -------------------------------------------------------------------------------- /modules/nf-core/mindagap/duplicatefinder/main.nf: -------------------------------------------------------------------------------- 1 | process MINDAGAP_DUPLICATEFINDER { 2 | tag "$meta.id" 3 | label 'process_single' 4 | 5 | conda "bioconda::mindagap=0.0.2" 6 | container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 7 | 'https://depot.galaxyproject.org/singularity/mindagap:0.0.2--pyhdfd78af_1': 8 | 'biocontainers/mindagap:0.0.2--pyhdfd78af_1' }" 9 | 10 | input: 11 | tuple val(meta), path(spot_table) 12 | 13 | output: 14 | tuple val(meta), path("*markedDups.txt"), emit: marked_dups_spots 15 | path "versions.yml" , emit: versions 16 | 17 | when: 18 | task.ext.when == null || task.ext.when 19 | 20 | script: 21 | def args = task.ext.args ?: '' 22 | def prefix = task.ext.prefix ?: "${meta.id}" 23 | """ 24 | duplicate_finder.py \\ 25 | $spot_table \\ 26 | $args 27 | 28 | cat <<-END_VERSIONS > versions.yml 29 | "${task.process}": 30 | mindagap: \$(mindagap.py test -v) 31 | END_VERSIONS 32 | """ 33 | } 34 | -------------------------------------------------------------------------------- /modules/nf-core/mindagap/duplicatefinder/meta.yml: -------------------------------------------------------------------------------- 1 | name: "MINDAGAP_DUPLICATEFINDER" 2 | description: marks duplicate spots along gridline edges. 
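# NOTE (illustrative only): the spot table is a headerless TSV ordered x, y, z, gene
# (see the input description below); in the output, duplicate spots keep their row
# but get "Duplicated" written into the gene column. Coordinates here are invented
# for the example:
#
#   input (spots.txt):       output (spots_markedDups.txt):
#   100  200  0  Acta2       100  200  0  Acta2
#   102  200  0  Acta2       102  200  0  Duplicated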
3 | keywords: 4 | - imaging 5 | - resolve_bioscience 6 | - spatial_transcriptomics 7 | tools: 8 | - "mindagap": 9 | description: "Takes a single panorama image and fills the empty grid lines with 10 | neighbour-weighted values." 11 | homepage: "https://github.com/ViriatoII/MindaGap/blob/main/README.md" 12 | documentation: "https://github.com/ViriatoII/MindaGap/blob/main/README.md" 13 | tool_dev_url: "https://github.com/ViriatoII/MindaGap" 14 | licence: ["BSD 3-clause License"] 15 | identifier: "" 16 | 17 | input: 18 | - - meta: 19 | type: map 20 | description: | 21 | Groovy Map containing sample information 22 | e.g. [ id:'test' ] 23 | - spot_table: 24 | type: file 25 | description: tsv file containing one spot per row with order x,y,z,gene without 26 | column header. 27 | pattern: "*.{tsv,txt}" 28 | output: 29 | - marked_dups_spots: 30 | - meta: 31 | type: map 32 | description: | 33 | Groovy Map containing sample information 34 | e.g. [ id:'test' ] 35 | - "*markedDups.txt": 36 | type: file 37 | description: tsv file containing one spot per row, with duplicated spots labeled 38 | with "Duplicated" in their gene column. 39 | pattern: "*.{markedDups.txt}" 40 | - versions: 41 | - versions.yml: 42 | type: file 43 | description: File containing software versions 44 | pattern: "versions.yml" 45 | authors: 46 | - "@FloWuenne" 47 | maintainers: 48 | - "@FloWuenne" 49 | -------------------------------------------------------------------------------- /modules/nf-core/mindagap/duplicatefinder/tests/main.nf.test: -------------------------------------------------------------------------------- 1 | nextflow_process { 2 | 3 | name "Test Process MINDAGAP_DUPLICATEFINDER" 4 | script "../main.nf" 5 | config "./nextflow.config" 6 | process "MINDAGAP_DUPLICATEFINDER" 7 | tag "modules" 8 | tag "modules_nfcore" 9 | tag "mindagap" 10 | tag "mindagap/duplicatefinder" 11 | 12 | test("test_mindagap_duplicatefinder_spots") { 13 | 14 | when { 15 | process { 16 | """ 17 | input[0] = [ 18 | [ id:'test'], // meta map 19 | file('https://raw.githubusercontent.com/nf-core/test-datasets/molkart/test_data/input_data/spots.txt') 20 | ] 21 | """ 22 | } 23 | } 24 | 25 | then { 26 | assertAll( 27 | { assert process.success }, 28 | { assert snapshot(process.out).match() } 29 | ) 30 | } 31 | 32 | } 33 | 34 | } 35 | -------------------------------------------------------------------------------- /modules/nf-core/mindagap/duplicatefinder/tests/main.nf.test.snap: -------------------------------------------------------------------------------- 1 | { 2 | "test_mindagap_duplicatefinder_spots": { 3 | "content": [ 4 | { 5 | "0": [ 6 | [ 7 | { 8 | "id": "test" 9 | }, 10 | "spots_markedDups.txt:md5,4562caad05850d7dd7b6e9235e068a8b" 11 | ] 12 | ], 13 | "1": [ 14 | "versions.yml:md5,ae112b853ec32ee1c5eecaf421d01003" 15 | ], 16 | "marked_dups_spots": [ 17 | [ 18 | { 19 | "id": "test" 20 | }, 21 | "spots_markedDups.txt:md5,4562caad05850d7dd7b6e9235e068a8b" 22 | ] 23 | ], 24 | "versions": [ 25 | "versions.yml:md5,ae112b853ec32ee1c5eecaf421d01003" 26 | ] 27 | } 28 | ], 29 | "timestamp": "2023-11-30T22:56:20.101101751" 30 | } 31 | } -------------------------------------------------------------------------------- /modules/nf-core/mindagap/duplicatefinder/tests/nextflow.config: -------------------------------------------------------------------------------- 1 | process { 2 | 3 | withName: "MINDAGAP_DUPLICATEFINDER" { 4 | ext.args = '90' 5 | } 6 | 7 | } 8 | -------------------------------------------------------------------------------- 
/modules/nf-core/mindagap/duplicatefinder/tests/tags.yml: -------------------------------------------------------------------------------- 1 | mindagap/duplicatefinder: 2 | - modules/nf-core/mindagap/duplicatefinder/** 3 | -------------------------------------------------------------------------------- /modules/nf-core/mindagap/mindagap/environment.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/modules/environment-schema.json 3 | channels: 4 | - conda-forge 5 | - bioconda 6 | dependencies: 7 | - bioconda::mindagap=0.0.2 8 | -------------------------------------------------------------------------------- /modules/nf-core/mindagap/mindagap/main.nf: -------------------------------------------------------------------------------- 1 | process MINDAGAP_MINDAGAP { 2 | tag "$meta.id" 3 | label 'process_low' 4 | 5 | conda "${moduleDir}/environment.yml" 6 | container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 7 | 'https://depot.galaxyproject.org/singularity/mindagap:0.0.2--pyhdfd78af_1' : 8 | 'biocontainers/mindagap:0.0.2--pyhdfd78af_1' }" 9 | 10 | input: 11 | tuple val(meta), path(panorama) 12 | 13 | output: 14 | tuple val(meta), path("*.{tif,tiff}"), emit: tiff 15 | path "versions.yml" , emit: versions 16 | 17 | when: 18 | task.ext.when == null || task.ext.when 19 | 20 | script: 21 | def args = task.ext.args ?: '' 22 | def prefix = task.ext.prefix ?: "${meta.id}" 23 | """ 24 | mindagap.py \\ 25 | $panorama \\ 26 | $args 27 | 28 | cat <<-END_VERSIONS > versions.yml 29 | "${task.process}": 30 | mindagap: \$(mindagap.py test -v) 31 | END_VERSIONS 32 | """ 33 | 34 | stub: 35 | def prefix = task.ext.prefix ?: "${meta.id}" 36 | """ 37 | touch ${panorama.baseName}_gridfilled.tiff 38 | 39 | cat <<-END_VERSIONS > versions.yml 40 | "${task.process}": 41 | mindagap: \$(mindagap.py test -v) 42 | END_VERSIONS 43 | """ 44 | } 45 | -------------------------------------------------------------------------------- /modules/nf-core/mindagap/mindagap/meta.yml: -------------------------------------------------------------------------------- 1 | name: "mindagap_mindagap" 2 | description: Takes a single panorama image and fills the empty grid lines with neighbour-weighted 3 | values. 4 | keywords: 5 | - imaging 6 | - resolve_bioscience 7 | - spatial_transcriptomics 8 | tools: 9 | - "mindagap": 10 | description: "Mindagap is a collection of tools to process multiplexed FISH data, 11 | such as produced by Resolve Biosciences Molecular Cartography." 12 | homepage: "https://github.com/ViriatoII/MindaGap" 13 | documentation: "https://github.com/ViriatoII/MindaGap/blob/main/README.md" 14 | tool_dev_url: "https://github.com/ViriatoII/MindaGap" 15 | licence: ["BSD-3-Clause"] 16 | identifier: "" 17 | input: 18 | - - meta: 19 | type: map 20 | description: | 21 | Groovy Map containing sample information 22 | e.g. [ id:'test', single_end:false ] 23 | - panorama: 24 | type: file 25 | description: A tiff file containing gridlines as produced by Molecular Cartography 26 | imaging. 27 | pattern: "*.{tif,tiff}" 28 | output: 29 | - tiff: 30 | - meta: 31 | type: map 32 | description: | 33 | Groovy Map containing sample information 34 | e.g. [ id:'test', single_end:false ] 35 | - "*.{tif,tiff}": 36 | type: file 37 | description: A tiff file with gridlines filled based on consecutive Gaussian 38 | blurring.
39 | pattern: "*.{tiff}" 40 | - versions: 41 | - versions.yml: 42 | type: file 43 | description: File containing software versions 44 | pattern: "versions.yml" 45 | authors: 46 | - "@ViriatoII" 47 | - "@flowuenne" 48 | maintainers: 49 | - "@ViriatoII" 50 | - "@flowuenne" 51 | -------------------------------------------------------------------------------- /modules/nf-core/mindagap/mindagap/tests/main.nf.test: -------------------------------------------------------------------------------- 1 | nextflow_process { 2 | 3 | name "Test Process MINDAGAP_MINDAGAP" 4 | script "../main.nf" 5 | config "./nextflow.config" 6 | process "MINDAGAP_MINDAGAP" 7 | 8 | tag "modules" 9 | tag "modules_nfcore" 10 | tag "mindagap" 11 | tag "mindagap/mindagap" 12 | 13 | test("mindgap - tiff") { 14 | 15 | when { 16 | process { 17 | """ 18 | input[0] = [ 19 | [ id:'test' ], 20 | file(params.test_data['imaging']['tiff']['mouse_heart_wga'], checkIfExists: true) 21 | ] 22 | """ 23 | } 24 | } 25 | 26 | then { 27 | assertAll( 28 | { assert process.success }, 29 | { assert snapshot(process.out.tiff).match("tiff") }, 30 | { assert snapshot(process.out.versions).match("versions") } 31 | ) 32 | } 33 | 34 | } 35 | 36 | } 37 | -------------------------------------------------------------------------------- /modules/nf-core/mindagap/mindagap/tests/main.nf.test.snap: -------------------------------------------------------------------------------- 1 | { 2 | "tiff": { 3 | "content": [ 4 | [ 5 | [ 6 | { 7 | "id": "test" 8 | }, 9 | "mindagap.mouse_heart.wga_gridfilled.tiff:md5,310cf0017baa54af32176b43a5b0adfd" 10 | ] 11 | ] 12 | ], 13 | "timestamp": "2023-12-15T11:01:20.825556802" 14 | }, 15 | "versions": { 16 | "content": [ 17 | [ 18 | "versions.yml:md5,937acaba2cb90efc2705b71839e6cefc" 19 | ] 20 | ], 21 | "timestamp": "2023-12-15T11:01:20.840211732" 22 | } 23 | } -------------------------------------------------------------------------------- /modules/nf-core/mindagap/mindagap/tests/nextflow.config: -------------------------------------------------------------------------------- 1 | process { 2 | 3 | withName: "MINDAGAP_MINDAGAP" { 4 | ext.args = "3 40 --Xtilesize 2144" 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /modules/nf-core/mindagap/mindagap/tests/tags.yml: -------------------------------------------------------------------------------- 1 | mindagap/mindagap: 2 | - "modules/nf-core/mindagap/mindagap/**" 3 | -------------------------------------------------------------------------------- /modules/nf-core/multiqc/environment.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/modules/environment-schema.json 3 | channels: 4 | - conda-forge 5 | - bioconda 6 | dependencies: 7 | - bioconda::multiqc=1.27 8 | -------------------------------------------------------------------------------- /modules/nf-core/multiqc/main.nf: -------------------------------------------------------------------------------- 1 | process MULTIQC { 2 | label 'process_single' 3 | 4 | conda "${moduleDir}/environment.yml" 5 | container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 
6 | 'https://depot.galaxyproject.org/singularity/multiqc:1.27--pyhdfd78af_0' : 7 | 'biocontainers/multiqc:1.27--pyhdfd78af_0' }" 8 | 9 | input: 10 | path multiqc_files, stageAs: "?/*" 11 | path(multiqc_config) 12 | path(extra_multiqc_config) 13 | path(multiqc_logo) 14 | path(replace_names) 15 | path(sample_names) 16 | 17 | output: 18 | path "*multiqc_report.html", emit: report 19 | path "*_data" , emit: data 20 | path "*_plots" , optional:true, emit: plots 21 | path "versions.yml" , emit: versions 22 | 23 | when: 24 | task.ext.when == null || task.ext.when 25 | 26 | script: 27 | def args = task.ext.args ?: '' 28 | def prefix = task.ext.prefix ? "--filename ${task.ext.prefix}.html" : '' 29 | def config = multiqc_config ? "--config $multiqc_config" : '' 30 | def extra_config = extra_multiqc_config ? "--config $extra_multiqc_config" : '' 31 | def logo = multiqc_logo ? "--cl-config 'custom_logo: \"${multiqc_logo}\"'" : '' 32 | def replace = replace_names ? "--replace-names ${replace_names}" : '' 33 | def samples = sample_names ? "--sample-names ${sample_names}" : '' 34 | """ 35 | multiqc \\ 36 | --force \\ 37 | $args \\ 38 | $config \\ 39 | $prefix \\ 40 | $extra_config \\ 41 | $logo \\ 42 | $replace \\ 43 | $samples \\ 44 | . 45 | 46 | cat <<-END_VERSIONS > versions.yml 47 | "${task.process}": 48 | multiqc: \$( multiqc --version | sed -e "s/multiqc, version //g" ) 49 | END_VERSIONS 50 | """ 51 | 52 | stub: 53 | """ 54 | mkdir multiqc_data 55 | mkdir multiqc_plots 56 | touch multiqc_report.html 57 | 58 | cat <<-END_VERSIONS > versions.yml 59 | "${task.process}": 60 | multiqc: \$( multiqc --version | sed -e "s/multiqc, version //g" ) 61 | END_VERSIONS 62 | """ 63 | } 64 | -------------------------------------------------------------------------------- /modules/nf-core/multiqc/meta.yml: -------------------------------------------------------------------------------- 1 | name: multiqc 2 | description: Aggregate results from bioinformatics analyses across many samples into 3 | a single report 4 | keywords: 5 | - QC 6 | - bioinformatics tools 7 | - Beautiful stand-alone HTML report 8 | tools: 9 | - multiqc: 10 | description: | 11 | MultiQC searches a given directory for analysis logs and compiles a HTML report. 12 | It's a general use tool, perfect for summarising the output from numerous bioinformatics tools. 13 | homepage: https://multiqc.info/ 14 | documentation: https://multiqc.info/docs/ 15 | licence: ["GPL-3.0-or-later"] 16 | identifier: biotools:multiqc 17 | input: 18 | - - multiqc_files: 19 | type: file 20 | description: | 21 | List of reports / files recognised by MultiQC, for example the html and zip output of FastQC 22 | - - multiqc_config: 23 | type: file 24 | description: Optional config yml for MultiQC 25 | pattern: "*.{yml,yaml}" 26 | - - extra_multiqc_config: 27 | type: file 28 | description: Second optional config yml for MultiQC. Will override common sections 29 | in multiqc_config. 30 | pattern: "*.{yml,yaml}" 31 | - - multiqc_logo: 32 | type: file 33 | description: Optional logo file for MultiQC 34 | pattern: "*.{png}" 35 | - - replace_names: 36 | type: file 37 | description: | 38 | Optional two-column sample renaming file. First column a set of 39 | patterns, second column a set of corresponding replacements. Passed via 40 | MultiQC's `--replace-names` option. 41 | pattern: "*.{tsv}" 42 | - - sample_names: 43 | type: file 44 | description: | 45 | Optional TSV file with headers, passed to the MultiQC --sample_names 46 | argument. 
47 | pattern: "*.{tsv}" 48 | output: 49 | - report: 50 | - "*multiqc_report.html": 51 | type: file 52 | description: MultiQC report file 53 | pattern: "multiqc_report.html" 54 | - data: 55 | - "*_data": 56 | type: directory 57 | description: MultiQC data dir 58 | pattern: "multiqc_data" 59 | - plots: 60 | - "*_plots": 61 | type: directory 62 | description: Plots created by MultiQC 63 | pattern: "*_plots" 64 | - versions: 65 | - versions.yml: 66 | type: file 67 | description: File containing software versions 68 | pattern: "versions.yml" 69 | authors: 70 | - "@abhi18av" 71 | - "@bunop" 72 | - "@drpatelh" 73 | - "@jfy133" 74 | maintainers: 75 | - "@abhi18av" 76 | - "@bunop" 77 | - "@drpatelh" 78 | - "@jfy133" 79 | -------------------------------------------------------------------------------- /modules/nf-core/multiqc/tests/main.nf.test: -------------------------------------------------------------------------------- 1 | nextflow_process { 2 | 3 | name "Test Process MULTIQC" 4 | script "../main.nf" 5 | process "MULTIQC" 6 | 7 | tag "modules" 8 | tag "modules_nfcore" 9 | tag "multiqc" 10 | 11 | config "./nextflow.config" 12 | 13 | test("sarscov2 single-end [fastqc]") { 14 | 15 | when { 16 | process { 17 | """ 18 | input[0] = Channel.of(file(params.modules_testdata_base_path + 'genomics/sarscov2/illumina/fastqc/test_fastqc.zip', checkIfExists: true)) 19 | input[1] = [] 20 | input[2] = [] 21 | input[3] = [] 22 | input[4] = [] 23 | input[5] = [] 24 | """ 25 | } 26 | } 27 | 28 | then { 29 | assertAll( 30 | { assert process.success }, 31 | { assert process.out.report[0] ==~ ".*/multiqc_report.html" }, 32 | { assert process.out.data[0] ==~ ".*/multiqc_data" }, 33 | { assert snapshot(process.out.versions).match("multiqc_versions_single") } 34 | ) 35 | } 36 | 37 | } 38 | 39 | test("sarscov2 single-end [fastqc] [config]") { 40 | 41 | when { 42 | process { 43 | """ 44 | input[0] = Channel.of(file(params.modules_testdata_base_path + 'genomics/sarscov2/illumina/fastqc/test_fastqc.zip', checkIfExists: true)) 45 | input[1] = Channel.of(file("https://github.com/nf-core/tools/raw/dev/nf_core/pipeline-template/assets/multiqc_config.yml", checkIfExists: true)) 46 | input[2] = [] 47 | input[3] = [] 48 | input[4] = [] 49 | input[5] = [] 50 | """ 51 | } 52 | } 53 | 54 | then { 55 | assertAll( 56 | { assert process.success }, 57 | { assert process.out.report[0] ==~ ".*/multiqc_report.html" }, 58 | { assert process.out.data[0] ==~ ".*/multiqc_data" }, 59 | { assert snapshot(process.out.versions).match("multiqc_versions_config") } 60 | ) 61 | } 62 | } 63 | 64 | test("sarscov2 single-end [fastqc] - stub") { 65 | 66 | options "-stub" 67 | 68 | when { 69 | process { 70 | """ 71 | input[0] = Channel.of(file(params.modules_testdata_base_path + 'genomics/sarscov2/illumina/fastqc/test_fastqc.zip', checkIfExists: true)) 72 | input[1] = [] 73 | input[2] = [] 74 | input[3] = [] 75 | input[4] = [] 76 | input[5] = [] 77 | """ 78 | } 79 | } 80 | 81 | then { 82 | assertAll( 83 | { assert process.success }, 84 | { assert snapshot(process.out.report.collect { file(it).getName() } + 85 | process.out.data.collect { file(it).getName() } + 86 | process.out.plots.collect { file(it).getName() } + 87 | process.out.versions ).match("multiqc_stub") } 88 | ) 89 | } 90 | 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /modules/nf-core/multiqc/tests/main.nf.test.snap: -------------------------------------------------------------------------------- 1 | { 2 | "multiqc_versions_single": { 3 |
"content": [ 4 | [ 5 | "versions.yml:md5,8f3b8c1cec5388cf2708be948c9fa42f" 6 | ] 7 | ], 8 | "meta": { 9 | "nf-test": "0.9.2", 10 | "nextflow": "24.10.4" 11 | }, 12 | "timestamp": "2025-01-27T09:29:57.631982377" 13 | }, 14 | "multiqc_stub": { 15 | "content": [ 16 | [ 17 | "multiqc_report.html", 18 | "multiqc_data", 19 | "multiqc_plots", 20 | "versions.yml:md5,8f3b8c1cec5388cf2708be948c9fa42f" 21 | ] 22 | ], 23 | "meta": { 24 | "nf-test": "0.9.2", 25 | "nextflow": "24.10.4" 26 | }, 27 | "timestamp": "2025-01-27T09:30:34.743726958" 28 | }, 29 | "multiqc_versions_config": { 30 | "content": [ 31 | [ 32 | "versions.yml:md5,8f3b8c1cec5388cf2708be948c9fa42f" 33 | ] 34 | ], 35 | "meta": { 36 | "nf-test": "0.9.2", 37 | "nextflow": "24.10.4" 38 | }, 39 | "timestamp": "2025-01-27T09:30:21.44383553" 40 | } 41 | } -------------------------------------------------------------------------------- /modules/nf-core/multiqc/tests/nextflow.config: -------------------------------------------------------------------------------- 1 | process { 2 | withName: 'MULTIQC' { 3 | ext.prefix = null 4 | } 5 | } 6 | -------------------------------------------------------------------------------- /modules/nf-core/multiqc/tests/tags.yml: -------------------------------------------------------------------------------- 1 | multiqc: 2 | - modules/nf-core/multiqc/** 3 | -------------------------------------------------------------------------------- /modules/nf-core/stardist/environment.yml: -------------------------------------------------------------------------------- 1 | channels: 2 | - conda-forge 3 | - bioconda 4 | 5 | dependencies: 6 | - python=3.9 7 | - stardist==0.9.1 8 | - tensorflow==2.10.0 9 | - tifffile<2022.4.22 10 | -------------------------------------------------------------------------------- /modules/nf-core/stardist/main.nf: -------------------------------------------------------------------------------- 1 | process STARDIST { 2 | tag "$meta.id" 3 | label 'process_medium' 4 | 5 | conda "${moduleDir}/environment.yml" 6 | container "ghcr.io/schapirolabor/stardist:0.9.1" 7 | 8 | input: 9 | tuple val(meta), path(image) 10 | 11 | output: 12 | tuple val(meta), path("*.stardist.tif"), emit: mask 13 | path "versions.yml" , emit: versions 14 | 15 | when: 16 | task.ext.when == null || task.ext.when 17 | 18 | script: 19 | def args = task.ext.args ?: '' 20 | def prefix = task.ext.prefix ?: "${meta.id}" 21 | 22 | """ 23 | stardist-predict2d \\ 24 | -i $image \\ 25 | -o . 
\\ 26 | $args 27 | 28 | cat <<-END_VERSIONS > versions.yml 29 | "${task.process}": 30 | stardist: \$( python -m pip show --version stardist | grep "Version" | sed -e "s/Version: //g" ) 31 | python: \$( python --version | sed -e "s/Python //g" ) 32 | tensorflow: \$( python -m pip show --version tensorflow | grep "Version" | sed -e "s/Version: //g" ) 33 | tifffile: \$( python -m pip show --version tifffile | grep "Version" | sed -e "s/Version: //g" ) 34 | END_VERSIONS 35 | """ 36 | 37 | stub: 38 | def args = task.ext.args ?: '' 39 | def prefix = task.ext.prefix ?: "${meta.id}" 40 | """ 41 | touch ${prefix}.stardist.tif 42 | 43 | cat <<-END_VERSIONS > versions.yml 44 | "${task.process}": 45 | stardist: \$( python -m pip show --version stardist | grep "Version" | sed -e "s/Version: //g" ) 46 | python: \$( python --version | sed -e "s/Python //g" ) 47 | tensorflow: \$( python -m pip show --version tensorflow | grep "Version" | sed -e "s/Version: //g" ) 48 | tifffile: \$( python -m pip show --version tifffile | grep "Version" | sed -e "s/Version: //g" ) 49 | END_VERSIONS 50 | """ 51 | } 52 | -------------------------------------------------------------------------------- /modules/nf-core/stardist/meta.yml: -------------------------------------------------------------------------------- 1 | name: "stardist" 2 | description: Cell and nuclear segmentation with star-convex shapes 3 | keywords: 4 | - stardist 5 | - segmentation 6 | - image 7 | tools: 8 | - "stardist": 9 | description: "Stardist is a cell segmentation tool developed in Python by Martin 10 | Weigert and Uwe Schmidt" 11 | homepage: "https://stardist.net/" 12 | documentation: "https://stardist.net/faq/" 13 | tool_dev_url: "https://github.com/stardist/stardist" 14 | doi: "10.1109/ISBIC56247.2022.9854534" 15 | licence: ["BSD 3-Clause"] 16 | identifier: "" 17 | 18 | input: 19 | - - meta: 20 | type: map 21 | description: | 22 | Groovy Map containing sample information 23 | e.g. `[ id:'sample1' ]` 24 | - image: 25 | type: file 26 | description: Single channel nuclear image 27 | pattern: "*.{tiff,tif}" 28 | output: 29 | - mask: 30 | - meta: 31 | type: map 32 | description: | 33 | Groovy Map containing sample information 34 | e.g. `[ id:'sample1' ]` 35 | - "*.stardist.tif": 36 | type: file 37 | description: labelled mask output from stardist in tif format.
38 | pattern: "*.{tiff,tif}" 39 | - versions: 40 | - versions.yml: 41 | type: file 42 | description: File containing software versions 43 | pattern: "versions.yml" 44 | authors: 45 | - "@migueLib" 46 | maintainers: 47 | - "@migueLib" 48 | -------------------------------------------------------------------------------- /modules/nf-core/stardist/tests/main.nf.test: -------------------------------------------------------------------------------- 1 | nextflow_process { 2 | name "Test Process STARDIST" 3 | script "../main.nf" 4 | process "STARDIST" 5 | config "./nextflow.config" 6 | 7 | tag "modules" 8 | tag "modules_nfcore" 9 | tag "stardist" 10 | 11 | test("stardist2d - tif") { 12 | 13 | when { 14 | process { 15 | """ 16 | input[0] = [ 17 | [ id:'test' ], // meta map 18 | file(params.modules_testdata_base_path + 'imaging/segmentation/nuclear_image.tif', checkIfExists: true) 19 | ] 20 | """ 21 | } 22 | } 23 | 24 | then { 25 | assertAll( 26 | { assert process.success }, 27 | { assert snapshot(process.out).match() } 28 | ) 29 | } 30 | 31 | } 32 | 33 | test("stardist2d - tif - stub") { 34 | 35 | options "-stub" 36 | 37 | when { 38 | process { 39 | """ 40 | input[0] = [ 41 | [ id:'test' ], // meta map 42 | file(params.modules_testdata_base_path + 'imaging/segmentation/nuclear_image.tif', checkIfExists: true) 43 | ] 44 | """ 45 | } 46 | } 47 | 48 | then { 49 | assertAll( 50 | { assert process.success }, 51 | { assert snapshot(process.out).match() } 52 | ) 53 | } 54 | 55 | } 56 | 57 | } 58 | -------------------------------------------------------------------------------- /modules/nf-core/stardist/tests/main.nf.test.snap: -------------------------------------------------------------------------------- 1 | { 2 | "stardist2d - tif - stub": { 3 | "content": [ 4 | { 5 | "0": [ 6 | [ 7 | { 8 | "id": "test" 9 | }, 10 | "test.stardist.tif:md5,d41d8cd98f00b204e9800998ecf8427e" 11 | ] 12 | ], 13 | "1": [ 14 | "versions.yml:md5,2cf7aba2a90e1053a51e1d58ff25a4bf" 15 | ], 16 | "mask": [ 17 | [ 18 | { 19 | "id": "test" 20 | }, 21 | "test.stardist.tif:md5,d41d8cd98f00b204e9800998ecf8427e" 22 | ] 23 | ], 24 | "versions": [ 25 | "versions.yml:md5,2cf7aba2a90e1053a51e1d58ff25a4bf" 26 | ] 27 | } 28 | ], 29 | "meta": { 30 | "nf-test": "0.9.0", 31 | "nextflow": "24.04.4" 32 | }, 33 | "timestamp": "2024-08-08T14:31:08.559997122" 34 | }, 35 | "stardist2d - tif": { 36 | "content": [ 37 | { 38 | "0": [ 39 | [ 40 | { 41 | "id": "test" 42 | }, 43 | "nuclear_image.stardist.tif:md5,9d0f8f98554c914dd55b9ea8818fbc63" 44 | ] 45 | ], 46 | "1": [ 47 | "versions.yml:md5,2cf7aba2a90e1053a51e1d58ff25a4bf" 48 | ], 49 | "mask": [ 50 | [ 51 | { 52 | "id": "test" 53 | }, 54 | "nuclear_image.stardist.tif:md5,9d0f8f98554c914dd55b9ea8818fbc63" 55 | ] 56 | ], 57 | "versions": [ 58 | "versions.yml:md5,2cf7aba2a90e1053a51e1d58ff25a4bf" 59 | ] 60 | } 61 | ], 62 | "meta": { 63 | "nf-test": "0.9.0", 64 | "nextflow": "24.04.4" 65 | }, 66 | "timestamp": "2024-08-08T14:30:57.525654244" 67 | } 68 | } -------------------------------------------------------------------------------- /modules/nf-core/stardist/tests/nextflow.config: -------------------------------------------------------------------------------- 1 | process { 2 | withName: "STARDIST" { 3 | ext.args = '-m 2D_versatile_fluo' 4 | } 5 | } 6 | -------------------------------------------------------------------------------- /nf-test.config: -------------------------------------------------------------------------------- 1 | config { 2 | 3 | testsDir "tests" 4 | workDir System.getenv("NFT_WORKDIR") 
?: ".nf-test" 5 | configFile "" 6 | profile "" 7 | 8 | // Include plugins 9 | plugins { 10 | load "nft-utils@0.0.3" 11 | } 12 | 13 | } 14 | -------------------------------------------------------------------------------- /subworkflows/nf-core/utils_nextflow_pipeline/main.nf: -------------------------------------------------------------------------------- 1 | // 2 | // Subworkflow with functionality that may be useful for any Nextflow pipeline 3 | // 4 | 5 | /* 6 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 7 | SUBWORKFLOW DEFINITION 8 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 9 | */ 10 | 11 | workflow UTILS_NEXTFLOW_PIPELINE { 12 | take: 13 | print_version // boolean: print version 14 | dump_parameters // boolean: dump parameters 15 | outdir // path: base directory used to publish pipeline results 16 | check_conda_channels // boolean: check conda channels 17 | 18 | main: 19 | 20 | // 21 | // Print workflow version and exit on --version 22 | // 23 | if (print_version) { 24 | log.info("${workflow.manifest.name} ${getWorkflowVersion()}") 25 | System.exit(0) 26 | } 27 | 28 | // 29 | // Dump pipeline parameters to a JSON file 30 | // 31 | if (dump_parameters && outdir) { 32 | dumpParametersToJSON(outdir) 33 | } 34 | 35 | // 36 | // When running with Conda, warn if channels have not been set-up appropriately 37 | // 38 | if (check_conda_channels) { 39 | checkCondaChannels() 40 | } 41 | 42 | emit: 43 | dummy_emit = true 44 | } 45 | 46 | /* 47 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 48 | FUNCTIONS 49 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 50 | */ 51 | 52 | // 53 | // Generate version string 54 | // 55 | def getWorkflowVersion() { 56 | def version_string = "" as String 57 | if (workflow.manifest.version) { 58 | def prefix_v = workflow.manifest.version[0] != 'v' ? 
'v' : '' 59 | version_string += "${prefix_v}${workflow.manifest.version}" 60 | } 61 | 62 | if (workflow.commitId) { 63 | def git_shortsha = workflow.commitId.substring(0, 7) 64 | version_string += "-g${git_shortsha}" 65 | } 66 | 67 | return version_string 68 | } 69 | 70 | // 71 | // Dump pipeline parameters to a JSON file 72 | // 73 | def dumpParametersToJSON(outdir) { 74 | def timestamp = new java.util.Date().format('yyyy-MM-dd_HH-mm-ss') 75 | def filename = "params_${timestamp}.json" 76 | def temp_pf = new File(workflow.launchDir.toString(), ".${filename}") 77 | def jsonStr = groovy.json.JsonOutput.toJson(params) 78 | temp_pf.text = groovy.json.JsonOutput.prettyPrint(jsonStr) 79 | 80 | nextflow.extension.FilesEx.copyTo(temp_pf.toPath(), "${outdir}/pipeline_info/params_${timestamp}.json") 81 | temp_pf.delete() 82 | } 83 | 84 | // 85 | // When running with -profile conda, warn if channels have not been set-up appropriately 86 | // 87 | def checkCondaChannels() { 88 | def parser = new org.yaml.snakeyaml.Yaml() 89 | def channels = [] 90 | try { 91 | def config = parser.load("conda config --show channels".execute().text) 92 | channels = config.channels 93 | } 94 | catch (NullPointerException e) { 95 | log.debug(e) 96 | log.warn("Could not verify conda channel configuration.") 97 | return null 98 | } 99 | catch (IOException e) { 100 | log.debug(e) 101 | log.warn("Could not verify conda channel configuration.") 102 | return null 103 | } 104 | 105 | // Check that all channels are present 106 | // This channel list is ordered by required channel priority. 107 | def required_channels_in_order = ['conda-forge', 'bioconda'] 108 | def channels_missing = ((required_channels_in_order as Set) - (channels as Set)) as Boolean 109 | 110 | // Check that they are in the right order 111 | def channel_priority_violation = required_channels_in_order != channels.findAll { ch -> ch in required_channels_in_order } 112 | 113 | if (channels_missing | channel_priority_violation) { 114 | log.warn """\ 115 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 116 | There is a problem with your Conda configuration! 117 | You will need to set-up the conda-forge and bioconda channels correctly. 118 | Please refer to https://bioconda.github.io/ 119 | The observed channel order is 120 | ${channels} 121 | but the following channel order is required: 122 | ${required_channels_in_order} 123 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" 124 | """.stripIndent(true) 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /subworkflows/nf-core/utils_nextflow_pipeline/meta.yml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/subworkflows/yaml-schema.json 2 | name: "UTILS_NEXTFLOW_PIPELINE" 3 | description: Subworkflow with functionality that may be useful for any Nextflow pipeline 4 | keywords: 5 | - utility 6 | - pipeline 7 | - initialise 8 | - version 9 | components: [] 10 | input: 11 | - print_version: 12 | type: boolean 13 | description: | 14 | Print the version of the pipeline and exit 15 | - dump_parameters: 16 | type: boolean 17 | description: | 18 | Dump the parameters of the pipeline to a JSON file 19 | - output_directory: 20 | type: directory 21 | description: Path to output dir to write JSON file to. 
22 | pattern: "results/" 23 | - check_conda_channel: 24 | type: boolean 25 | description: | 26 | Check if the conda channel priority is correct. 27 | output: 28 | - dummy_emit: 29 | type: boolean 30 | description: | 31 | Dummy emit to make nf-core subworkflows lint happy 32 | authors: 33 | - "@adamrtalbot" 34 | - "@drpatelh" 35 | maintainers: 36 | - "@adamrtalbot" 37 | - "@drpatelh" 38 | - "@maxulysse" 39 | -------------------------------------------------------------------------------- /subworkflows/nf-core/utils_nextflow_pipeline/tests/main.function.nf.test: -------------------------------------------------------------------------------- 1 | 2 | nextflow_function { 3 | 4 | name "Test Functions" 5 | script "subworkflows/nf-core/utils_nextflow_pipeline/main.nf" 6 | config "subworkflows/nf-core/utils_nextflow_pipeline/tests/nextflow.config" 7 | tag 'subworkflows' 8 | tag 'utils_nextflow_pipeline' 9 | tag 'subworkflows/utils_nextflow_pipeline' 10 | 11 | test("Test Function getWorkflowVersion") { 12 | 13 | function "getWorkflowVersion" 14 | 15 | then { 16 | assertAll( 17 | { assert function.success }, 18 | { assert snapshot(function.result).match() } 19 | ) 20 | } 21 | } 22 | 23 | test("Test Function dumpParametersToJSON") { 24 | 25 | function "dumpParametersToJSON" 26 | 27 | when { 28 | function { 29 | """ 30 | // define inputs of the function here. Example: 31 | input[0] = "$outputDir" 32 | """.stripIndent() 33 | } 34 | } 35 | 36 | then { 37 | assertAll( 38 | { assert function.success } 39 | ) 40 | } 41 | } 42 | 43 | test("Test Function checkCondaChannels") { 44 | 45 | function "checkCondaChannels" 46 | 47 | then { 48 | assertAll( 49 | { assert function.success }, 50 | { assert snapshot(function.result).match() } 51 | ) 52 | } 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /subworkflows/nf-core/utils_nextflow_pipeline/tests/main.function.nf.test.snap: -------------------------------------------------------------------------------- 1 | { 2 | "Test Function getWorkflowVersion": { 3 | "content": [ 4 | "v9.9.9" 5 | ], 6 | "meta": { 7 | "nf-test": "0.8.4", 8 | "nextflow": "23.10.1" 9 | }, 10 | "timestamp": "2024-02-28T12:02:05.308243" 11 | }, 12 | "Test Function checkCondaChannels": { 13 | "content": null, 14 | "meta": { 15 | "nf-test": "0.8.4", 16 | "nextflow": "23.10.1" 17 | }, 18 | "timestamp": "2024-02-28T12:02:12.425833" 19 | } 20 | } -------------------------------------------------------------------------------- /subworkflows/nf-core/utils_nextflow_pipeline/tests/main.workflow.nf.test: -------------------------------------------------------------------------------- 1 | nextflow_workflow { 2 | 3 | name "Test Workflow UTILS_NEXTFLOW_PIPELINE" 4 | script "../main.nf" 5 | config "subworkflows/nf-core/utils_nextflow_pipeline/tests/nextflow.config" 6 | workflow "UTILS_NEXTFLOW_PIPELINE" 7 | tag 'subworkflows' 8 | tag 'utils_nextflow_pipeline' 9 | tag 'subworkflows/utils_nextflow_pipeline' 10 | 11 | test("Should run no inputs") { 12 | 13 | when { 14 | workflow { 15 | """ 16 | print_version = false 17 | dump_parameters = false 18 | outdir = null 19 | check_conda_channels = false 20 | 21 | input[0] = print_version 22 | input[1] = dump_parameters 23 | input[2] = outdir 24 | input[3] = check_conda_channels 25 | """ 26 | } 27 | } 28 | 29 | then { 30 | assertAll( 31 | { assert workflow.success } 32 | ) 33 | } 34 | } 35 | 36 | test("Should print version") { 37 | 38 | when { 39 | workflow { 40 | """ 41 | print_version = true 42 | dump_parameters = 
false 43 | outdir = null 44 | check_conda_channels = false 45 | 46 | input[0] = print_version 47 | input[1] = dump_parameters 48 | input[2] = outdir 49 | input[3] = check_conda_channels 50 | """ 51 | } 52 | } 53 | 54 | then { 55 | expect { 56 | with(workflow) { 57 | assert success 58 | assert "nextflow_workflow v9.9.9" in stdout 59 | } 60 | } 61 | } 62 | } 63 | 64 | test("Should dump params") { 65 | 66 | when { 67 | workflow { 68 | """ 69 | print_version = false 70 | dump_parameters = true 71 | outdir = 'results' 72 | check_conda_channels = false 73 | 74 | input[0] = false 75 | input[1] = true 76 | input[2] = outdir 77 | input[3] = false 78 | """ 79 | } 80 | } 81 | 82 | then { 83 | assertAll( 84 | { assert workflow.success } 85 | ) 86 | } 87 | } 88 | 89 | test("Should not create params JSON if no output directory") { 90 | 91 | when { 92 | workflow { 93 | """ 94 | print_version = false 95 | dump_parameters = true 96 | outdir = null 97 | check_conda_channels = false 98 | 99 | input[0] = false 100 | input[1] = true 101 | input[2] = outdir 102 | input[3] = false 103 | """ 104 | } 105 | } 106 | 107 | then { 108 | assertAll( 109 | { assert workflow.success } 110 | ) 111 | } 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /subworkflows/nf-core/utils_nextflow_pipeline/tests/nextflow.config: -------------------------------------------------------------------------------- 1 | manifest { 2 | name = 'nextflow_workflow' 3 | author = """nf-core""" 4 | homePage = 'https://127.0.0.1' 5 | description = """Dummy pipeline""" 6 | nextflowVersion = '!>=23.04.0' 7 | version = '9.9.9' 8 | doi = 'https://doi.org/10.5281/zenodo.5070524' 9 | } 10 | -------------------------------------------------------------------------------- /subworkflows/nf-core/utils_nextflow_pipeline/tests/tags.yml: -------------------------------------------------------------------------------- 1 | subworkflows/utils_nextflow_pipeline: 2 | - subworkflows/nf-core/utils_nextflow_pipeline/** 3 | -------------------------------------------------------------------------------- /subworkflows/nf-core/utils_nfcore_pipeline/meta.yml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/subworkflows/yaml-schema.json 2 | name: "UTILS_NFCORE_PIPELINE" 3 | description: Subworkflow with utility functions specific to the nf-core pipeline template 4 | keywords: 5 | - utility 6 | - pipeline 7 | - initialise 8 | - version 9 | components: [] 10 | input: 11 | - nextflow_cli_args: 12 | type: list 13 | description: | 14 | Nextflow CLI positional arguments 15 | output: 16 | - success: 17 | type: boolean 18 | description: | 19 | Dummy output to indicate success 20 | authors: 21 | - "@adamrtalbot" 22 | maintainers: 23 | - "@adamrtalbot" 24 | - "@maxulysse" 25 | -------------------------------------------------------------------------------- /subworkflows/nf-core/utils_nfcore_pipeline/tests/main.function.nf.test: -------------------------------------------------------------------------------- 1 | 2 | nextflow_function { 3 | 4 | name "Test Functions" 5 | script "../main.nf" 6 | config "subworkflows/nf-core/utils_nfcore_pipeline/tests/nextflow.config" 7 | tag "subworkflows" 8 | tag "subworkflows_nfcore" 9 | tag "utils_nfcore_pipeline" 10 | tag "subworkflows/utils_nfcore_pipeline" 11 | 12 | test("Test Function checkConfigProvided") { 13 | 14 | function "checkConfigProvided" 15 | 16 | then { 17 | 
assertAll( 18 | { assert function.success }, 19 | { assert snapshot(function.result).match() } 20 | ) 21 | } 22 | } 23 | 24 | test("Test Function checkProfileProvided") { 25 | 26 | function "checkProfileProvided" 27 | 28 | when { 29 | function { 30 | """ 31 | input[0] = [] 32 | """ 33 | } 34 | } 35 | 36 | then { 37 | assertAll( 38 | { assert function.success }, 39 | { assert snapshot(function.result).match() } 40 | ) 41 | } 42 | } 43 | 44 | test("Test Function without logColours") { 45 | 46 | function "logColours" 47 | 48 | when { 49 | function { 50 | """ 51 | input[0] = true 52 | """ 53 | } 54 | } 55 | 56 | then { 57 | assertAll( 58 | { assert function.success }, 59 | { assert snapshot(function.result).match() } 60 | ) 61 | } 62 | } 63 | 64 | test("Test Function with logColours") { 65 | function "logColours" 66 | 67 | when { 68 | function { 69 | """ 70 | input[0] = false 71 | """ 72 | } 73 | } 74 | 75 | then { 76 | assertAll( 77 | { assert function.success }, 78 | { assert snapshot(function.result).match() } 79 | ) 80 | } 81 | } 82 | 83 | test("Test Function getSingleReport with a single file") { 84 | function "getSingleReport" 85 | 86 | when { 87 | function { 88 | """ 89 | input[0] = file(params.modules_testdata_base_path + '/generic/tsv/test.tsv', checkIfExists: true) 90 | """ 91 | } 92 | } 93 | 94 | then { 95 | assertAll( 96 | { assert function.success }, 97 | { assert function.result.contains("test.tsv") } 98 | ) 99 | } 100 | } 101 | 102 | test("Test Function getSingleReport with multiple files") { 103 | function "getSingleReport" 104 | 105 | when { 106 | function { 107 | """ 108 | input[0] = [ 109 | file(params.modules_testdata_base_path + '/generic/tsv/test.tsv', checkIfExists: true), 110 | file(params.modules_testdata_base_path + '/generic/tsv/network.tsv', checkIfExists: true), 111 | file(params.modules_testdata_base_path + '/generic/tsv/expression.tsv', checkIfExists: true) 112 | ] 113 | """ 114 | } 115 | } 116 | 117 | then { 118 | assertAll( 119 | { assert function.success }, 120 | { assert function.result.contains("test.tsv") }, 121 | { assert !function.result.contains("network.tsv") }, 122 | { assert !function.result.contains("expression.tsv") } 123 | ) 124 | } 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /subworkflows/nf-core/utils_nfcore_pipeline/tests/main.function.nf.test.snap: -------------------------------------------------------------------------------- 1 | { 2 | "Test Function checkProfileProvided": { 3 | "content": null, 4 | "meta": { 5 | "nf-test": "0.8.4", 6 | "nextflow": "23.10.1" 7 | }, 8 | "timestamp": "2024-02-28T12:03:03.360873" 9 | }, 10 | "Test Function checkConfigProvided": { 11 | "content": [ 12 | true 13 | ], 14 | "meta": { 15 | "nf-test": "0.8.4", 16 | "nextflow": "23.10.1" 17 | }, 18 | "timestamp": "2024-02-28T12:02:59.729647" 19 | }, 20 | "Test Function without logColours": { 21 | "content": [ 22 | { 23 | "reset": "", 24 | "bold": "", 25 | "dim": "", 26 | "underlined": "", 27 | "blink": "", 28 | "reverse": "", 29 | "hidden": "", 30 | "black": "", 31 | "red": "", 32 | "green": "", 33 | "yellow": "", 34 | "blue": "", 35 | "purple": "", 36 | "cyan": "", 37 | "white": "", 38 | "bblack": "", 39 | "bred": "", 40 | "bgreen": "", 41 | "byellow": "", 42 | "bblue": "", 43 | "bpurple": "", 44 | "bcyan": "", 45 | "bwhite": "", 46 | "ublack": "", 47 | "ured": "", 48 | "ugreen": "", 49 | "uyellow": "", 50 | "ublue": "", 51 | "upurple": "", 52 | "ucyan": "", 53 | "uwhite": "", 54 | "iblack": "", 55 | "ired": 
"", 56 | "igreen": "", 57 | "iyellow": "", 58 | "iblue": "", 59 | "ipurple": "", 60 | "icyan": "", 61 | "iwhite": "", 62 | "biblack": "", 63 | "bired": "", 64 | "bigreen": "", 65 | "biyellow": "", 66 | "biblue": "", 67 | "bipurple": "", 68 | "bicyan": "", 69 | "biwhite": "" 70 | } 71 | ], 72 | "meta": { 73 | "nf-test": "0.8.4", 74 | "nextflow": "23.10.1" 75 | }, 76 | "timestamp": "2024-02-28T12:03:17.969323" 77 | }, 78 | "Test Function with logColours": { 79 | "content": [ 80 | { 81 | "reset": "\u001b[0m", 82 | "bold": "\u001b[1m", 83 | "dim": "\u001b[2m", 84 | "underlined": "\u001b[4m", 85 | "blink": "\u001b[5m", 86 | "reverse": "\u001b[7m", 87 | "hidden": "\u001b[8m", 88 | "black": "\u001b[0;30m", 89 | "red": "\u001b[0;31m", 90 | "green": "\u001b[0;32m", 91 | "yellow": "\u001b[0;33m", 92 | "blue": "\u001b[0;34m", 93 | "purple": "\u001b[0;35m", 94 | "cyan": "\u001b[0;36m", 95 | "white": "\u001b[0;37m", 96 | "bblack": "\u001b[1;30m", 97 | "bred": "\u001b[1;31m", 98 | "bgreen": "\u001b[1;32m", 99 | "byellow": "\u001b[1;33m", 100 | "bblue": "\u001b[1;34m", 101 | "bpurple": "\u001b[1;35m", 102 | "bcyan": "\u001b[1;36m", 103 | "bwhite": "\u001b[1;37m", 104 | "ublack": "\u001b[4;30m", 105 | "ured": "\u001b[4;31m", 106 | "ugreen": "\u001b[4;32m", 107 | "uyellow": "\u001b[4;33m", 108 | "ublue": "\u001b[4;34m", 109 | "upurple": "\u001b[4;35m", 110 | "ucyan": "\u001b[4;36m", 111 | "uwhite": "\u001b[4;37m", 112 | "iblack": "\u001b[0;90m", 113 | "ired": "\u001b[0;91m", 114 | "igreen": "\u001b[0;92m", 115 | "iyellow": "\u001b[0;93m", 116 | "iblue": "\u001b[0;94m", 117 | "ipurple": "\u001b[0;95m", 118 | "icyan": "\u001b[0;96m", 119 | "iwhite": "\u001b[0;97m", 120 | "biblack": "\u001b[1;90m", 121 | "bired": "\u001b[1;91m", 122 | "bigreen": "\u001b[1;92m", 123 | "biyellow": "\u001b[1;93m", 124 | "biblue": "\u001b[1;94m", 125 | "bipurple": "\u001b[1;95m", 126 | "bicyan": "\u001b[1;96m", 127 | "biwhite": "\u001b[1;97m" 128 | } 129 | ], 130 | "meta": { 131 | "nf-test": "0.8.4", 132 | "nextflow": "23.10.1" 133 | }, 134 | "timestamp": "2024-02-28T12:03:21.714424" 135 | } 136 | } -------------------------------------------------------------------------------- /subworkflows/nf-core/utils_nfcore_pipeline/tests/main.workflow.nf.test: -------------------------------------------------------------------------------- 1 | nextflow_workflow { 2 | 3 | name "Test Workflow UTILS_NFCORE_PIPELINE" 4 | script "../main.nf" 5 | config "subworkflows/nf-core/utils_nfcore_pipeline/tests/nextflow.config" 6 | workflow "UTILS_NFCORE_PIPELINE" 7 | tag "subworkflows" 8 | tag "subworkflows_nfcore" 9 | tag "utils_nfcore_pipeline" 10 | tag "subworkflows/utils_nfcore_pipeline" 11 | 12 | test("Should run without failures") { 13 | 14 | when { 15 | workflow { 16 | """ 17 | input[0] = [] 18 | """ 19 | } 20 | } 21 | 22 | then { 23 | assertAll( 24 | { assert workflow.success }, 25 | { assert snapshot(workflow.out).match() } 26 | ) 27 | } 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /subworkflows/nf-core/utils_nfcore_pipeline/tests/main.workflow.nf.test.snap: -------------------------------------------------------------------------------- 1 | { 2 | "Should run without failures": { 3 | "content": [ 4 | { 5 | "0": [ 6 | true 7 | ], 8 | "valid_config": [ 9 | true 10 | ] 11 | } 12 | ], 13 | "meta": { 14 | "nf-test": "0.8.4", 15 | "nextflow": "23.10.1" 16 | }, 17 | "timestamp": "2024-02-28T12:03:25.726491" 18 | } 19 | } 
-------------------------------------------------------------------------------- /subworkflows/nf-core/utils_nfcore_pipeline/tests/nextflow.config: -------------------------------------------------------------------------------- 1 | manifest { 2 | name = 'nextflow_workflow' 3 | author = """nf-core""" 4 | homePage = 'https://127.0.0.1' 5 | description = """Dummy pipeline""" 6 | nextflowVersion = '!>=23.04.0' 7 | version = '9.9.9' 8 | doi = 'https://doi.org/10.5281/zenodo.5070524' 9 | } 10 | -------------------------------------------------------------------------------- /subworkflows/nf-core/utils_nfcore_pipeline/tests/tags.yml: -------------------------------------------------------------------------------- 1 | subworkflows/utils_nfcore_pipeline: 2 | - subworkflows/nf-core/utils_nfcore_pipeline/** 3 | -------------------------------------------------------------------------------- /subworkflows/nf-core/utils_nfschema_plugin/main.nf: -------------------------------------------------------------------------------- 1 | // 2 | // Subworkflow that uses the nf-schema plugin to validate parameters and render the parameter summary 3 | // 4 | 5 | include { paramsSummaryLog } from 'plugin/nf-schema' 6 | include { validateParameters } from 'plugin/nf-schema' 7 | 8 | workflow UTILS_NFSCHEMA_PLUGIN { 9 | 10 | take: 11 | input_workflow // workflow: the workflow object used by nf-schema to get metadata from the workflow 12 | validate_params // boolean: validate the parameters 13 | parameters_schema // string: path to the parameters JSON schema. 14 | // this has to be the same as the schema given to `validation.parametersSchema` 15 | // when this input is empty it will automatically use the configured schema or 16 | // "${projectDir}/nextflow_schema.json" as default. This input should not be empty 17 | // for meta pipelines 18 | 19 | main: 20 | 21 | // 22 | // Print parameter summary to stdout. This will display the parameters 23 | // that differ from the default given in the JSON schema 24 | // 25 | if(parameters_schema) { 26 | log.info paramsSummaryLog(input_workflow, parameters_schema:parameters_schema) 27 | } else { 28 | log.info paramsSummaryLog(input_workflow) 29 | } 30 | 31 | // 32 | // Validate the parameters using nextflow_schema.json or the schema 33 | // given via the validation.parametersSchema configuration option 34 | // 35 | if(validate_params) { 36 | if(parameters_schema) { 37 | validateParameters(parameters_schema:parameters_schema) 38 | } else { 39 | validateParameters() 40 | } 41 | } 42 | 43 | emit: 44 | dummy_emit = true 45 | } 46 | 47 | -------------------------------------------------------------------------------- /subworkflows/nf-core/utils_nfschema_plugin/meta.yml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/subworkflows/yaml-schema.json 2 | name: "utils_nfschema_plugin" 3 | description: Run nf-schema to validate parameters and create a summary of changed parameters 4 | keywords: 5 | - validation 6 | - JSON schema 7 | - plugin 8 | - parameters 9 | - summary 10 | components: [] 11 | input: 12 | - input_workflow: 13 | type: object 14 | description: | 15 | The workflow object of the used pipeline. 16 | This object contains meta data used to create the params summary log 17 | - validate_params: 18 | type: boolean 19 | description: Validate the parameters and error if invalid. 
20 | - parameters_schema: 21 | type: string 22 | description: | 23 | Path to the parameters JSON schema. 24 | This has to be the same as the schema given to the `validation.parametersSchema` config 25 | option. When this input is empty it will automatically use the configured schema or 26 | "${projectDir}/nextflow_schema.json" as default. The schema should not be given in this way 27 | for meta pipelines. 28 | output: 29 | - dummy_emit: 30 | type: boolean 31 | description: Dummy emit to make nf-core subworkflows lint happy 32 | authors: 33 | - "@nvnieuwk" 34 | maintainers: 35 | - "@nvnieuwk" 36 | -------------------------------------------------------------------------------- /subworkflows/nf-core/utils_nfschema_plugin/tests/main.nf.test: -------------------------------------------------------------------------------- 1 | nextflow_workflow { 2 | 3 | name "Test Subworkflow UTILS_NFSCHEMA_PLUGIN" 4 | script "../main.nf" 5 | workflow "UTILS_NFSCHEMA_PLUGIN" 6 | 7 | tag "subworkflows" 8 | tag "subworkflows_nfcore" 9 | tag "subworkflows/utils_nfschema_plugin" 10 | tag "plugin/nf-schema" 11 | 12 | config "./nextflow.config" 13 | 14 | test("Should run nothing") { 15 | 16 | when { 17 | 18 | params { 19 | test_data = '' 20 | } 21 | 22 | workflow { 23 | """ 24 | validate_params = false 25 | input[0] = workflow 26 | input[1] = validate_params 27 | input[2] = "" 28 | """ 29 | } 30 | } 31 | 32 | then { 33 | assertAll( 34 | { assert workflow.success } 35 | ) 36 | } 37 | } 38 | 39 | test("Should validate params") { 40 | 41 | when { 42 | 43 | params { 44 | test_data = '' 45 | outdir = null 46 | } 47 | 48 | workflow { 49 | """ 50 | validate_params = true 51 | input[0] = workflow 52 | input[1] = validate_params 53 | input[2] = "" 54 | """ 55 | } 56 | } 57 | 58 | then { 59 | assertAll( 60 | { assert workflow.failed }, 61 | { assert workflow.stdout.any { it.contains('ERROR ~ Validation of pipeline parameters failed!') } } 62 | ) 63 | } 64 | } 65 | 66 | test("Should run nothing - custom schema") { 67 | 68 | when { 69 | 70 | params { 71 | test_data = '' 72 | } 73 | 74 | workflow { 75 | """ 76 | validate_params = false 77 | input[0] = workflow 78 | input[1] = validate_params 79 | input[2] = "${projectDir}/subworkflows/nf-core/utils_nfschema_plugin/tests/nextflow_schema.json" 80 | """ 81 | } 82 | } 83 | 84 | then { 85 | assertAll( 86 | { assert workflow.success } 87 | ) 88 | } 89 | } 90 | 91 | test("Should validate params - custom schema") { 92 | 93 | when { 94 | 95 | params { 96 | test_data = '' 97 | outdir = null 98 | } 99 | 100 | workflow { 101 | """ 102 | validate_params = true 103 | input[0] = workflow 104 | input[1] = validate_params 105 | input[2] = "${projectDir}/subworkflows/nf-core/utils_nfschema_plugin/tests/nextflow_schema.json" 106 | """ 107 | } 108 | } 109 | 110 | then { 111 | assertAll( 112 | { assert workflow.failed }, 113 | { assert workflow.stdout.any { it.contains('ERROR ~ Validation of pipeline parameters failed!') } } 114 | ) 115 | } 116 | } 117 | } 118 | -------------------------------------------------------------------------------- /subworkflows/nf-core/utils_nfschema_plugin/tests/nextflow.config: -------------------------------------------------------------------------------- 1 | plugins { 2 | id "nf-schema@2.1.0" 3 | } 4 | 5 | validation { 6 | parametersSchema = "${projectDir}/subworkflows/nf-core/utils_nfschema_plugin/tests/nextflow_schema.json" 7 | monochromeLogs = true 8 | } -------------------------------------------------------------------------------- 
/subworkflows/nf-core/utils_nfschema_plugin/tests/nextflow_schema.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://json-schema.org/draft/2020-12/schema", 3 | "$id": "https://raw.githubusercontent.com/./master/nextflow_schema.json", 4 | "title": ". pipeline parameters", 5 | "description": "", 6 | "type": "object", 7 | "$defs": { 8 | "input_output_options": { 9 | "title": "Input/output options", 10 | "type": "object", 11 | "fa_icon": "fas fa-terminal", 12 | "description": "Define where the pipeline should find input data and save output data.", 13 | "required": ["outdir"], 14 | "properties": { 15 | "validate_params": { 16 | "type": "boolean", 17 | "description": "Validate parameters?", 18 | "default": true, 19 | "hidden": true 20 | }, 21 | "outdir": { 22 | "type": "string", 23 | "format": "directory-path", 24 | "description": "The output directory where the results will be saved. You have to use absolute paths to storage on Cloud infrastructure.", 25 | "fa_icon": "fas fa-folder-open" 26 | }, 27 | "test_data_base": { 28 | "type": "string", 29 | "default": "https://raw.githubusercontent.com/nf-core/test-datasets/modules", 30 | "description": "Base for test data directory", 31 | "hidden": true 32 | }, 33 | "test_data": { 34 | "type": "string", 35 | "description": "Fake test data param", 36 | "hidden": true 37 | } 38 | } 39 | }, 40 | "generic_options": { 41 | "title": "Generic options", 42 | "type": "object", 43 | "fa_icon": "fas fa-file-import", 44 | "description": "Less common options for the pipeline, typically set in a config file.", 45 | "help_text": "These options are common to all nf-core pipelines and allow you to customise some of the core preferences for how the pipeline runs.\n\nTypically these options would be set in a Nextflow config file loaded for all pipeline runs, such as `~/.nextflow/config`.", 46 | "properties": { 47 | "help": { 48 | "type": "boolean", 49 | "description": "Display help text.", 50 | "fa_icon": "fas fa-question-circle", 51 | "hidden": true 52 | }, 53 | "version": { 54 | "type": "boolean", 55 | "description": "Display version and exit.", 56 | "fa_icon": "fas fa-question-circle", 57 | "hidden": true 58 | }, 59 | "logo": { 60 | "type": "boolean", 61 | "default": true, 62 | "description": "Display nf-core logo in console output.", 63 | "fa_icon": "fas fa-image", 64 | "hidden": true 65 | }, 66 | "singularity_pull_docker_container": { 67 | "type": "boolean", 68 | "description": "Pull Singularity container from Docker?", 69 | "hidden": true 70 | }, 71 | "publish_dir_mode": { 72 | "type": "string", 73 | "default": "copy", 74 | "description": "Method used to save pipeline results to output directory.", 75 | "help_text": "The Nextflow `publishDir` option specifies which intermediate files should be saved to the output directory. This option tells the pipeline what method should be used to move these files. 
See [Nextflow docs](https://www.nextflow.io/docs/latest/process.html#publishdir) for details.", 76 | "fa_icon": "fas fa-copy", 77 | "enum": ["symlink", "rellink", "link", "copy", "copyNoFollow", "move"], 78 | "hidden": true 79 | }, 80 | "monochrome_logs": { 81 | "type": "boolean", 82 | "description": "Use monochrome_logs", 83 | "hidden": true 84 | } 85 | } 86 | } 87 | }, 88 | "allOf": [ 89 | { 90 | "$ref": "#/$defs/input_output_options" 91 | }, 92 | { 93 | "$ref": "#/$defs/generic_options" 94 | } 95 | ] 96 | } 97 | -------------------------------------------------------------------------------- /tests/.nftignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | pipeline_info/*.{html,json,txt,yml} 3 | multiqc/multiqc_data/multiqc_data.json 4 | multiqc/multiqc_data/multiqc.log 5 | multiqc/multiqc_plots/pdf/* 6 | multiqc/multiqc_plots/png/* 7 | multiqc/multiqc_plots/svg/* 8 | multiqc/multiqc_report.html 9 | multiqc/crop_overview.png 10 | multiqc/crop_overview.txt 11 | clahe/*.tiff 12 | stack/*.tif 13 | molkartqc/crop_overview.png 14 | training_subset/hdf5/* 15 | training_subset/tiff/* 16 | -------------------------------------------------------------------------------- /tests/main.nf.test: -------------------------------------------------------------------------------- 1 | nextflow_pipeline { 2 | 3 | name "Test Workflow main.nf" 4 | script "../main.nf" 5 | tag "pipeline" 6 | tag "pipeline_molkart" 7 | 8 | test("Nuclear channel, stardist, mesmer and cellpose, without clahe") { 9 | 10 | when { 11 | params { 12 | input = 'https://raw.githubusercontent.com/nf-core/test-datasets/molkart/test_data/samplesheets/samplesheet_nuclear.csv' 13 | outdir = "$outputDir" 14 | skip_clahe = true 15 | mindagap_tilesize = 90 16 | mindagap_boxsize = 7 17 | mindagap_loopnum = 100 18 | segmentation_method = "cellpose,mesmer,stardist" 19 | } 20 | } 21 | 22 | then { 23 | // stable_name: All files + folders in ${params.outdir}/ with a stable name 24 | def stable_name = getAllFilesFromDir(params.outdir, relative: true, includeDir: true, ignore: ['pipeline_info/*.{html,json,txt}']) 25 | // stable_path: All files in ${params.outdir}/ with stable content 26 | def stable_path = getAllFilesFromDir(params.outdir, ignoreFile: 'tests/.nftignore') 27 | assert workflow.success 28 | assertAll( 29 | { assert workflow.success }, 30 | { assert snapshot( 31 | // Number of successful tasks 32 | workflow.trace.succeeded().size(), 33 | // pipeline versions.yml file for multiqc from which the Nextflow version is removed because we test pipelines on multiple Nextflow versions 34 | removeNextflowVersion("$outputDir/pipeline_info/nf_core_molkart_software_mqc_versions.yml"), 35 | // All stable path names, with relative paths 36 | stable_name, 37 | // All files with stable contents 38 | stable_path 39 | ).match() } 40 | ) 41 | } 42 | } 43 | 44 | test("Two channels, mesmer and cellpose, with clahe") { 45 | 46 | when { 47 | params { 48 | input = 'https://raw.githubusercontent.com/nf-core/test-datasets/molkart/test_data/samplesheets/samplesheet_membrane.csv' 49 | outdir = "$outputDir" 50 | mindagap_tilesize = 90 51 | mindagap_boxsize = 7 52 | mindagap_loopnum = 100 53 | clahe_pyramid_tile = 368 54 | segmentation_method = "cellpose,mesmer" 55 | } 56 | } 57 | 58 | then { 59 | // stable_name: All files + folders in ${params.outdir}/ with a stable name 60 | def stable_name = getAllFilesFromDir(params.outdir, relative: true, includeDir: true, ignore: ['pipeline_info/*.{html,json,txt}']) 61 | // 
stable_path: All files in ${params.outdir}/ with stable content 62 | def stable_path = getAllFilesFromDir(params.outdir, ignoreFile: 'tests/.nftignore') 63 | assert workflow.success 64 | assertAll( 65 | { assert workflow.success }, 66 | { assert snapshot( 67 | // Number of successful tasks 68 | workflow.trace.succeeded().size(), 69 | // pipeline versions.yml file for multiqc from which the Nextflow version is removed because we test pipelines on multiple Nextflow versions 70 | removeNextflowVersion("$outputDir/pipeline_info/nf_core_molkart_software_mqc_versions.yml"), 71 | // All stable path names, with relative paths 72 | stable_name, 73 | // All files with stable contents 74 | stable_path 75 | ).match() } 76 | ) 77 | } 78 | } 79 | 80 | test("Skip mindagap - clahe - cellpose") { 81 | 82 | when { 83 | params { 84 | input = 'https://raw.githubusercontent.com/nf-core/test-datasets/molkart/test_data/samplesheets/samplesheet_membrane.csv' 85 | outdir = "$outputDir" 86 | clahe_pyramid_tile = 368 87 | segmentation_method = "cellpose" 88 | skip_mindagap = true 89 | } 90 | } 91 | 92 | then { 93 | // stable_name: All files + folders in ${params.outdir}/ with a stable name 94 | def stable_name = getAllFilesFromDir(params.outdir, relative: true, includeDir: true, ignore: ['pipeline_info/*.{html,json,txt}']) 95 | // stable_path: All files in ${params.outdir}/ with stable content 96 | def stable_path = getAllFilesFromDir(params.outdir, ignoreFile: 'tests/.nftignore') 97 | assert workflow.success 98 | assertAll( 99 | { assert workflow.success }, 100 | { assert snapshot( 101 | // Number of successful tasks 102 | workflow.trace.succeeded().size(), 103 | // pipeline versions.yml file for multiqc from which the Nextflow version is removed because we test pipelines on multiple Nextflow versions 104 | removeNextflowVersion("$outputDir/pipeline_info/nf_core_molkart_software_mqc_versions.yml"), 105 | // All stable path names, with relative paths 106 | stable_name, 107 | // All files with stable contents 108 | stable_path 109 | ).match() } 110 | ) 111 | } 112 | } 113 | 114 | test("Create training subset") { 115 | 116 | when { 117 | params { 118 | input = 'https://raw.githubusercontent.com/nf-core/test-datasets/molkart/test_data/samplesheets/samplesheet_nuclear.csv' 119 | outdir = "$outputDir" 120 | mindagap_tilesize = 90 121 | mindagap_boxsize = 7 122 | mindagap_loopnum = 100 123 | clahe_pyramid_tile = 368 124 | create_training_subset = true 125 | crop_amount = 2 126 | crop_size_x = 10 127 | crop_size_y = 10 128 | } 129 | } 130 | 131 | then { 132 | // stable_name: All files + folders in ${params.outdir}/ with a stable name 133 | def stable_name = getAllFilesFromDir(params.outdir, relative: true, includeDir: true, ignore: ['pipeline_info/*.{html,json,txt}']) 134 | // stable_path: All files in ${params.outdir}/ with stable content 135 | def stable_path = getAllFilesFromDir(params.outdir, ignoreFile: 'tests/.nftignore') 136 | assert workflow.success 137 | assertAll( 138 | { assert workflow.success }, 139 | { assert snapshot( 140 | // Number of successful tasks 141 | workflow.trace.succeeded().size(), 142 | // pipeline versions.yml file for multiqc from which the Nextflow version is removed because we test pipelines on multiple Nextflow versions 143 | removeNextflowVersion("$outputDir/pipeline_info/nf_core_molkart_software_mqc_versions.yml"), 144 | // All stable path names, with relative paths 145 | stable_name, 146 | // All files with stable contents 147 | stable_path 148 | ).match() } 149 | ) 150 | } 151 | } 152 | } 153 | 
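All four pipeline-level tests above follow one snapshot pattern: count the succeeded tasks, strip the Nextflow version from the MultiQC software-versions YAML, and snapshot stable file names and stable file contents separately. A further configuration would be added as another `test` block in the same style; the stardist-only variant below is a hypothetical illustration, not part of the suite:

    test("Stardist only, with clahe") {

        when {
            params {
                input               = 'https://raw.githubusercontent.com/nf-core/test-datasets/molkart/test_data/samplesheets/samplesheet_nuclear.csv'
                outdir              = "$outputDir"
                clahe_pyramid_tile  = 368
                segmentation_method = "stardist"
            }
        }

        then {
            // Same stability helpers as the tests above
            def stable_name = getAllFilesFromDir(params.outdir, relative: true, includeDir: true, ignore: ['pipeline_info/*.{html,json,txt}'])
            def stable_path = getAllFilesFromDir(params.outdir, ignoreFile: 'tests/.nftignore')
            assertAll(
                { assert workflow.success },
                { assert snapshot(
                    // Number of successful tasks
                    workflow.trace.succeeded().size(),
                    // Software versions with the Nextflow version removed
                    removeNextflowVersion("$outputDir/pipeline_info/nf_core_molkart_software_mqc_versions.yml"),
                    stable_name,
                    stable_path
                ).match() }
            )
        }
    }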
-------------------------------------------------------------------------------- /tests/tags.yml: -------------------------------------------------------------------------------- 1 | default: 2 | - bin/** 3 | - conf/** 4 | - lib/** 5 | - modules/** 6 | - subworkflows/** 7 | - tests/** 8 | - workflows/** 9 | - nextflow.config 10 | - main.nf 11 | -------------------------------------------------------------------------------- /tower.yml: -------------------------------------------------------------------------------- 1 | reports: 2 | multiqc_report.html: 3 | display: "MultiQC HTML report" 4 | final_QC.all_samples.csv: 5 | display: "QC metrics across all samples and segmentation methods" 6 | crop_overview.txt: 7 | display: "Crop overview if training subset is created" 8 | --------------------------------------------------------------------------------
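The tower.yml above maps published pipeline outputs to named reports in Seqera Platform: each key is a file name (glob patterns are also accepted) matched against the run's output files, and `display` supplies the label shown in the Reports tab. A hypothetical additional entry would follow the same two-line pattern; the path glob and wording below are illustrative only, not part of the pipeline:

    reports:
      # hypothetical addition for the AnnData outputs:
      "**/anndata/*.adata":
        display: "AnnData object per sample and segmentation method"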