Data was processed using nf-core/spatialxe v${workflow.manifest.version} ${doi_text} of the nf-core collection of workflows (Ewels et al., 2020).
nf-core/spatialxe v${version}
Run Name: $runName

<% if (!success){
    out << """
nf-core/spatialxe execution completed unsuccessfully!

The exit status of the task that caused the workflow execution to fail was: $exitStatus.

The full error message was:

${errorReport}
"""
} else {
    out << """
nf-core/spatialxe execution completed successfully!
"""
}
%>

The workflow was completed at $dateComplete (duration: $duration)

The command used to launch the workflow was as follows:

$commandLine

Pipeline Configuration:
<% out << summary.collect{ k,v -> "| $k | $v |" }.join("\n") %>

nf-core/spatialxe
https://github.com/nf-core/spatialxe
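The ${} placeholders and <% %> scriptlets above are standard Groovy template syntax; the pipeline renders this file with groovy.text.SimpleTemplateEngine (imported in lib/WorkflowSpatialxe.groovy later in this listing). A minimal, self-contained sketch of that rendering step, with illustrative binding values:

    // Sketch only: how a Groovy template like the email template is rendered.
    // The binding keys (version, runName) mirror the template placeholders.
    import groovy.text.SimpleTemplateEngine

    def engine   = new SimpleTemplateEngine()
    def template = 'nf-core/spatialxe v${version} - Run Name: $runName'
    def binding  = [version: '1.0.0', runName: 'example_run']
    assert engine.createTemplate(template).make(binding).toString() == 'nf-core/spatialxe v1.0.0 - Run Name: example_run'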
--------------------------------------------------------------------------------
/modules/nf-core/cellpose/tests/main.nf.test.snap:
--------------------------------------------------------------------------------
{
    "flows": {
        "content": [
            [
                [
                    {
                        "id": "test"
                    },
                    "cycif_tonsil_registered.ome_flows.tif:md5,de79a792d4bebd2f9753ceb47a0de5f7"
                ]
            ]
        ],
        "meta": {
            "nf-test": "0.8.4",
            "nextflow": "23.10.1"
        },
        "timestamp": "2024-03-18T14:22:16.855256249"
    },
    "versions": {
        "content": [
            [
                "versions.yml:md5,ce42208b574084f390cf58b4c19b5717"
            ]
        ],
        "meta": {
            "nf-test": "0.8.4",
            "nextflow": "23.10.1"
        },
        "timestamp": "2024-03-18T14:22:16.875087557"
    },
    "cellpose - stub": {
        "content": [
            {
                "0": [
                    [
                        {
                            "id": "test"
                        },
                        "cycif_tonsil_registered.ome_cp_masks.tif:md5,d41d8cd98f00b204e9800998ecf8427e"
                    ]
                ],
                "1": [

                ],
                "2": [
                    "versions.yml:md5,ce42208b574084f390cf58b4c19b5717"
                ],
                "flows": [

                ],
                "mask": [
                    [
                        {
                            "id": "test"
                        },
                        "cycif_tonsil_registered.ome_cp_masks.tif:md5,d41d8cd98f00b204e9800998ecf8427e"
                    ]
                ],
                "versions": [
                    "versions.yml:md5,ce42208b574084f390cf58b4c19b5717"
                ]
            }
        ],
        "meta": {
            "nf-test": "0.8.4",
            "nextflow": "23.10.1"
        },
        "timestamp": "2024-03-18T14:22:39.339792992"
    },
    "mask": {
        "content": [
            [
                [
                    {
                        "id": "test"
                    },
                    "cycif_tonsil_registered.ome_cp_masks.tif:md5,001ad312413f18bc2615741bd3ad12cf"
                ]
            ]
        ],
        "meta": {
            "nf-test": "0.8.4",
            "nextflow": "23.10.1"
        },
        "timestamp": "2024-03-18T14:22:16.8369758"
    }
}
--------------------------------------------------------------------------------
/modules/local/proseg/meta.yml:
--------------------------------------------------------------------------------
name: "proseg"
description: Probabilistic cell segmentation for in situ spatial transcriptomics
keywords:
  - segmentation
  - cell segmentation
  - spatialomics
  - probabilistic segmentation
  - in situ spatial transcriptomics
tools:
  - "proseg":
      description: "Proseg (probabilistic segmentation) is a cell segmentation method for in situ spatial transcriptomics. The Xenium, CosMx, and MERSCOPE platforms are currently supported."
      homepage: "https://github.com/dcjones/proseg/tree/main"
      documentation: "https://github.com/dcjones/proseg/blob/main/README.md"
      tool_dev_url: "https://github.com/dcjones/proseg"
      doi: ""
      licence: ["GNU Public License"]

input:
  - - meta:
        type: map
        description: |
          Groovy Map containing run information
          e.g. `[ id:'run_id' ]`
    - transcripts:
        type: file
        description: |
          File containing the transcript positions
        pattern: "transcripts.csv.gz"

output:
  - - meta:
        type: map
        description: |
          Groovy Map containing run information
          e.g. `[ id:'run_id' ]`
    - cell_polygons:
        type: file
        description: 2D polygons for each cell in GeoJSON format, flattened from 3D
        pattern: "cell-polygons.geojson.gz"
  - - expected_counts:
        type: file
        description: Cell-by-gene count matrix
        pattern: "expected-counts.csv.gz"
  - - cell_metadata:
        type: file
        description: Cell centroids, volume, and other information
        pattern: "cell-metadata.csv.gz"
  - - transcript_metadata:
        type: file
        description: Transcript IDs, genes, revised positions, and assignment probabilities
        pattern: "transcript-metadata.csv.gz"
  - - gene_metadata:
        type: file
        description: Per-gene summary statistics
        pattern: "gene-metadata.csv.gz"
  - - rates:
        type: file
        description: Cell-by-gene Poisson rate parameters
        pattern: "rates.csv.gz"
  - - cell_polygons_layers:
        type: file
        description: A separate, non-overlapping cell polygon for each z-layer, preserving the 3D segmentation
        pattern: "cell-polygons-layers.geojson.gz"
  - - cell_hulls:
        type: file
        description: Convex hulls around assigned transcripts
        pattern: "cell-hulls.geojson.gz"
  - - versions:
        type: file
        description: File containing software versions
        pattern: "versions.yml"

authors:
  - "@khersameesh24"
maintainers:
  - "@khersameesh24"
--------------------------------------------------------------------------------
/modules/nf-core/xeniumranger/relabel/tests/main.nf.test.snap:
--------------------------------------------------------------------------------
{
    "xeniumranger relabel": {
        "content": [
            [
                "versions.yml:md5,ab2584177544560d5a9e9c36f7d24354"
            ],
            [
                "dispersion.csv:md5,e8b1abb880ece8fb730ce34a15f958b4",
                "features_selected.csv:md5,c5e32d69f001f938ed316d2108a21e00",
                "cell_boundaries.csv.gz:md5,8b4f2aa455a6fb14b2669a42db32ea7e",
                "cell_boundaries.parquet:md5,e55d6a7fbec336103994baad8c8e4a9a",
                "cell_feature_matrix.h5:md5,96cb400f1b1dd6f8796daea0ad5c74e6",
                "barcodes.tsv.gz:md5,04ea06796d6b28517c288904ca043582",
                "features.tsv.gz:md5,7862242129681900a9cc4086dc83b62e",
                "matrix.mtx.gz:md5,489f86fbd8d65d6b973bb9cc7c5a76f1",
                "cells.csv.gz:md5,3cef2d7cc8cfba1d47bdb7c65c3d5d5f",
                "cells.parquet:md5,9b30b35ab961d2d243a1426e8dc980fe",
                "cells.zarr.zip:md5,556e47d5b14150239b10b2f801defa2b",
                "gene_panel.json:md5,8890dd5fd90706e751554ac3fdfdedde",
                "morphology.ome.tif:md5,6b65fff28a38a001b8f25061737fbf9b",
                "morphology_focus_0000.ome.tif:md5,90e796ad634d14e62cf2ebcadf2eaf98",
                "nucleus_boundaries.csv.gz:md5,e417b6e293298870956d42c7106cbd0c",
                "nucleus_boundaries.parquet:md5,bacbfc3c2e956d899e1d8ccba5dd7c5e",
                "transcripts.parquet:md5,c0f40d5c61b87404bc9efb84ff0563a8"
            ]
        ],
        "meta": {
            "nf-test": "0.9.0",
            "nextflow": "24.04.4"
        },
        "timestamp": "2024-10-29T21:06:09.082129"
    },
    "xeniumranger relabel stub": {
        "content": [
            {
                "0": [
                    [
                        {
                            "id": "test_xeniumranger_relabel"
                        },
                        "fake_file.txt:md5,d41d8cd98f00b204e9800998ecf8427e"
                    ]
                ],
                "1": [
                    "versions.yml:md5,ab2584177544560d5a9e9c36f7d24354"
                ],
                "outs": [
                    [
                        {
                            "id": "test_xeniumranger_relabel"
                        },
                        "fake_file.txt:md5,d41d8cd98f00b204e9800998ecf8427e"
                    ]
                ],
                "versions": [
                    "versions.yml:md5,ab2584177544560d5a9e9c36f7d24354"
                ]
            }
        ],
        "meta": {
            "nf-test": "0.9.0",
            "nextflow": "24.04.4"
        },
        "timestamp": "2024-10-22T15:22:34.353444"
    }
}
--------------------------------------------------------------------------------
/assets/adaptivecard.json:
--------------------------------------------------------------------------------
{
    "type": "message",
    "attachments": [
        {
            "contentType": "application/vnd.microsoft.card.adaptive",
            "contentUrl": null,
            "content": {
                "\$schema": "http://adaptivecards.io/schemas/adaptive-card.json",
                "msteams": {
                    "width": "Full"
                },
                "type": "AdaptiveCard",
                "version": "1.2",
                "body": [
                    {
                        "type": "TextBlock",
                        "size": "Large",
                        "weight": "Bolder",
                        "color": "<% if (success) { %>Good<% } else { %>Attention<% } %>",
                        "text": "nf-core/spatialxe v${version} - ${runName}",
                        "wrap": true
                    },
                    {
                        "type": "TextBlock",
                        "spacing": "None",
                        "text": "Completed at ${dateComplete} (duration: ${duration})",
                        "isSubtle": true,
                        "wrap": true
                    },
                    {
                        "type": "TextBlock",
                        "text": "<% if (success) { %>Pipeline completed successfully!<% } else { %>Pipeline completed with errors. The full error message was: ${errorReport}.<% } %>",
                        "wrap": true
                    },
                    {
                        "type": "TextBlock",
                        "text": "The command used to launch the workflow was as follows:",
                        "wrap": true
                    },
                    {
                        "type": "TextBlock",
                        "text": "${commandLine}",
                        "isSubtle": true,
                        "wrap": true
                    }
                ],
                "actions": [
                    {
                        "type": "Action.ShowCard",
                        "title": "Pipeline Configuration",
                        "card": {
                            "type": "AdaptiveCard",
                            "\$schema": "http://adaptivecards.io/schemas/adaptive-card.json",
                            "body": [
                                {
                                    "type": "FactSet",
                                    "facts": [<% out << summary.collect{ k,v -> "{\"title\": \"$k\", \"value\" : \"$v\"}"}.join(",\n") %>
                                    ]
                                }
                            ]
                        }
                    }
                ]
            }
        }
    ]
}
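This adaptive card is the payload for Microsoft Teams notifications and is itself a Groovy template, rendered with the same bindings as the email template. In the standard nf-core template it is posted when an incoming-webhook URL is supplied; a minimal sketch of enabling that via a custom config (the hook_url parameter name is assumed from the nf-core template and may differ per template version):

    // notify.config: pass with `-c notify.config` at launch
    params {
        hook_url = 'https://example.webhook.office.com/webhookb2/...'
    }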
--------------------------------------------------------------------------------
/modules/local/proseg/tests/main.nf.test:
--------------------------------------------------------------------------------
nextflow_process {

    name "Test Process PROSEG"
    script "../main.nf"
    process "PROSEG"

    tag "modules"
    tag "modules_nfcore"
    tag "proseg"
    tag "segmentation"
    tag "cell_segmentation"

    setup {
        run("UNZIP") {
            script "modules/nf-core/unzip/main.nf"
            process {
                """
                input[0] = [[], file('https://raw.githubusercontent.com/nf-core/test-datasets/spatialxe/Xenium_Prime_Mouse_Ileum_tiny_outs.zip', checkIfExists: true)]
                """
            }
        }
    }

    test("proseg - transcripts.csv") {

        when {
            process {
                """
                input[0] = Channel.of([
                    [id: "test_run_proseg"],
                ]).combine(UNZIP.out.unzipped_archive.map { it[1].toString() + "/transcripts.csv" })
                """
            }
        }

        then {
            assertAll(
                { assert process.success },
                { assert snapshot(process.out).match() },
                // cell_polygons is a tuple [ meta, path ]; the remaining outputs are plain paths
                { assert file(process.out.cell_polygons.get(0).get(1)).exists() },
                { assert file(process.out.expected_counts.get(0)).exists() },
                { assert file(process.out.cell_metadata.get(0)).exists() },
                { assert file(process.out.transcript_metadata.get(0)).exists() },
                { assert file(process.out.gene_metadata.get(0)).exists() },
                { assert file(process.out.rates.get(0)).exists() },
                { assert file(process.out.cell_polygons_layers.get(0)).exists() },
                { assert file(process.out.cell_hulls.get(0)).exists() }
            )
        }

    }

    test("proseg stub") {

        options "-stub"

        when {
            process {
                """
                input[0] = Channel.of([
                    [id: "test_run_proseg"],
                ]).combine(UNZIP.out.unzipped_archive.map { it[1].toString() + "/transcripts.csv" })
                """
            }
        }

        then {
            assertAll(
                { assert process.success },
                { assert snapshot(process.out).match() }
            )
        }

    }

}
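As with the other module tests in this listing, the suite above can be run locally with nf-test, for example with: nf-test test modules/local/proseg/tests/main.nf.test --profile docker. The snapshot files shown elsewhere in this listing are regenerated by adding nf-test's --update-snapshot flag.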
--------------------------------------------------------------------------------
/conf/base.config:
--------------------------------------------------------------------------------
/*
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    nf-core/spatialxe Nextflow base config file
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    A 'blank slate' config file, appropriate for general use on most high performance
    compute environments. Assumes that all software is installed and available on
    the PATH. Runs in `local` mode - all jobs will be run on the logged in environment.
----------------------------------------------------------------------------------------
*/

process {

    // TODO nf-core: Check the defaults for all processes
    cpus   = { check_max( 1    * task.attempt, 'cpus'   ) }
    memory = { check_max( 6.GB * task.attempt, 'memory' ) }
    time   = { check_max( 4.h  * task.attempt, 'time'   ) }

    errorStrategy = { task.exitStatus in [143,137,104,134,139] ? 'retry' : 'finish' }
    maxRetries    = 1
    maxErrors     = '-1'

    // Process-specific resource requirements
    // NOTE - Please try and re-use the labels below as much as possible.
    //        These labels are used and recognised by default in DSL2 files hosted on nf-core/modules.
    //        If possible, it would be nice to keep the same label naming convention when
    //        adding in your local modules too.
    // TODO nf-core: Customise requirements for specific processes.
    // See https://www.nextflow.io/docs/latest/config.html#config-process-selectors
    withLabel:process_single {
        cpus   = { check_max( 1                  , 'cpus'   ) }
        memory = { check_max( 6.GB * task.attempt, 'memory' ) }
        time   = { check_max( 4.h  * task.attempt, 'time'   ) }
    }
    withLabel:process_low {
        cpus   = { check_max( 2     * task.attempt, 'cpus'   ) }
        memory = { check_max( 12.GB * task.attempt, 'memory' ) }
        time   = { check_max( 4.h   * task.attempt, 'time'   ) }
    }
    withLabel:process_medium {
        cpus   = { check_max( 6     * task.attempt, 'cpus'   ) }
        memory = { check_max( 36.GB * task.attempt, 'memory' ) }
        time   = { check_max( 8.h   * task.attempt, 'time'   ) }
    }
    withLabel:process_high {
        cpus   = { check_max( 12    * task.attempt, 'cpus'   ) }
        memory = { check_max( 72.GB * task.attempt, 'memory' ) }
        time   = { check_max( 16.h  * task.attempt, 'time'   ) }
    }
    withLabel:process_long {
        time = { check_max( 20.h * task.attempt, 'time' ) }
    }
    withLabel:process_high_memory {
        memory = { check_max( 200.GB * task.attempt, 'memory' ) }
    }
    withLabel:error_ignore {
        errorStrategy = 'ignore'
    }
    withLabel:error_retry {
        errorStrategy = 'retry'
        maxRetries    = 2
    }
    withName:CUSTOM_DUMPSOFTWAREVERSIONS {
        cache = false
    }
}
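These defaults are deliberately conservative and are meant to be overridden per site rather than edited in place. A minimal sketch of a custom config, passed to the pipeline with -c custom.config (label and process names come from this repository; the values are illustrative):

    // custom.config: raise resource ceilings for demanding processes
    process {
        withLabel:process_high {
            cpus   = 24
            memory = 128.GB
        }
        withName:PROSEG {
            time = 24.h
        }
    }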
--------------------------------------------------------------------------------
/modules/nf-core/xeniumranger/resegment/meta.yml:
--------------------------------------------------------------------------------
name: xeniumranger_resegment
description: The xeniumranger resegment module generates a new segmentation of the morphology image space by rerunning the Xenium Onboard Analysis (XOA) segmentation algorithms with modified parameters.
keywords:
  - spatial
  - resegment
  - morphology
  - segmentation
  - xeniumranger
tools:
  - xeniumranger:
      description: |
        Xenium Ranger is a set of analysis pipelines that process Xenium In Situ Gene Expression data to relabel, resegment, or import new segmentation results from community-developed tools. Xenium Ranger provides flexible off-instrument reanalysis of Xenium In Situ data: relabel transcripts, resegment cells with the latest 10x segmentation algorithms, or import your own segmentation data to assign transcripts to cells.
      homepage: "https://www.10xgenomics.com/support/software/xenium-ranger/latest"
      documentation: "https://www.10xgenomics.com/support/software/xenium-ranger/latest/getting-started"
      tool_dev_url: "https://www.10xgenomics.com/support/software/xenium-ranger/latest/analysis"
      licence:
        - "10x Genomics EULA"
      identifier: ""
input:
  - - meta:
        type: map
        description: |
          Groovy Map containing run information
          e.g. [ id:'xenium_experiment' ]
    - xenium_bundle:
        type: directory
        description: Path to the Xenium output bundle generated by the Xenium Onboard Analysis pipeline
  - - expansion_distance:
        type: integer
        description: "Nuclei boundary expansion distance in µm, used only when nucleus segmentation is provided as input. Default: 5; accepted range: 0-100."
  - - dapi_filter:
        type: integer
        description: "Minimum intensity, in photoelectrons, used to filter nuclei. Default: 100; accepted range: 0 to the 99th percentile of the image stack or 1000, whichever is larger."
  - - boundary_stain:
        type: string
        description: "Name of the boundary stain to use, or 'disable'. Default: ATP1A1/CD45/E-Cadherin."
  - - interior_stain:
        type: string
        description: "Name of the interior stain to use, or 'disable'. Default: 18S."
output:
  - outs:
      - meta:
          type: map
          description: |
            Groovy Map containing run information
            e.g. [ id:'xenium_experiment' ]
      - "**/outs/**":
          type: file
          description: Files containing the outputs of Xenium Ranger; see the official 10x Genomics documentation for a complete list
          pattern: "${meta.id}/outs/*"
  - versions:
      - versions.yml:
          type: file
          description: File containing software versions
          pattern: "versions.yml"
authors:
  - "@khersameesh24"
maintainers:
  - "@khersameesh24"
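For orientation, a hypothetical call of this module; the five positional inputs follow the order documented above, with empty lists falling back to the tool defaults (channel names are illustrative):

    XENIUMRANGER_RESEGMENT(
        ch_xenium_bundle, // [ meta, xenium_bundle ]
        10,               // expansion_distance in µm
        [],               // dapi_filter
        [],               // boundary_stain
        []                // interior_stain
    )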
--------------------------------------------------------------------------------
/modules/nf-core/xeniumranger/import-segmentation/main.nf:
--------------------------------------------------------------------------------
process XENIUMRANGER_IMPORT_SEGMENTATION {
    tag "$meta.id"
    label 'process_high'

    container "nf-core/xeniumranger:3.0.1"

    input:
    tuple val(meta), path(xenium_bundle)
    val(expansion_distance)
    path(coordinate_transform)
    path(nuclei)
    path(cells)
    path(transcript_assignment)
    path(viz_polygons)

    output:
    tuple val(meta), path("**/outs/**"), emit: outs
    path "versions.yml", emit: versions

    when:
    task.ext.when == null || task.ext.when

    script:
    // Exit if running this module with -profile conda / -profile mamba
    if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) {
        error "XENIUMRANGER_IMPORT_SEGMENTATION module does not support Conda. Please use Docker / Singularity / Podman instead."
    }
    def args = task.ext.args ?: ''
    def prefix = task.ext.prefix ?: "${meta.id}"

    // image-based segmentation options
    def expansion_distance = expansion_distance ? "--expansion-distance=\"${expansion_distance}\"": "" // expansion distance (default: 5, range: 0-100)
    def coordinate_transform = coordinate_transform ? "--coordinate-transform=\"${coordinate_transform}\"": ""

    def nuclei_detection = nuclei ? "--nuclei=\"${nuclei}\"": ""
    def cells = cells ? "--cells=\"${cells}\"": ""

    // transcript-based segmentation
    def transcript_assignment = transcript_assignment ? "--transcript-assignment=\"${transcript_assignment}\"": ""
    def viz_polygons = viz_polygons ? "--viz-polygons=\"${viz_polygons}\"": ""

    // shared argument
    def units = coordinate_transform ? "--units=microns": "--units=pixels"

    """
    xeniumranger import-segmentation \\
        --id="${prefix}" \\
        --xenium-bundle="${xenium_bundle}" \\
        --localcores=${task.cpus} \\
        --localmem=${task.memory.toGiga()} \\
        ${coordinate_transform} \\
        ${nuclei_detection} \\
        ${cells} \\
        ${expansion_distance} \\
        ${transcript_assignment} \\
        ${viz_polygons} \\
        ${units} \\
        ${args}

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        xeniumranger: \$(xeniumranger -V | sed -e "s/xeniumranger-/- /g")
    END_VERSIONS
    """

    stub:
    // Exit if running this module with -profile conda / -profile mamba
    if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) {
        error "XENIUMRANGER_IMPORT_SEGMENTATION module does not support Conda. Please use Docker / Singularity / Podman instead."
    }
    def prefix = task.ext.prefix ?: "${meta.id}"
    """
    mkdir -p "${prefix}/outs/"
    touch "${prefix}/outs/fake_file.txt"

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        xeniumranger: \$(xeniumranger -V | sed -e "s/xeniumranger-/- /g")
    END_VERSIONS
    """
}
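A hypothetical invocation of the module above, mirroring its seven declared inputs; the channel names are illustrative, and unused inputs may be passed as empty lists:

    XENIUMRANGER_IMPORT_SEGMENTATION(
        ch_bundle,               // [ meta, xenium_bundle ]
        [],                      // expansion_distance (tool default: 5)
        ch_coordinate_transform, // selects --units=microns when set
        ch_nuclei,               // nuclei segmentation image
        ch_cells,                // cell segmentation image
        [],                      // transcript_assignment
        []                       // viz_polygons
    )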
--------------------------------------------------------------------------------
/modules/nf-core/xeniumranger/resegment/tests/main.nf.test:
--------------------------------------------------------------------------------
nextflow_process {

    name "Test Process XENIUMRANGER_RESEGMENT"
    script "../main.nf"
    process "XENIUMRANGER_RESEGMENT"
    config "./nextflow.config"

    tag "modules"
    tag "modules_nfcore"
    tag "xeniumranger"
    tag "xeniumranger/resegment"
    tag "unzip"

    setup {
        run("UNZIP") {
            script "modules/nf-core/unzip/main.nf"
            process {
                """
                input[0] = [[], file('https://raw.githubusercontent.com/nf-core/test-datasets/spatialxe/Xenium_Prime_Mouse_Ileum_tiny_outs.zip', checkIfExists: true)]
                """
            }
        }
    }

    test("xeniumranger resegment") {
        when {
            process {
                """
                input[0] = Channel.of([
                    [id: "test_xeniumranger_resegment"],
                ]).combine(UNZIP.out.unzipped_archive.map { it[1] })
                input[1] = []
                input[2] = []
                input[3] = []
                input[4] = []
                """
            }
        }
        then {
            assertAll(
                { assert process.success },
                { assert process.out.outs != null },
                { assert file(process.out.outs.get(0).get(1).find { file(it).name == 'analysis_summary.html' }).exists() },
                { assert file(process.out.outs.get(0).get(1).find { file(it).name == 'cells.csv.gz' }).exists() },
                { assert file(process.out.outs.get(0).get(1).find { file(it).name == 'cells.parquet' }).exists() },
                { assert file(process.out.outs.get(0).get(1).find { file(it).name == 'cells.zarr.zip' }).exists() },
                { assert file(process.out.outs.get(0).get(1).find { file(it).name == 'transcripts.parquet' }).exists() },
                { assert file(process.out.outs.get(0).get(1).find { file(it).name == 'transcripts.zarr.zip' }).exists() },
                { assert file(process.out.outs.get(0).get(1).find { file(it).name == 'analysis.zarr.zip' }).exists() },
                { assert file(process.out.outs.get(0).get(1).find { file(it).name == 'cell_feature_matrix.zarr.zip' }).exists() }
            )
        }
    }

    test("xeniumranger resegment stub") {
        options "-stub"
        when {
            process {
                """
                input[0] = Channel.of([
                    [id: "test_xeniumranger_resegment"],
                ]).combine(UNZIP.out.unzipped_archive.map { it[1] })
                input[1] = []
                input[2] = []
                input[3] = []
                input[4] = []
                """
            }
        }
        then {
            assertAll(
                { assert process.success },
                { assert snapshot(process.out).match() }
            )
        }
    }
}
--------------------------------------------------------------------------------
/modules/local/proseg/main.nf:
--------------------------------------------------------------------------------
process PROSEG {
    tag "$meta.id"
    label 'process_high'

    container "nf-core/proseg:1.1.8"

    input:
    tuple val(meta), path(transcripts)

    output:
    tuple val(meta), path("cell-polygons.geojson.gz"), emit: cell_polygons
    path("expected-counts.csv.gz"), emit: expected_counts
    path("cell-metadata.csv.gz"), emit: cell_metadata
    path("transcript-metadata.csv.gz"), emit: transcript_metadata
    path("gene-metadata.csv.gz"), emit: gene_metadata
    path("rates.csv.gz"), emit: rates
    path("cell-polygons-layers.geojson.gz"), emit: cell_polygons_layers
    path("cell-hulls.geojson.gz"), emit: cell_hulls
    path("versions.yml"), emit: versions

    when:
    task.ext.when == null || task.ext.when

    script:
    // Exit if running this module with -profile conda / -profile mamba
    if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) {
        error "PROSEG module does not support Conda. Please use Docker / Singularity / Podman instead."
    }
    def args = task.ext.args ?: ''
    def prefix = task.ext.prefix ?: "${meta.id}"
    def platform = params.preset ? "${params.preset}" : ""

    // check for preset values
    if (!(platform in ['xenium', 'cosmx', 'merscope'])) {
        error "${platform} is an invalid platform (preset) type. Please specify xenium, cosmx, or merscope"
    }

    """
    proseg \\
        --${platform} \\
        ${transcripts} \\
        --nthreads ${task.cpus} \\
        --output-expected-counts expected-counts.csv.gz \\
        --output-cell-metadata cell-metadata.csv.gz \\
        --output-transcript-metadata transcript-metadata.csv.gz \\
        --output-gene-metadata gene-metadata.csv.gz \\
        --output-rates rates.csv.gz \\
        --output-cell-polygons cell-polygons.geojson.gz \\
        --output-cell-polygon-layers cell-polygons-layers.geojson.gz \\
        --output-cell-hulls cell-hulls.geojson.gz \\
        ${args}

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        proseg: \$(proseg --version | sed 's/proseg //')
    END_VERSIONS
    """

    stub:
    // Exit if running this module with -profile conda / -profile mamba
    if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) {
        error "PROSEG module does not support Conda. Please use Docker / Singularity / Podman instead."
    }
    def prefix = task.ext.prefix ?: "${meta.id}"

    """
    touch expected-counts.csv.gz
    touch cell-metadata.csv.gz
    touch transcript-metadata.csv.gz
    touch gene-metadata.csv.gz
    touch rates.csv.gz
    touch cell-polygons.geojson.gz
    touch cell-polygons-layers.geojson.gz
    touch cell-hulls.geojson.gz

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        proseg: \$(proseg --version | sed 's/proseg //')
    END_VERSIONS
    """
}
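A minimal sketch of how this local module might be wired into a subworkflow; the subworkflow and channel names are illustrative, while the include path and emit names come from the module above:

    include { PROSEG } from '../modules/local/proseg/main'

    workflow SEGMENTATION {
        take:
        ch_transcripts // channel: [ val(meta), path(transcripts.csv.gz) ]

        main:
        PROSEG(ch_transcripts)

        emit:
        cell_polygons = PROSEG.out.cell_polygons
        counts        = PROSEG.out.expected_counts
        versions      = PROSEG.out.versions
    }

Note that params.preset must be set to xenium, cosmx, or merscope before the process runs.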
--------------------------------------------------------------------------------
/lib/WorkflowSpatialxe.groovy:
--------------------------------------------------------------------------------
//
// This file holds several functions specific to the workflow/spatialxe.nf in the nf-core/spatialxe pipeline
//

import groovy.text.SimpleTemplateEngine

class WorkflowSpatialxe {

    //
    // Check and validate parameters
    //
    public static void initialise(params, log) {
        genomeExistsError(params, log)

        if (!params.fasta) {
            log.error "Genome fasta file not specified with e.g. '--fasta genome.fa' or via a detectable config file."
            System.exit(1)
        }
    }

    //
    // Get workflow summary for MultiQC
    //
    public static String paramsSummaryMultiqc(workflow, summary) {
        String summary_section = ''
        for (group in summary.keySet()) {
            def group_params = summary.get(group) // This gets the parameters of that particular group
            if (group_params) {
                summary_section += "