├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   └── feature_request.md
│   └── workflows
│       └── getbadge.yml
├── .gitignore
├── LICENSE
├── README.md
├── assets
│   └── generate_badge.py
├── figures
│   ├── deepmib-demo.gif
│   ├── inference-demo.gif
│   ├── merged-demos.gif
│   ├── qupath-demo.gif
│   └── youtube-thumbnail.jpg
├── notebooks
│   └── IBDColEpi-load-dataset-example.ipynb
└── source
    ├── exportTiles.groovy
    ├── fastpathology-model-example
    │   ├── example-model-config.txt
    │   └── example-pipeline.fpl
    ├── getWorstPerformingPatches.ps1
    ├── html
    │   ├── generate-badge.html
    │   ├── generate-badge.js
    │   └── generate-badge.py
    ├── importPyramidalTIFF.groovy
    ├── importStitchedTIFfromMIB.groovy
    ├── importTiles.groovy
    ├── qupath_export_tiles_annotation_multiclass.groovy
    └── runtime-experiments
        ├── CMakeLists.txt
        ├── pipelineRuntime.cpp
        └── statistical_analysis.py
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **To Reproduce**
14 | Steps to reproduce the behavior:
15 | 1. Go to '...'
16 | 2. Click on '....'
17 | 3. Scroll down to '....'
18 | 4. See error
19 |
20 | **Expected behavior**
21 | A clear and concise description of what you expected to happen.
22 |
23 | **Screenshots**
24 | If applicable, add screenshots to help explain your problem.
25 |
26 | **Desktop (please complete the following information):**
27 | - OS: [e.g. iOS]
28 | - Version [e.g. 22]
29 |
30 | **Additional context**
31 | Add any other context about the problem here.
32 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 |
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 |
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 |
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
21 |
--------------------------------------------------------------------------------
/.github/workflows/getbadge.yml:
--------------------------------------------------------------------------------
1 | name: getbadge
2 |
3 | on:
4 | push:
5 | branches:
6 | - '*'
7 | workflow_dispatch:
8 | schedule:
9 | # run CI once a week https://jasonet.co/posts/scheduled-actions/
10 | - cron: '0 0 * * 0'
11 |
12 | # trigger workflow every five minutes
13 | # - cron: '*/5 * * * *'
14 |
15 | jobs:
16 | update-downloads:
17 | runs-on: ubuntu-20.04
18 | steps:
19 | - uses: actions/checkout@v2
20 | with:
21 | persist-credentials: false # otherwise, the token used is the GITHUB_TOKEN, instead of your personal access token.
22 | fetch-depth: 0 # otherwise, there would be errors pushing refs to the destination repository.
23 |
24 | - name: Set up Python 3.7
25 | uses: actions/setup-python@v2
26 | with:
27 | python-version: 3.7
28 |
29 | - name: Install deps
30 | run: |
31 | pip install numpy pandas imgkit
32 | sudo apt-get install wkhtmltopdf
33 |
34 | - name: Get info
35 | run: |
36 | cd assets/
37 | wget "https://dataverse.no/api/info/metrics/filedownloads?parentAlias=ntnu"
38 | python generate_badge.py
39 |
40 | - name: Update badge in release
41 | uses: svenstaro/upload-release-action@v2
42 | with:
43 | repo_name: andreped/NoCodeSeg
44 | repo_token: ${{ secrets.CI }}
45 | file: ${{github.workspace}}/assets/badge.png
46 | asset_name: badge.png
47 | file_glob: true
48 | tag: download-badge
49 | overwrite: true
50 |
51 | - name: Update SVG in release
52 | uses: svenstaro/upload-release-action@v2
53 | with:
54 | repo_name: andreped/NoCodeSeg
55 | repo_token: ${{ secrets.CI }}
56 | file: ${{github.workspace}}/assets/badge.svg
57 | asset_name: badge.svg
58 | file_glob: true
59 | tag: download-badge
60 | overwrite: true
61 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea/
2 | .vs/
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2021 André Pedersen
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [](https://opensource.org/licenses/MIT)
2 | [](https://www.frontiersin.org/articles/10.3389/fmed.2021.816281/full)
3 | [](https://www.youtube.com/watch?v=9dTfUwnL6zY)
4 | [](https://doi.org/10.18710/TLA01U)
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 | # NoCodeSeg: Deep segmentation made easy!
14 |
15 | ⚠️***Latest: Generic multiclass support has been added to the pipeline!***
16 |
17 | This is the official repository for the manuscript [*"Code-free development and deployment of deep segmentation models for digital pathology"*](https://www.frontiersin.org/articles/10.3389/fmed.2021.816281/full), **published** open access in Frontiers in Medicine.
18 |
19 | The repository contains trained deep models for epithelium segmentation of HE and CD3 immunostained WSIs, as well as source code relevant for importing/exporting annotations/predictions in [QuPath](https://qupath.github.io/), both from [DeepMIB](http://mib.helsinki.fi/downloads.html) and [FastPathology](https://github.com/AICAN-Research/FAST-Pathology).
20 |
21 | All relevant scripts for working with our pipeline can be found in the [source directory](https://github.com/andreped/NoCodeSeg/tree/main/source).
22 |
23 | See [here](https://github.com/andreped/NoCodeSeg#models) for how to access the trained models.
24 |
25 | See [here](https://github.com/andreped/NoCodeSeg#data) for how to download the 251 annotated WSIs.
26 |
27 | ## [Getting started](https://github.com/andreped/NoCodeSeg#getting-started)
28 |
29 | [](https://youtu.be/9dTfUwnL6zY)
30 |
31 | A video tutorial of the proposed pipeline was published on [YouTube](https://www.youtube.com/watch?v=9dTfUwnL6zY&ab_channel=HenrikSahlinPettersen).
32 | It demonstrates the steps for:
33 | * Downloading and installing the software
34 | * QuPath
35 |   * Create a project, then export annotations as patches with label files
36 |   * Export patches from unannotated images for prediction in DeepMIB
37 |   * Import predictions from MIB and FastPathology as annotations (covered later in the video)
38 | * MIB
39 |   * Use the annotated patches/labels exported from QuPath
40 |   * Configure and train deep segmentation models (e.g. U-Net/SegNet)
41 |   * Use the trained U-Net to predict on unannotated patches exported from QuPath
42 |   * Export trained models to the ONNX format for use in FastPathology
43 | * FastPathology
44 |   * Import and create a configuration file for the DeepMIB-exported ONNX model
45 |   * Create a project and load WSIs into it
46 |   * Use the U-Net ONNX model to render predictions on top of the WSI in real time
47 |   * Export full-sized WSI TIFFs for import into QuPath
48 |
49 | Note that the version of FastPathology used in the demonstration was v0.2.0 (this exact version can be downloaded from [here](https://github.com/AICAN-Research/FAST-Pathology/releases/tag/v0.2.0)). The software is under continuous development, and features presented in the video are therefore subject to change in the near future. For information regarding changes and new releases, please visit the [FastPathology repository](https://github.com/AICAN-Research/FAST-Pathology).
50 |
51 | ## [Data](https://github.com/andreped/NoCodeSeg#data)
52 | The 251 annotated WSIs are made openly available for anyone on [DataverseNO](https://doi.org/10.18710/TLA01U). Alternatively, the data can be downloaded directly from Google Drive (click [here](https://drive.google.com/drive/folders/1eUVs1DA1UYayUYjr8_aY3O5xDgV1uLvH?usp=sharing) to access the dataset). Information on how to cite the IBDColEpi dataset can be found on [DataverseNO](https://doi.org/10.18710/TLA01U).
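For scripted access, the dataset can also be queried through the standard Dataverse native API. Below is a minimal sketch (not part of this repository) that lists the files of the latest published version; the endpoint layout follows the public Dataverse API documentation and may differ between server versions:

```python
import requests

BASE = "https://dataverse.no/api"
DOI = "doi:10.18710/TLA01U"

# fetch the dataset metadata for the latest published version
resp = requests.get(BASE + "/datasets/:persistentId/", params={"persistentId": DOI})
resp.raise_for_status()

for entry in resp.json()["data"]["latestVersion"]["files"]:
    datafile = entry["dataFile"]
    # individual files can then be fetched from BASE + "/access/datafile/<id>"
    print(datafile["id"], datafile["filename"])
```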
53 |
54 |
55 |
56 |
57 | ### [Reading annotations](https://github.com/andreped/NoCodeSeg#reading-annotations)
58 |
59 | The annotations are stored as tiled, pyramidal TIFFs, which makes it easy to generate patches from the data without the need for any preprocessing. Reading these files and working with them to generate training data is already described in the [tutorial video](https://github.com/andreped/NoCodeSeg#getting-started) above.
60 |
61 | _TL;DR:_ Load the TIFF as annotations in QuPath using the provided [groovy script](https://github.com/andreped/NoCodeSeg/blob/main/source/importPyramidalTIFF.groovy), then [export](https://github.com/andreped/NoCodeSeg/blob/main/source/exportTiles.groovy) these as labelled tiles.
62 |
63 |
64 |
65 |
66 |
67 | ### [Reading annotation in Python](https://github.com/andreped/NoCodeSeg#reading-annotation-in-python)
68 |
69 | If you wish to use Python, the annotations can be read exactly the same way as regular WSIs (for instance using [pyFAST](https://github.com/smistad/FAST)).
70 |
71 | I have made a Jupyter Notebook demonstrating how to do this [here](https://github.com/andreped/NoCodeSeg/blob/main/notebooks/IBDColEpi-load-dataset-example.ipynb).
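As a minimal sketch (assuming a recent pyFAST release, installed with `pip install pyfast`, and a hypothetical file name), reading a patch from one of the annotation TIFFs could look like this; see the notebook for the exact, tested calls:

```python
import fast
import numpy as np

# the annotation TIFF opens like any pyramidal WSI (hypothetical path)
importer = fast.WholeSlideImageImporter.create("path/to/annotation.tiff")
pyramid = importer.runAndGetOutputData()

# extract a 512x512 patch from pyramid level 2 at position (0, 0)
access = pyramid.getAccess(fast.ACCESS_READ)
patch = access.getPatchAsImage(2, 0, 0, 512, 512)
print(np.asarray(patch).shape)
```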
72 |
73 | Alternatively, click the Colab button to access the notebook:
74 |
75 |
76 |
77 |
78 |
79 |
80 | ### [Models](https://github.com/andreped/NoCodeSeg#models)
81 |
82 | Note that the trained models can only be used for academic purposes due to MIB's license. Trained model files (.mibDeep for MIB and .onnx for FastPathology) are made openly available on [Google Drive](https://drive.google.com/drive/folders/1eUVs1DA1UYayUYjr8_aY3O5xDgV1uLvH). Simply download the file "trained-models.zip" and uncompress it to access the respective files.
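If you prefer a scripted download, the shared folder can for instance be fetched with the third-party [gdown](https://github.com/wkentaro/gdown) package; a minimal sketch, not part of the pipeline:

```python
import gdown  # pip install gdown

# download the shared Google Drive folder containing "trained-models.zip"
url = "https://drive.google.com/drive/folders/1eUVs1DA1UYayUYjr8_aY3O5xDgV1uLvH"
gdown.download_folder(url=url, output="trained-models", quiet=False)
```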
83 |
84 |
85 | ## [Applications of pipeline](https://github.com/andreped/NoCodeSeg#applications-of-pipeline)
86 | * Chiou et al., An immunohistochemical atlas of necroptotic pathway expression (2024), EMBO Molecular Medicine, https://doi.org/10.1038/s44321-024-00074-6
87 | * Alharbi et al., A Deep Learning–Based Approach to Estimate Paneth Cell Granule Area in Celiac Disease (2023), Arch Pathol Lab Med, https://doi.org/10.5858/arpa.2023-0074-OA
88 | * Mallén et al., Sex Differences in Glomerular Lesions, in Atherosclerosis Progression, and in the Response to Angiotensin-Converting Enzyme Inhibitors in the ApoE−/− Mice Model (2023), International Journal of Molecular Sciences, https://doi.org/10.3390/ijms241713442
89 | * Røyset et al., Deep learning-based image analysis reveals significant differences in the number and distribution of mucosal CD3 and γδ T cells between Crohn's disease and ulcerative colitis, The Journal of Pathology, https://doi.org/10.1002/cjp2.301
90 | * Pettersen et al., Code-free development and deployment of deep segmentation models for digital pathology (2022), Frontiers in Medicine, https://doi.org/10.3389/fmed.2021.816281
91 |
92 | ## [How to cite](https://github.com/andreped/NoCodeSeg#how-to-cite)
93 | Please consider citing our paper if you find the work useful:
94 |
95 | @article{pettersen2022codefree,
96 | title={{Code-Free Development and Deployment of Deep Segmentation Models for Digital Pathology}},
97 | author={Pettersen, Henrik Sahlin and Belevich, Ilya and Røyset, Elin Synnøve and Smistad, Erik and Simpson, Melanie Rae and Jokitalo, Eija and Reinertsen, Ingerid and Bakke, Ingunn and Pedersen, André},
98 | journal={Frontiers in Medicine},
99 | volume={8},
100 | year={2022},
101 | url={https://www.frontiersin.org/article/10.3389/fmed.2021.816281},
102 | doi={10.3389/fmed.2021.816281},
103 | issn={2296-858X}
104 | }
105 |
106 |
107 | In addition, if you used the dataset in your work, please cite the following:
108 |
109 | @data{pettersen2021ibdcolepi,
110 | title = {{140 HE and 111 CD3-stained colon biopsies of active and inactivate inflammatory bowel disease with epithelium annotated: the IBDColEpi dataset}},
111 | author = {Pettersen, Henrik Sahlin and Belevich, Ilya and Røyset, Elin Synnøve and Smistad, Erik and Jokitalo, Eija and Reinertsen, Ingerid and Bakke, Ingunn and Pedersen, André},
112 | publisher = {DataverseNO},
113 | year = {2021},
114 | version = {V2},
115 | doi = {10.18710/TLA01U},
116 | url = {https://doi.org/10.18710/TLA01U}
117 | }
118 |
119 |
120 | ## [Acknowledgements](https://github.com/andreped/NoCodeSeg#acknowledgements)
121 | We wish to thank [Peter Bankhead](https://www.ed.ac.uk/pathology/people/staff-students/peter-bankhead) and the [QuPath](https://github.com/qupath/qupath) team for their continuous support, and for assisting us in developing the scripts related to this study.
122 |
--------------------------------------------------------------------------------
/assets/generate_badge.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pandas as pd
3 | import imgkit
4 |
5 |
6 | options = {
7 | 'format': 'png',
8 | 'crop-h': '20',
9 | 'crop-w': '176',
10 | 'crop-x': '8',
11 | 'crop-y': '8',
12 | 'encoding': "UTF-8",
13 | 'custom-header' : [
14 | ('Accept-Encoding', 'gzip')
15 | ]
16 | }
17 |
18 | path = "filedownloads?parentAlias=ntnu"
19 | data = np.asarray(pd.read_csv(path, delimiter=None))
20 | pid2find = "10.18710/TLA01U"
21 |
22 | res = []
23 | total = 0
24 | for i in range(data.shape[0]):
25 | id_, pid_, count_ = data[i]
26 | if pid2find in pid_:
27 | total += count_
28 |
29 | # NOTE: the original HTML badge markup was garbled in this dump; the snippet
30 | # below is a reconstruction of a simple two-tone badge showing the total count
31 | ret = ('<div><span style="background-color: #555; color: #fff; padding: 4px;">downloads</span>'
32 |        '<span style="background-color: #007ec6; color: #fff; padding: 4px;">' + str(total) + '</span></div>')
34 |
35 | print("generated html line: ", ret)
36 |
37 | imgkit.from_string(ret, "badge.png", options=options)
38 |
39 | imgkit.from_string(ret, "badge.svg")
40 |
41 |
42 |
--------------------------------------------------------------------------------
/figures/deepmib-demo.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/andreped/NoCodeSeg/ca8dfa805ec4763954b8098c027205dabe950d45/figures/deepmib-demo.gif
--------------------------------------------------------------------------------
/figures/inference-demo.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/andreped/NoCodeSeg/ca8dfa805ec4763954b8098c027205dabe950d45/figures/inference-demo.gif
--------------------------------------------------------------------------------
/figures/merged-demos.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/andreped/NoCodeSeg/ca8dfa805ec4763954b8098c027205dabe950d45/figures/merged-demos.gif
--------------------------------------------------------------------------------
/figures/qupath-demo.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/andreped/NoCodeSeg/ca8dfa805ec4763954b8098c027205dabe950d45/figures/qupath-demo.gif
--------------------------------------------------------------------------------
/figures/youtube-thumbnail.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/andreped/NoCodeSeg/ca8dfa805ec4763954b8098c027205dabe950d45/figures/youtube-thumbnail.jpg
--------------------------------------------------------------------------------
/source/exportTiles.groovy:
--------------------------------------------------------------------------------
1 | /**
2 | * Script to export annotations as labeled tiles for QuPath > 0.2*.
3 | *
4 |  * All patches will be exported to the same directory, called 'tiles', inside the project directory.
5 |  * The patches will be filtered based on tissue content, and finally moved to the respective
6 |  * subdirectories 'Images' and 'Labels' within the 'tiles' folder.
7 | *
8 |  * Each patch's filename contains the original WSI ID; images are saved as TIF (by default)
9 |  * and ground truth as PNG.
10 | *
11 |  * The downsampling level can be set by the user; the default value is 4.
12 | *
13 |  * Code is inspired by the script from the QuPath documentation, written by Pete Bankhead:
14 | * https://qupath.readthedocs.io/en/stable/docs/advanced/exporting_images.html#tile-exporter
15 | *
16 | * @author André Pedersen
17 | */
18 |
19 |
20 | import qupath.lib.images.servers.LabeledImageServer
21 | import java.awt.image.Raster
22 | import javax.imageio.ImageIO;
23 |
24 |
25 | // ----- SET THESE PARAMETERS -----
26 | def classNames = ["Benign", "Malign"] // names of classes of interest (simply add more values to list to add more classes)
27 | double downsample = 4 // which downsampling level to use -> how much to downsample the patches
28 | double glassThreshold = 50 // which threshold to use for tissue detection (lower value => more tissue included in mask)
29 | double percentageThreshold = 0.25 // if a patch contains less than X amount of tissue, it should be neglected (range [0.0, 1.0])
30 | int patchSize = 512 // generated patch size
31 | int pixelOverlap = 128 // stride for which patches are generated
32 | def imageExtension = ".tif"
33 | int nb_channels = 3;
34 | def multiChannel = false;
35 | // --------------------------------
36 |
37 |
38 | def imageData = getCurrentImageData()
39 |
40 | // Define output path (relative to project)
41 | def name = GeneralTools.getNameWithoutExtension(imageData.getServer().getMetadata().getName())
42 | def pathOutput = buildFilePath(PROJECT_BASE_DIR, 'tiles')
43 | mkdirs(pathOutput)
44 |
45 | // Create an ImageServer where the pixels are derived from annotations
46 | def tempServer = new LabeledImageServer.Builder(imageData)
47 | .backgroundLabel(0, ColorTools.WHITE) // Specify background label (usually 0 or 255)
48 | .downsample(downsample) // Choose server resolution; this should match the resolution at which tiles are exported
49 | .multichannelOutput(multiChannel) // If true, each label is a different channel (required for multiclass probability)
50 |
51 | // assign each class to the server (need to iterate across list array due to multi-class)
52 | def counter = 1
53 | classNames.each { currClassName ->
54 | tempServer.addLabel(currClassName, counter) // Choose output labels (the order matters!)
55 | counter++;
56 | }
57 |
58 | // finally, build server
59 | def labelServer = tempServer.build()
60 |
61 | // Create an exporter that requests corresponding tiles from the original & labeled image servers
62 | new TileExporter(imageData)
63 | .downsample(downsample) // Define export resolution
64 | .imageExtension(imageExtension) // Define file extension for original pixels (often .tif, .jpg, '.png' or '.ome.tif')
65 | .tileSize(patchSize) // Define size of each tile, in pixels
66 | .labeledServer(labelServer) // Define the labeled image server to use (i.e. the one we just built)
67 | .annotatedTilesOnly(true) // If true, only export tiles if there is a (labeled) annotation present
68 | .overlap(pixelOverlap) // Define overlap, in pixel units at the export resolution
69 | .writeTiles(pathOutput) // Write tiles to the specified directory
70 |
71 | // create new folder (IMAGES AND LABELS), but only if they do not exist!
72 | def dir1 = new File(pathOutput + "/Images");
73 | if (!dir1.isDirectory())
74 | dir1.mkdir()
75 |
76 | def dir2 = new File(pathOutput + "/Labels");
77 | if (!dir2.isDirectory())
78 | dir2.mkdir()
79 |
80 | // attempt to delete unwanted patches: both unsupported formats as well as patches containing mostly glass
81 | // Iterate through all your tiles
82 | File folder = new File(pathOutput)
83 | File[] listOfFiles = folder.listFiles()
84 |
85 | // for each patch
86 | listOfFiles.each { tile ->
87 | // skip directories within masks folder, and skip all ground truth patches
88 | if (tile.isDirectory())
89 | return;
90 | def currPath = tile.getPath()
91 | if (!currPath.endsWith(imageExtension))
92 | return;
93 |
94 | // load TIFF images back again, estimate patch glass density, and remove patches with lots
95 | // of glass based on user-defined threshold
96 | def image = ImageIO.read(new File(currPath))
97 | Raster raster = image.getRaster();
98 |
99 | // estimate amount of tissue in patch
100 | def tissue = 0;
101 | for (int y = 0; y < image.getHeight(); ++y) {
102 | for (int x = 0; x < image.getWidth(); ++x) {
103 | double currDist = 0
104 | for (int z = 0; z < nb_channels; ++z) {
105 | currDist += raster.getSample(x, y, z)
106 | }
107 | currDist = ((currDist / nb_channels) > (255 - glassThreshold)) ? 0 : 1;  // average over channels, then threshold
108 | if (currDist > 0) {
109 | ++tissue
110 | }
111 | }
112 | }
113 |
114 | // remove patches containing too little tissue, based on the user-defined threshold, and move accepted patches to the respective folders
115 | def amountTissue = tissue / (image.getWidth() * image.getHeight());
116 | def currLabelPatch = new File(pathOutput + "/" + tile.getName().split(imageExtension)[0] + ".png")
117 | if (amountTissue < percentageThreshold) {
118 | tile.delete()
119 | currLabelPatch.delete()
120 | } else {
121 | tile.renameTo(pathOutput + "/Images/" + tile.getName())
122 | currLabelPatch.renameTo(new File(pathOutput + "/Labels/" + tile.getName().split(imageExtension)[0] + ".png"))
123 | }
124 | }
125 |
126 | print "Done!"
127 |
128 | // reclaim memory - relevant for running this within a RunForProject
129 | Thread.sleep(100);
130 | javafx.application.Platform.runLater {
131 | getCurrentViewer().getImageRegionStore().cache.clear();
132 | System.gc();
133 | }
134 | Thread.sleep(100);
--------------------------------------------------------------------------------
/source/fastpathology-model-example/example-model-config.txt:
--------------------------------------------------------------------------------
1 | model_name:Epithelium_HE_512
2 | name:Epithelium_HE_512
3 | task:Epithelium_HE_512
4 | problem:segmentation
5 | resolution:high
6 | magnification_level:10
7 | mask_threshold:0.02
8 | tissue_threshold:70
9 | patch_overlap:0.05
10 | batch_size:1
11 | input_img_size_x:512
12 | input_img_size_y:512
13 | nb_channels:3
14 | nb_classes:2
15 | input_node:ImageInputLayer
16 | output_node:Softmax_Layer_Transpose2
17 | class_colors:0,0,255;0,255,0
18 | class_names:Exterior;Epithelium
19 | interpolation:0
20 | pipeline:import;tissue_segmentation;batchgen;neural_network;stitch;render
21 | batch_process:2
22 | scale_factor:1.0f/1.0f
23 | cpu:0
24 | IE:OpenVINO
--------------------------------------------------------------------------------
/source/fastpathology-model-example/example-pipeline.fpl:
--------------------------------------------------------------------------------
1 | PipelineName "Neural network high-res WSI segmentation"
2 | PipelineDescription "Patch-wise high-resolution WSI segmentation with TIFF export"
3 |
4 | PipelineOutputData heatmap stitcher 0
5 |
6 | ### Processing chain
7 | ProcessObject importer WholeSlideImageImporter
8 | Attribute filename @@filename@@
9 |
10 | ProcessObject tissueSeg TissueSegmentation
11 | Attribute threshold 85
12 | Input 0 importer 0
13 |
14 | ProcessObject patch PatchGenerator
15 | Attribute patch-size 256 256
16 | Attribute patch-level 2
17 | Attribute patch-overlap 0.0
18 | Attribute mask-threshold 0.05
19 | Input 0 importer 0
20 | Input 1 tissueSeg 0
21 |
22 | ProcessObject network SegmentationNetwork
23 | Attribute scale-factor 1.0
24 | #Attribute inference-engine TensorRT
25 | Attribute inference-engine OpenVINO
26 | Attribute model "C:/path/to/some/model.onnx"
27 | Input 0 patch 0
28 |
29 | ProcessObject stitcher PatchStitcher
30 | Input 0 network 0
31 |
32 | ProcessObject exporter TIFFImagePyramidExporter
33 | Attribute filename @@exportPath@@
34 | Attribute execute-on-last-frame-only true
35 | Input 0 stitcher 0
36 |
37 |
38 | ### Renderers
39 | Renderer segRenderer SegmentationRenderer
40 | Attribute opacity 1.0
41 | Attribute border-opacity 0.0
42 | Attribute label-colors "1" "green" "2" "red"
43 | Input 0 stitcher 0
--------------------------------------------------------------------------------
/source/getWorstPerformingPatches.ps1:
--------------------------------------------------------------------------------
1 | Get-Content "C:/path-to-list-of-worst-performing-patches.txt" |
2 |     ForEach-Object { Move-Item -LiteralPath .\$_ -Destination "C:/path-to-directory-of-removed-patches" }
--------------------------------------------------------------------------------
/source/html/generate-badge.html:
--------------------------------------------------------------------------------
1 | <!DOCTYPE html>
2 | <html>
3 | <head>
4 |     <title>Downloads Badge</title>
5 | </head>
6 | <body>
7 |     <!-- badge-generation script was stripped from this dump -->
8 | </body>
9 | </html>
--------------------------------------------------------------------------------
/source/html/generate-badge.js:
--------------------------------------------------------------------------------
1 | // Fetch the Dataverse download-metrics CSV and render a downloads badge,
2 | // writing the resulting HTML snippet to disk for later use in markdown.
3 | // Note: this is an ES module (top-level await); run with a recent Node.js.
4 | import fetch from "node-fetch";
5 | import fs from "fs";
15 | let url = "https://dataverse.no/api/info/metrics/filedownloads?parentAlias=ntnu";
16 | let response = await fetch(url);
17 | if (response.status === 200) {
18 | let content = await response.text();
19 | let pid = "10.18710/TLA01U";
20 | let output = getBadge(content, pid);
21 |
22 | // write result to disk
23 | fs.appendFile('./test.txt', output, function (err) {
24 | if (err) return console.log(err);
25 | console.log('Appended!');
26 | });
27 | }
28 |
29 | function getBadge(content, pidToFind) {
30 | var x = content.split("\n");
31 | for (var i = 0; i < x.length; i++) {
32 | let y = x[i].split(",");
33 | x[i] = y;
34 | }
35 | let total = 0;
36 | x.shift(); // remove header row ("id,pid,count")
37 | for (const row of x) {
38 | let pid = row[1];
39 | let count = row[2];
40 | if (pid.includes(pidToFind)) {
41 | total += Number(count);
42 | }
43 | }
44 |     return (
45 |         // NOTE: the original badge markup was garbled in this dump; reconstructed
46 |         // as a simple two-tone badge showing the total download count
47 |         '<div><span style="background-color: #555; color: #fff; padding: 4px;">downloads</span>' +
48 |         '<span style="background-color: #007ec6; color: #fff; padding: 4px;">' + total + '</span></div>'
49 |     );
51 | }
52 |
--------------------------------------------------------------------------------
/source/html/generate-badge.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pandas as pd
3 |
4 |
5 | url = "https://dataverse.no/api/info/metrics/filedownloads?parentAlias=ntnu"
6 |
7 |
--------------------------------------------------------------------------------
/source/importPyramidalTIFF.groovy:
--------------------------------------------------------------------------------
1 | /**
2 | * Script to import uint8 pyramidal TIFF images from FastPathology as annotations for QuPath >= v0.3*
3 | *
4 | * It is assumed that the TIFF lies in the project directory structure generated by FastPathology.
5 | *
6 |  * Furthermore, we assume that the image is uint8, where each uint corresponds to a different class.
7 | *
8 |  * Currently, it is assumed that the classes are labelled {1, 2, 3, ..., k}, with no gaps in between, and where 0 is background.
9 | *
10 | * Code is slightly modified from Peter Bankhead's script to support RunForProject:
11 | * https://gist.github.com/petebankhead/27c1f8cd950583452c756f3a2ea41fc0
12 | *
13 | * which was written for https://forum.image.sc/t/rendering-wsi-as-overlay-on-top-of-another-wsi/52629/25?u=andreped
14 | *
15 | * All credit to Peter Bankhead and the QuPath team for their tremendous support in implementing this script!
16 | *
17 | * @author André Pedersen
18 | */
19 |
20 |
21 | // --- SET THESE PARAMETERS ---
22 | def masksPath = "C:/Users/username/fastpathology/projects/2022-09-09-173804/results/" // path to where TIFFs are stored (FastPathology project)
23 | def downsample = 2 // which scaling factor to use, if the segmentation is produced at a lower magnification level
24 | def level = 0 // which level to extract segmentation from (choosing 0 may be slow)
25 | def extension = ".tiff" // pyramidal TIFF
26 | def classNames = ["Benign", "Malign"] // names of classes of interest (in this case two classes, excluding the background class)
27 | def taskName = "Tissue segmentation" // name of the task that was run in FastPathology to produce this result
28 | int channel = 0 // 0-based index for the channel to threshold
29 | def fromFP = true // whether the result is from FastPathology; if not, we assume that the TIFF lies directly in the masksPath directory with the same name
30 | // ----------------------------
31 |
32 | // In case the image has bounds, we need to shift annotations
33 | def imageServer = getCurrentServer();
34 | def shiftX = -imageServer.boundsX;
35 | def shiftY = -imageServer.boundsY;
36 |
37 | // Get a list of image files, stopping early if none can be found
38 | def dirOutput = new File(masksPath);
39 | if (!dirOutput.isDirectory()) {
40 | print dirOutput + ' is not a valid directory!';
41 | return;
42 | }
43 |
44 | // get current WSI, update paths to file
45 | def currWSIName = GeneralTools.getNameWithoutExtension(getProjectEntry().getImageName())
46 | def path = ""
47 |
48 | if (fromFP) {
49 | path = masksPath + "/" + currWSIName + "/" + taskName + "/Segmentation/Segmentation" + extension
50 | } else {
51 | path = masksPath + "/" + currWSIName + extension
52 | }
53 |
54 | // check if the file exists; if not, return
55 | File file = new File(path)
56 | if (!file.exists()) {
57 | print path + ' does not exist!';
58 | return;
59 | }
60 |
61 | // Create a single-resolution server at the desired level, if required
62 | def server = buildServer(path)
63 | if (level != 0) {
64 | server = qupath.lib.images.servers.ImageServers.pyramidalize(server, server.getDownsampleForResolution(level))
65 | }
66 |
67 | def belowClass = getPathClass('Ignore*') // Class for pixels below the threshold
68 | def current_thresh = 0.5;
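// NOTE: the threshold starts at 0.5 and is incremented by 1 for each class below,
// so the threshold for class i captures all uint labels >= i; the subtraction step
// at the end of the script then isolates each uint value as its own class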
69 |
70 | classNames.each { currClassName ->
71 | def aboveClass = getPathClass(currClassName) // Class for pixels above the threshold
72 |
73 | // Create a thresholded image
74 | def thresholdServer = PixelClassifierTools.createThresholdServer(server, channel, current_thresh, belowClass, getPathClass(currClassName))
75 |
76 | // Create annotations and add to the current object hierarchy
77 | def hierarchy = getCurrentHierarchy()
78 | PixelClassifierTools.createAnnotationsFromPixelClassifier(hierarchy, thresholdServer, -1, -1)
79 |
80 | // Select current annotations
81 | selectObjectsByClassification(currClassName);
82 |
83 | // Get current annotations, rescale and shift
84 | def oldObjects = getAnnotationObjects().findAll{it.getPathClass() == getPathClass(currClassName)}
85 | def transform = java.awt.geom.AffineTransform.getTranslateInstance(shiftX, shiftY)
86 | transform.concatenate(java.awt.geom.AffineTransform.getScaleInstance(downsample, downsample))
87 | def newObjects = oldObjects.collect {p -> PathObjectTools.transformObject(p, transform, false)}
88 |
89 | // Delete old annotations
90 | clearSelectedObjects(false);
91 |
92 | // add resulting annotation object (and update current threshold)
93 | addObjects(newObjects)
94 | current_thresh++;
95 | }
96 |
97 |
98 | // finally, correct all annotations by iteratively subtracting adjacent annotations, to assign one uint to one class
99 | for(int i = 0; i < classNames.size() - 1; i++) {
100 | def className1 = classNames[i]
101 | def className2 = classNames[i + 1]
102 |
103 | def class1 = getAnnotationObjects().find {it.getPathClass() == getPathClass(className1)}
104 | def class2 = getAnnotationObjects().find {it.getPathClass() == getPathClass(className2)}
105 | def plane = class1.getROI().getImagePlane()
106 | if (plane != class2.getROI().getImagePlane()) {
107 | println 'Annotations are on different planes!'
108 | return
109 | }
110 | // Convert to geometries & compute distance
111 | // Note: see https://locationtech.github.io/jts/javadoc/org/locationtech/jts/geom/Geometry.html#distance-org.locationtech.jts.geom.Geometry-
112 | def g1 = class1.getROI().getGeometry()
113 | def g2 = class2.getROI().getGeometry()
114 |
115 | def difference = g1.difference(g2)
116 | if (difference.isEmpty())
117 | println "Difference is empty (annotation fully covered by the next class)"
118 | else {
119 | def roi = GeometryTools.geometryToROI(difference, plane)
120 | def annotation = PathObjects.createAnnotationObject(roi, getPathClass('Difference'))
121 | addObject(annotation)
122 | selectObjects(annotation)
123 | }
124 | //remove original object
125 | removeObject(class1, true)
126 |
127 | // rename annotation
128 | getAnnotationObjects().each { annotation ->
129 | if (annotation.getPathClass().equals(getPathClass("Difference")))
130 | annotation.setPathClass(getPathClass(className1))
131 | }
132 | }
133 |
134 | print "Done!"
135 |
136 | // reclaim memory - relevant for running this within a RunForProject
137 | Thread.sleep(100);
138 | javafx.application.Platform.runLater {
139 | getCurrentViewer().getImageRegionStore().cache.clear();
140 | System.gc();
141 | }
142 | Thread.sleep(100);
143 |
--------------------------------------------------------------------------------
/source/importStitchedTIFfromMIB.groovy:
--------------------------------------------------------------------------------
1 | /**
2 | * Script to import uint8 stitched TIF images as annotations for QuPath >= v0.3*
3 | *
4 |  * It is assumed that the TIF lies in a user-defined directory, with the same name as the WSI but with the extension '.tif'.
5 | *
6 | * However, the name of the stitched images (assumed .tif) can be set in the "extensions" variable
7 | *
8 |  * Furthermore, we assume that the image is uint8, where each uint corresponds to a different class.
9 | *
10 |  * The script supports multi-class stitched images, but it is assumed that the classes are labelled
11 |  * consecutively, where uint 0 is assumed to be background.
12 | *
13 | * Code is inspired by Pete Bankhead's script from the ImageJ forum:
14 | * https://forum.image.sc/t/rendering-wsi-as-overlay-on-top-of-another-wsi/52629/9?u=andreped
15 | *
16 | * @author André Pedersen
17 | */
18 |
19 |
20 | import qupath.lib.images.servers.ImageServerProvider
21 | import qupath.lib.regions.RegionRequest
22 | import java.awt.image.BufferedImage
23 | import qupath.lib.analysis.images.ContourTracing;
24 | import static qupath.lib.gui.scripting.QPEx.*
25 |
26 |
27 | // --- SET THESE PARAMETERS ---
28 | def masksPath = "E:/path/to/some/ResultsModels_WSI/"
29 | def downsample = 2.0;
30 | def extension = ".tif"
31 | def classNames = ["Benign", "Malign"] // example class names, for single class use ["Benign"]
32 | boolean fromFP = false // if predictions are stored in a FP-like manner (two-level folder structure = true)
33 | boolean fromMIB = true // if predictions are from MIB, they will need to be renamed adding "labels_"
34 | // ----------------------------
35 |
36 |
37 | // Get a list of image files, stopping early if none can be found
38 | def dirOutput = new File(masksPath);
39 | if (!dirOutput.isDirectory()) {
40 | print dirOutput + ' is not a valid directory!';
41 | return;
42 | }
43 |
44 | // get current WSI, update paths to file
45 | def currWSIName = GeneralTools.getNameWithoutExtension(getProjectEntry().getImageName())
46 | if (fromFP) {
47 | masksPath += currWSIName
48 | }
49 |
50 | def path = ""
51 | if (fromMIB) {
52 | path = masksPath + "/labels_" + currWSIName + extension
53 | } else {
54 | path = masksPath + "/" + currWSIName + extension
55 | }
56 |
57 | // check if the file exists; if not, return
58 | File file = new File(path)
59 | if (!file.exists()) {
60 | print path + ' does not exist!';
61 | return;
62 | }
63 |
64 | // Ideally you'd use ImageIO.read(File)... but if it doesn't work we need this
65 | def server = ImageServerProvider.buildServer(path, BufferedImage)
66 | def region = RegionRequest.createInstance(server)
67 | def img = server.readBufferedImage(region)
68 | def band = ContourTracing.extractBand(img.getRaster(), 0)
69 | def request = RegionRequest.createInstance(getCurrentServer(), downsample)
70 |
71 | // for each class, iterate and create annotations for each
72 | int counter = 1
73 | classNames.each { currClassName ->
74 | def annotations = ContourTracing.createAnnotations(band, request, counter, counter)
75 |
76 | addObjects(annotations)
77 | replaceClassification(null, currClassName);
78 |
79 | counter++
80 | }
81 |
82 | print "Done!"
83 |
84 | // reclaim memory - relevant for running this within a RunForProject
85 | Thread.sleep(100);
86 | javafx.application.Platform.runLater {
87 | getCurrentViewer().getImageRegionStore().cache.clear();
88 | System.gc();
89 | }
90 | Thread.sleep(100);
--------------------------------------------------------------------------------
/source/importTiles.groovy:
--------------------------------------------------------------------------------
1 | /**
2 | * Script to import binary masks & create annotations, adding them to the current object hierarchy for QuPath >= v0.3.*
3 | *
4 | * It is assumed that each mask is stored in a TIFF file in a project subdirectory called 'masks'.
5 | * Each file name should be of the form:
6 | * "Labels_{Short original image name} [{x},{y}, {width},{height}].tif"
7 | *
8 | * Note: It's assumed that the classification is a simple name without underscores, i.e. not a 'derived' classification
9 | * (so 'Tumor' is ok, but 'Tumor: Positive' is not)
10 | *
11 | * It is also assumed that the background is assigned value 0 and class of interest is assigned value 1.
12 | *
13 | * The x, y, width & height values should be in terms of coordinates for the full-resolution image.
14 | *
15 | * By default, the image name stored in the mask filename has to match that of the current image - but this check can be turned off.
16 | *
17 | * Code is inspired by scripts and ideas from these threads of the ImageJ forum:
18 | * https://forum.image.sc/t/importing-binary-masks-in-qupath/25713/24
19 | * https://forum.image.sc/t/importing-binary-masks-in-qupath/25713/2
20 | * https://forum.image.sc/t/transferring-segmentation-predictions-from-custom-masks-to-qupath/43408/24
21 | *
22 | * The code of these threads were mainly contributed by Pete Bankhead, Benjamin Pavie, and Raymond301.
23 | *
24 | * @author André Pedersen
25 | */
26 |
27 |
28 | import ij.plugin.filter.ThresholdToSelection
29 | import ij.process.ImageProcessor
30 | import qupath.lib.objects.PathObjects
31 | import qupath.lib.regions.ImagePlane
32 | import qupath.imagej.processing.RoiLabeling
33 | import ij.IJ
34 | import static qupath.lib.gui.scripting.QPEx.*
35 |
36 |
37 | // --- SET THESE PARAMETERS ---
38 | def className = "Epithelium";
39 | def pathOutput = "C:/path-to-exported-labels-dir"
40 | def patchFormat = "tif" // set to "png" or "tif"
41 | // ----------------------------
42 |
43 |
44 | // Get the main QuPath data structures
45 | def imageData = getCurrentImageData();
46 | def hierarchy = imageData.getHierarchy();
47 | def server = imageData.getServer();
48 | def plane = getCurrentViewer().getImagePlane();
49 |
50 | def name = GeneralTools.getNameWithoutExtension(imageData.getServer().getMetadata().getName())
51 | print pathOutput
52 | print name
53 |
54 | // Get a list of image files, stopping early if none can be found
55 | def dirOutput = new File(pathOutput);
56 | print dirOutput
57 | if (!dirOutput.isDirectory()) {
58 | print pathOutput + ' is not a valid directory!';
59 | return;
60 | }
61 |
62 | def files = dirOutput.listFiles({f -> f.isFile() } as FileFilter) as List;
63 | if (files.isEmpty()) {
64 | print 'No mask files found in ' + dirOutput;
65 | return;
66 | }
67 |
68 | // loading bar
69 | int spaces = 40;
70 | float progress = 100.0;
71 | int counter = 0;
72 | int nbPatches = files.size;
73 |
74 | // Create annotations for all the files
75 | def annotations = [];
76 | files.each {
77 | String hash = "#" * Math.ceil((counter * spaces) / nbPatches);
78 | println String.format("[%-" + spaces + "s] %d%s%d\r", hash, counter, '/', nbPatches);
79 | counter ++;
80 | def currentName = GeneralTools.getNameWithoutExtension(getProjectEntry().getImageName())
81 | def filename = it.getName();
82 | if (!filename.contains(currentName) || !filename.endsWith("]." + patchFormat))
83 | return;
84 | try {
85 | annotations << parseAnnotation(it, plane);
86 | } catch (Exception e) {
87 | print 'Unable to parse annotation from ' + it.getName() + ': ' + e.getLocalizedMessage();
88 | }
89 | }
90 |
91 | /**
92 | * Create a new annotation from a binary image, parsing the classification & region from the file name.
93 | *
94 | * @param file File containing the TIFF image mask. The image name must be formatted as above.
95 | * @return The PathAnnotationObject created based on the mask & file name contents.
96 | */
97 | def parseAnnotation(File file, ImagePlane plane) {
98 |
99 | def filename = file.getName();
100 | def imp = IJ.openImage(file.getPath());
101 |
102 | def parts = filename.split(' ');
103 | def regionParts = parts[-1].split(",") as List;
104 |
105 | // Handle the scenario where no downsampling was done (the "d=1" part is omitted from the filename!)
106 | if (regionParts.size() == 4) {
107 | regionParts[0] = regionParts[0][1..-1]
108 | regionParts.add(0, "[d=1")
109 | }
110 |
111 | def downsample = regionParts[0].replace("[d=", "") as float;
112 |
113 | // Parse the x, y coordinates of the region
114 | int x = regionParts[1].replace("x=", "") as int;
115 | int y = regionParts[2].replace("y=", "") as int;
116 |
117 | // To create the ROI, travel into ImageJ
118 | def bp = imp.getProcessor();
119 | int n = bp.getStatistics().max as int;
120 | def rois = RoiLabeling.labelsToConnectedROIs(bp, n);
121 |
122 | def pathObjects = rois.collect {
123 | if (it == null)
124 | return;
125 | def roiQ = IJTools.convertToROI(it, -x/downsample, -y/downsample, downsample, plane);
126 | return PathObjects.createAnnotationObject(roiQ);
127 | }
128 | // this is slow, but it works... Better to add objects AFTER, as it seems like it draws every single time, maybe?
129 | addObjects(pathObjects);
130 | }
131 |
132 | resolveHierarchy();
133 |
134 | // finally, rename to class of interest
135 | replaceClassification(null, className);
136 |
137 | // merge all annotations
138 | print "Merging (might take some time...)"
139 | selectObjects {
140 | return it.isAnnotation() && it.getPathClass() == getPathClass(className)
141 | }
142 | mergeSelectedAnnotations();
143 |
144 | print "Done!"
145 |
146 | // reclaim memory - relevant for running this within a RunForProject
147 | Thread.sleep(100);
148 | javafx.application.Platform.runLater {
149 | getCurrentViewer().getImageRegionStore().cache.clear();
150 | System.gc();
151 | }
152 | Thread.sleep(100);
--------------------------------------------------------------------------------
/source/qupath_export_tiles_annotation_multiclass.groovy:
--------------------------------------------------------------------------------
1 | import qupath.lib.images.servers.LabeledImageServer
2 |
3 | def imageData = getCurrentImageData()
4 |
5 | // Define output path (relative to project)
6 | def name = GeneralTools.getNameWithoutExtension(imageData.getServer().getMetadata().getName())
7 |
8 | // create a directory called 'tiles' in the QuPath project directory
9 | // (adding more strings will create more subfolders)
10 | def pathOutput = buildFilePath(PROJECT_BASE_DIR, 'tiles')
11 | mkdirs(pathOutput)
12 |
13 |
14 | // Define output resolution
15 | double requestedPixelSize = 1 // default 10.0, change based on image size
16 |
17 | // Convert to downsample
18 | double downsample = requestedPixelSize / imageData.getServer().getPixelCalibration().getAveragedPixelSize()
19 |
20 | // Create an ImageServer where the pixels are derived from annotations
21 | def labelServer = new LabeledImageServer.Builder(imageData)
22 | .backgroundLabel(0, ColorTools.WHITE) // Specify background label (usually 0 or 255)
23 | .downsample(downsample) // Choose server resolution; this should match the resolution at which tiles are exported
24 | .addLabel('Epithelia', 1) // Choose output labels (the order matters! and so does the threshold value)
25 | .addLabel('Crypt', 2)
26 | .multichannelOutput(true) // If true, each label is a different channel (required for multiclass probability)
27 | .build()
28 |
29 | // Create an exporter that requests corresponding tiles from the original & labeled image servers
30 | new TileExporter(imageData)
31 | .downsample(downsample) // Define export resolution
32 | .imageExtension('.tif') // Define file extension for original pixels (often .tif, .jpg, '.png' or '.ome.tif')
33 | .tileSize(512) // Define size of each tile, in pixels
34 | .labeledServer(labelServer) // Define the labeled image server to use (i.e. the one we just built)
35 | .annotatedTilesOnly(false) // If true, only export tiles if there is a (labeled) annotation present
36 | .overlap(64) // Define overlap, in pixel units at the export resolution
37 | .imageSubDir("images") // save images in a subfolder called 'images'
38 | .labeledImageSubDir("masks") // save masks in a subfolder called 'masks'
39 | .writeTiles(pathOutput) // write tiles to the specified directory
40 | // By default you can omit imageSubDir and labeledImageSubDir and use just writeTiles;
41 | // this will save all images together.
42 | print 'Done!'
--------------------------------------------------------------------------------
/source/runtime-experiments/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.12)
2 | project(pipelineRuntime)
3 |
4 | set(CMAKE_CXX_STANDARD 17)
5 |
6 | include_directories(.)
7 | find_package(FAST REQUIRED)
8 | include(${FAST_USE_FILE})
9 |
10 | add_executable(pipelineRuntime pipelineRuntime.cpp)
11 | add_dependencies(pipelineRuntime fast_copy)
12 | target_link_libraries(pipelineRuntime ${FAST_LIBRARIES})
13 |
14 | set_target_properties(pipelineRuntime PROPERTIES
15 | CXX_STANDARD 17
16 | CXX_EXTENSIONS OFF
17 | )
--------------------------------------------------------------------------------
/source/runtime-experiments/pipelineRuntime.cpp:
--------------------------------------------------------------------------------
1 | // NOTE: include targets were stripped in this dump; headers reconstructed from the FAST classes used below
2 | #include <FAST/Tools/CommandLineParser.hpp>
3 | #include <FAST/Importers/WholeSlideImageImporter.hpp>
4 | #include <FAST/Algorithms/TissueSegmentation/TissueSegmentation.hpp>
5 | #include <FAST/Algorithms/ImagePatch/PatchGenerator.hpp>
6 | #include <FAST/Algorithms/ImagePatch/PatchStitcher.hpp>
7 | #include <FAST/Algorithms/NeuralNetwork/SegmentationNetwork.hpp>
8 | #include <FAST/Exporters/TIFFImagePyramidExporter.hpp>
9 | #include <FAST/Reporter.hpp>
10 | #include <iostream>
11 | #include <fstream>
12 | #include <string>
13 | #include <vector>
14 | #include <map>
15 | #include <chrono>
16 |
17 | using namespace fast;
18 |
19 | int main(int argc, char** argv) {
20 | Reporter::setGlobalReportMethod(Reporter::INFO, Reporter::NONE);
21 |
22 | CommandLineParser parser("Measure neural network performance script");
23 | parser.addOption("disable-warmup");
24 | parser.parse(argc, argv);
25 | const int iterations = 10;
26 | const bool warmupIteration = !parser.getOption("disable-warmup");
27 |
28 | std::cout << "\nPatch-wise high-res semantic segmentation...\n" << std::endl;
29 | const std::string resultFilename = "../results_neural-network-runtime.csv";
30 | std::ofstream file(resultFilename.c_str());
31 |
32 | std::vector<int> img_size{256, 256};
33 | int patch_level = 1;
34 | int iter = 1;
35 |
36 | // Write header
37 | file << "Engine;Device Type;Iteration;Patch generator AVG;Patch generator STD;NN input AVG;NN input STD;NN inference AVG;NN inference STD;NN output AVG;NN output STD;Patch stitcher AVG;Patch stitcher STD;Exporter AVG; Exporter STD;Total\n";
38 |
39 | for (std::string engine : {"TensorRT", "OpenVINO"}) {
40 | std::map<std::string, InferenceDeviceType> deviceTypes = {{"ANY", InferenceDeviceType::ANY}};
41 | if (engine == "OpenVINO") {
42 | // On OpenVINO, try all device types
43 | deviceTypes = std::map<std::string, InferenceDeviceType>{
44 | {"CPU", InferenceDeviceType::CPU},
45 | {"GPU", InferenceDeviceType::GPU},
46 | };
47 | }
48 |
49 | for (auto &&deviceType : deviceTypes) {
50 | std::cout << engine << " for device type " << deviceType.first << std::endl;
51 | std::cout << "====================================" << std::endl;
52 |
53 | for (int iteration = 0; iteration <= iterations; ++iteration) {
54 |
55 | auto importer = WholeSlideImageImporter::New();
56 | importer->setFilename("path-to-some-wsi.ndpi");
57 |
58 | auto tissueSegmentation = TissueSegmentation::New();
59 | tissueSegmentation->setDilate(45);
60 | tissueSegmentation->setInputConnection(importer->getOutputPort());
61 |
62 | auto generator = PatchGenerator::New();
63 | generator->setPatchSize(img_size[0], img_size[1]);
64 | generator->setPatchLevel(patch_level);
65 | generator->setOverlap(0.0);
66 | generator->setMaskThreshold(0.01);
67 | generator->setInputConnection(importer->getOutputPort());
68 | generator->setInputConnection(1, tissueSegmentation->getOutputPort());
69 | generator->enableRuntimeMeasurements();
70 |
71 | auto network = SegmentationNetwork::New();
72 | network->setInferenceEngine(engine);
73 | if (engine == "OpenVINO")
74 | network->getInferenceEngine()->setDeviceType(deviceType.second);
75 | network->load("path-to-some-model.onnx");
76 | network->setScaleFactor(1.0f);
77 | network->setInputConnection(generator->getOutputPort());
78 | network->enableRuntimeMeasurements();
79 |
80 | auto stitcher = PatchStitcher::New();
81 | stitcher->setInputConnection(network->getOutputPort());
82 | stitcher->enableRuntimeMeasurements();
83 |
84 | auto start = std::chrono::high_resolution_clock::now();
85 | DataObject::pointer data;
86 | do {
87 | data = stitcher->updateAndGetOutputData();
88 | } while (!data->isLastFrame());
89 |
90 | auto exporter = TIFFImagePyramidExporter::New();
91 | exporter->setFilename("../pred_seg" + std::to_string(iter) + ".tiff");
92 | exporter->setInputConnection(stitcher->getOutputPort());
93 | exporter->enableRuntimeMeasurements();
94 | exporter->update();
95 |
96 | std::chrono::duration<float> timeUsed =
97 | std::chrono::high_resolution_clock::now() - start;
98 | std::cout << "Total runtime: " << timeUsed.count() << std::endl;
99 | std::cout << "Patch generator runtime: " << std::endl;
100 | generator->getRuntime("create patch")->print();
101 | std::cout << "NN runtime: " << std::endl;
102 | network->getRuntime()->print();
103 | std::cout << "Patch stitcher runtime: " << std::endl;
104 | stitcher->getRuntime()->print();
105 | std::cout << "Exporter runtime" << std::endl;
106 | exporter->getRuntime()->print();
107 |
108 | iter++;
109 |
110 | if (iteration == 0 && warmupIteration)
111 | continue;
112 |
113 | file <<
114 | engine + ";" +
115 | deviceType.first + ";" +
116 | std::to_string(iteration) + ";" +
117 | std::to_string(generator->getRuntime("create patch")->getAverage()) + ";" +
118 | std::to_string(generator->getRuntime("create patch")->getStdDeviation()) + ";" +
119 | std::to_string(network->getRuntime("input_processing")->getAverage()) + ";" +
120 | std::to_string(network->getRuntime("input_processing")->getStdDeviation()) + ";" +
121 | std::to_string(network->getRuntime("inference")->getAverage()) + ";" +
122 | std::to_string(network->getRuntime("inference")->getStdDeviation()) + ";" +
123 | std::to_string(network->getRuntime("output_processing")->getAverage()) + ";" +
124 | std::to_string(network->getRuntime("output_processing")->getStdDeviation()) + ";" +
125 | std::to_string(stitcher->getRuntime("stitch patch")->getAverage()) + ";" +
126 | std::to_string(stitcher->getRuntime("stitch patch")->getStdDeviation()) + ";" +
127 | std::to_string(exporter->getRuntime()->getAverage()) + ";" +
128 | "0" + ";" +
129 | std::to_string(timeUsed.count())
130 | << std::endl;
131 | }
132 | }
133 | }
134 | }
135 |
--------------------------------------------------------------------------------
/source/runtime-experiments/statistical_analysis.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pandas as pd
3 | from tabulate import tabulate
4 | np.set_printoptions(suppress=True)
5 | np.set_printoptions(threshold=np.inf)
6 |
7 |
8 | loc = "./build/results_neural-network-runtime.csv"
9 | df = pd.read_csv(loc, sep=";", header=0)
10 |
11 | nb_iters = 11
12 |
13 | IEs = ["OpenVINO CPU", "OpenVINO GPU", "TensorRT"]
14 |
15 | # each engine/device combination occupies nb_iters consecutive rows in the CSV
16 | for i, ie in enumerate(IEs):
17 |     tmp = df.iloc[int(i * nb_iters):int((i + 1) * nb_iters), :]
18 |     print("---- " + ie)
19 |     print(tmp)
20 |     print("-")
21 |     # mean/std across iterations (numeric columns only)
22 |     print(tmp.mean(numeric_only=True), tmp.std(numeric_only=True))
23 |     print()
23 |
--------------------------------------------------------------------------------