├── .gitignore
├── LICENSE.md
├── README.md
├── build
│   ├── Dockerfile
│   ├── README.txt
│   └── install_files
│       ├── requirements.txt
│       └── run.sh
├── config
│   └── parameter_config.json
├── force_config.txt
├── img
│   ├── figure2.png
│   └── tform_progression.gif
├── model_weights
│   ├── JigsawCNN_checkpoint
│   │   └── README.txt
│   └── fragment_classifier
│       └── best_weights.hdf5
├── shredder
│   ├── line_utils.py
│   └── main.py
└── src
    ├── assembly_utils
    │   ├── detect_configuration.py
    │   ├── fetch_solutions.py
    │   ├── fragment_classifier.py
    │   ├── global_assembly.py
    │   ├── global_assembly_utils.py
    │   ├── jigsawnet.py
    │   ├── jigsawnet_utils.py
    │   ├── pairwise_alignment.py
    │   └── pairwise_alignment_utils.py
    ├── main.py
    ├── preprocessing_utils
    │   └── prepare_data.py
    └── pythostitcher_utils
        ├── adjust_final_rotation.py
        ├── fragment_class.py
        ├── full_resolution.py
        ├── fuse_images_highres.py
        ├── fuse_images_lowres.py
        ├── genetic_algorithm.py
        ├── get_resname.py
        ├── gradient_blending.py
        ├── landmark_evaluation.py
        ├── line_utils.py
        ├── map_tform_low_res.py
        ├── optimize_stitch.py
        ├── plot_tools.py
        ├── preprocess.py
        ├── stain_normalization.py
        └── transformations.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Local stuff
10 | /model_weights/JigsawCNN_checkpoint
11 | .idea
12 | */.idea/
13 | algorithm_analysis/
14 | .vscode
15 | /data
16 | /results
17 |
18 | # Distribution / packaging
19 |
20 | __pycache__/
21 | .Python
22 | develop-eggs/
23 | dist/
24 | downloads/
25 | eggs/
26 | .eggs/
27 | lib/
28 | lib64/
29 | parts/
30 | sdist/
31 | var/
32 | wheels/
33 | pip-wheel-metadata/
34 | share/python-wheels/
35 | *.egg-info/
36 | .installed.cfg
37 | *.egg
38 | MANIFEST
39 |
40 | # PyInstaller
41 | # Usually these files are written by a python script from a template
42 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
43 | *.manifest
44 | *.spec
45 |
46 | # Installer logs
47 | pip-log.txt
48 | pip-delete-this-directory.txt
49 |
50 | # Unit test / coverage reports
51 | htmlcov/
52 | .tox/
53 | .nox/
54 | .coverage
55 | .coverage.*
56 | .cache
57 | nosetests.xml
58 | coverage.xml
59 | *.cover
60 | *.py,cover
61 | .hypothesis/
62 | .pytest_cache/
63 |
64 | # Translations
65 | *.mo
66 | *.pot
67 |
68 | # Django stuff:
69 | *.log
70 | local_settings.py
71 | db.sqlite3
72 | db.sqlite3-journal
73 |
74 | # Flask stuff:
75 | instance/
76 | .webassets-cache
77 |
78 | # Scrapy stuff:
79 | .scrapy
80 |
81 | # Sphinx documentation
82 | docs/_build/
83 |
84 | # PyBuilder
85 | target/
86 |
87 | # Jupyter Notebook
88 | .ipynb_checkpoints
89 |
90 | # IPython
91 | profile_default/
92 | ipython_config.py
93 |
94 | # pyenv
95 | .python-version
96 |
97 | # pipenv
98 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
99 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
100 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
101 | # install all needed dependencies.
102 | #Pipfile.lock
103 |
104 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
105 | __pypackages__/
106 |
107 | # Celery stuff
108 | celerybeat-schedule
109 | celerybeat.pid
110 |
111 | # SageMath parsed files
112 | *.sage.py
113 |
114 | # Environments
115 | .env
116 | .venv
117 | env/
118 | venv/
119 | ENV/
120 | env.bak/
121 | venv.bak/
122 |
123 | # Spyder project settings
124 | .spyderproject
125 | .spyproject
126 |
127 | # Rope project settings
128 | .ropeproject
129 |
130 | # mkdocs documentation
131 | /site
132 |
133 | # mypy
134 | .mypy_cache/
135 | .dmypy.json
136 | dmypy.json
137 |
138 | # Pyre type checker
139 | .pyre/
140 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
 1 | # PythoStitcher
 2 |
 3 |
 4 |
 5 |
 6 |
 7 |
8 | ## What is PythoStitcher?
 9 | PythoStitcher is a tool inspired by [AutoStitcher](https://www.nature.com/articles/srep29906) to stitch histopathology images into an artificial whole-mount image. These artificial whole-mounts are indispensable for multimodal imaging research, since they greatly improve the ability to relate histopathology information to pre-operative imaging. PythoStitcher works fully automatically and is able to generate very high-resolution (0.25 µm/pixel) whole-mounts. For a detailed description, check out the official [PythoStitcher publication](https://www.nature.com/articles/s41598-024-52007-5) in Nature Scientific Reports.
10 |
11 |
12 |
13 |
14 |
15 | ## Does PythoStitcher also work on my data?
 16 | If your data consists of either halves or quadrants of a certain type of tissue (e.g. a prostate), PythoStitcher should be able to reconstruct the artificial whole-mount for you. PythoStitcher expects your data to have multiple resolution layers (a so-called pyramidal file), preferably in .mrxs or .tiff format. In addition, PythoStitcher requires a tissue mask of said tissue. This tissue mask can be generated by your tissue segmentation algorithm of choice; for the provided sample data we make use of the algorithm from [Bándi et al](https://pubmed.ncbi.nlm.nih.gov/31871843/).
17 |
18 | ## How do I run PythoStitcher?
19 | #### Docker container
20 | It is highly recommended to run PythoStitcher as a Docker container, since PythoStitcher uses some libraries that need to be built from source. The Docker container comes prepackaged with these libraries, as well as with model weights of the involved CNNs, and should run out-of-the-box. You can pull the container with the following command or alternatively build it yourself locally with the provided Dockerfile in /build.
21 |
22 | docker pull ghcr.io/computationalpathologygroup/pythostitcher:latest
23 |
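If you prefer to build the image yourself, note that the Dockerfile copies files from install_files/, so the build context should be the /build directory. A minimal sketch (the pythostitcher:local tag is just an example name):

    cd build
    docker build -t pythostitcher:local .
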
24 | #### Data preparation
 25 | Your input data should be prepared as follows: create separate raw_images and raw_masks directories for your high-resolution image and tissue mask files, respectively. Ensure that the name of each tissue mask is exactly the same as that of the corresponding image. If you want to enforce the location of each fragment in the final reconstruction, you can include a force_config.txt file; see the example_force_config.txt file (and the minimal example below the directory layout) for how to format this. If you leave out this file, PythoStitcher will automatically determine the optimal configuration of the tissue fragments.
26 |
 27 |     data/
 28 |     └── patient_ID
 29 |         ├── raw_images
 30 |         │   ├── image1
 31 |         │   └── image2
 32 |         ├── raw_masks
 33 |         │   ├── image1
 34 |         │   └── image2
 35 |         └── force_config.txt [OPTIONAL]
36 |
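If you do include a force_config.txt, it uses a simple one-fragment-per-line image_name:position format. A minimal example for two halves, matching the force_config.txt in the repository root:

    image1:left
    image2:right
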
37 |
38 | #### Usage instructions
39 |
40 | After preparing the input data in the aforementioned format, you can run PythoStitcher through the command line using:
41 |
42 | docker run -v /home/user:/home/user ghcr.io/computationalpathologygroup/pythostitcher --datadir "/home/user/data/patient_x" --savedir "/home/user/results" --resolution x
 43 | where *datadir* refers to the directory with your input data, *savedir* refers to the location where the result will be saved, and *resolution* refers to the resolution in µm/pixel at which you want to save the final reconstruction. The *-v /home/user:/home/user* flag creates a volume so that the container can access your local data directory; this can be any directory, as long as it is a parent directory of both the data and result directories. To obtain the result for the prostatectomy case with four fragments (figure at the top), you would run:
44 |
45 | docker run -v /home/user:/home/user ghcr.io/computationalpathologygroup/pythostitcher --datadir "/home/user/data/prostate_4" --savedir "/home/user/results" --resolution 0.25
46 |
47 | #### Sample data
 48 | If you don't have any data available but are still curious to try PythoStitcher, you can make use of our sample data, available from Zenodo. The sample data includes multiple prostatectomy cases with different sectioning approaches; please see the Zenodo record for more details.
49 |
50 | ## Acknowledgements
51 | The development of PythoStitcher would not have been possible without the open-sourcing of [JigsawNet](https://github.com/Lecanyu/JigsawNet), [ASAP](https://github.com/computationalpathologygroup/ASAP) and [PyVips](https://github.com/libvips/pyvips).
52 |
53 | ## Licensing
 54 | The source code of PythoStitcher is licensed under the [GNU Lesser General Public License (LGPL)](https://www.gnu.org/licenses/lgpl-3.0.nl.html). The provided sample data is licensed under the [CC Attribution 4.0 International license](https://creativecommons.org/licenses/by/4.0/legalcode). Please take these licenses into account when using PythoStitcher.
55 |
56 |
--------------------------------------------------------------------------------
/build/Dockerfile:
--------------------------------------------------------------------------------
1 | # =============================================================
2 | # Configuration
3 | # =============================================================
4 |
5 | ARG UBUNTU_VERSION=20.04
6 | ARG CUDA_MAJOR_VERSION=11.3.1
7 | ARG CUDNN_MAJOR_VERSION=8
8 | ARG PYTHON_VERSION=3.9.15
9 | ARG NUMPY_VERSION=1.21.5
10 | ARG PYTORCH_VERSION=1.10.2
11 | ARG TORCHVISION_VERSION=0.11.3
12 | ARG TENSORFLOW_VERSION=2.8.0
13 | ARG VIPS_VERSION=8.13.0
14 | ARG PYTHOSTITCHER_VERSION=0.3.2
15 |
16 | ARG BUILD_JOBS=16
17 |
18 | # =============================================================
19 | # Create build docker
20 | # =============================================================
21 |
22 | FROM nvidia/cuda:${CUDA_MAJOR_VERSION}-cudnn${CUDNN_MAJOR_VERSION}-devel-ubuntu${UBUNTU_VERSION} AS builder
23 |
24 | # === Propagate build args ===
25 | ARG PYTHON_VERSION
26 | ARG NUMPY_VERSION
27 | ARG BUILD_JOBS
28 |
29 | # === Install build packages ===
30 | ENV DEBIAN_FRONTEND noninteractive
31 |
32 | RUN apt-get update --fix-missing && \
33 | apt-get install -y --no-install-recommends \
34 | ca-certificates \
35 | pkg-config apt-transport-https \
36 | openjdk-8-jdk \
37 | g++ ninja-build make \
38 | wget git zip unzip \
39 | libssl-dev zlib1g-dev \
40 | libncurses5-dev libncursesw5-dev libreadline-dev libsqlite3-dev \
41 | libgdbm-dev libdb5.3-dev libbz2-dev libexpat1-dev liblzma-dev tk-dev \
42 | gcovr libffi-dev uuid-dev
43 |
44 | # === Install python ===
45 | RUN cd /tmp && \
46 | wget https://www.python.org/ftp/python/${PYTHON_VERSION}/Python-${PYTHON_VERSION}.tar.xz && \
47 | tar xfv Python*.xz && \
48 | cd Python-3*/ && \
49 | ./configure --enable-shared LDFLAGS="-fprofile-arcs" && \
50 | make -j${BUILD_JOBS} install && \
51 | cd /tmp && \
52 | rm -rf Python-3* && \
53 | ldconfig
54 |
55 | RUN pip3 install --upgrade pip && \
56 | pip3 install cython wheel numpy==${NUMPY_VERSION} packaging
57 |
58 | # =============================================================
59 | # Build Object Detection framework for Tensorflow
60 | # =============================================================
61 |
62 | FROM builder AS tensorflow-odf
63 |
64 | # === Install protobuf compiler ===
65 | RUN apt-get install -y --no-install-recommends protobuf-compiler
66 |
67 | # === Build the library ===
68 | RUN git clone https://github.com/tensorflow/models.git && \
69 | cd models/research && \
70 | protoc object_detection/protos/*.proto --python_out=. && \
71 | cp object_detection/packages/tf2/setup.py . && \
72 | python3 setup.py bdist_wheel && \
73 | mkdir /tensorflow-odf && \
74 | cp dist/*.whl /tensorflow-odf/
75 |
76 | # =============================================================
77 | # Build NVIDIA apex module for PyTorch
78 | # =============================================================
79 |
80 | FROM builder AS apex
81 |
82 | # === Propagate build args ===
83 | ARG CUDA_MAJOR_VERSION
84 | ARG PYTORCH_VERSION
85 | ARG TORCHVISION_VERSION
86 |
87 | # === Install PyTorch ===
88 | RUN CUDA_IDENTIFIER_PYTORCH=`echo "cu${CUDA_MAJOR_VERSION}" | sed "s|\.||g" | cut -c1-5` && \
89 | pip3 install --no-cache-dir --find-links https://download.pytorch.org/whl/torch_stable.html \
90 | "torch==${PYTORCH_VERSION}+${CUDA_IDENTIFIER_PYTORCH}" \
91 | "torchvision==${TORCHVISION_VERSION}+${CUDA_IDENTIFIER_PYTORCH}"
92 |
93 | # =============================================================
94 | # Create base docker
95 | # =============================================================
96 |
97 | FROM nvidia/cuda:${CUDA_MAJOR_VERSION}-cudnn${CUDNN_MAJOR_VERSION}-runtime-ubuntu${UBUNTU_VERSION} AS base
98 |
99 | # === Propagate build args ===
100 | ARG CUDA_MAJOR_VERSION
101 | ARG PYTHON_VERSION
102 | ARG NUMPY_VERSION
103 | ARG PYTORCH_VERSION
104 | ARG TORCHVISION_VERSION
105 | ARG TENSORFLOW_VERSION
106 | ARG BUILD_JOBS
107 |
108 | # === Configurate environment variables ===
109 | ENV LANG=C.UTF-8 LC_ALL=C.UTF-8
110 | ENV DEBIAN_FRONTEND noninteractive
111 |
112 | # === Install libs ===
113 | RUN apt-get update --fix-missing && \
114 | apt-get install -y --no-install-recommends \
115 | man \
116 | openssh-server \
117 | ca-certificates apt-transport-https \
118 | sudo \
119 | git subversion \
120 | nano vim \
121 | tmux screen \
122 | htop \
123 | g++ ninja-build \
124 | rsync \
125 | pv \
126 | meson \
127 | curl wget \
128 | bzip2 zip unzip \
129 | dcmtk libboost-all-dev \
130 | libgomp1 \
131 | libjpeg-turbo8 \
132 | libssl-dev zlib1g-dev libncurses5-dev libncursesw5-dev libreadline-dev libsqlite3-dev \
133 | libgdbm-dev libdb5.3-dev libbz2-dev libexpat1-dev liblzma-dev tk-dev gcovr libffi-dev uuid-dev \
134 | libgtk2.0-dev libgsf-1-dev libtiff5-dev libopenslide-dev \
135 | libgl1-mesa-glx && \
136 | apt-get clean && \
137 | rm -rf /var/lib/apt/lists/* && \
138 | mkdir /var/run/sshd && \
139 | cat /etc/sudoers | grep -v secure_path > /tmp/sudoers && mv /tmp/sudoers /etc/sudoers
140 |
141 | # == Install VIPS ==
142 | ARG VIPS_VERSION
143 |
144 | RUN wget https://github.com/libvips/libvips/releases/download/v${VIPS_VERSION}/vips-${VIPS_VERSION}.tar.gz -P /tmp && \
145 | tar -xf /tmp/vips-${VIPS_VERSION}.tar.gz --directory /tmp/ && \
146 | cd /tmp/vips-${VIPS_VERSION} && \
147 | ./configure && \
148 | make && \
149 | sudo -S make install && \
150 | cd .. && \
151 | sudo -S ldconfig
152 |
153 | RUN env | grep '^PATH=\|^LD_LIBRARY_PATH=\|^LANG=\|^LC_ALL=\|^CUDA_ROOT=' > /etc/environment
154 |
155 | # === Configure timezone ===
156 | RUN echo "Europe/Amsterdam" > /etc/timezone && \
157 | rm -f /etc/localtime && \
158 | dpkg-reconfigure -f noninteractive tzdata
159 |
160 | # === Setup user ===
161 | RUN useradd -ms /bin/bash user && \
162 | (echo user ; echo user) | passwd user && \
163 | gpasswd -a user ssh && \
164 | gpasswd -a user sudo
165 |
166 | # === Install python with up-to-date pip ===
167 | RUN cd /tmp && \
168 | wget "https://www.python.org/ftp/python/${PYTHON_VERSION}/Python-${PYTHON_VERSION}.tar.xz" && \
169 | tar xfv Python*.xz && \
170 | cd Python-3*/ && \
171 | ./configure --enable-shared LDFLAGS="-fprofile-arcs" && \
172 | make -j${BUILD_JOBS} install && \
173 | cd ~ && \
174 | rm -rf /tmp/Python-3* && \
175 | ldconfig
176 |
177 | RUN pip3 install --upgrade pip pip-tools wheel setuptools && \
178 | printf '#!/bin/bash\necho "Please use pip3 instead of pip to install packages for python3"' > /usr/local/bin/pip && \
179 | chmod +x /usr/local/bin/pip && \
180 | rm -rf ~/.cache/pip
181 |
182 | # === Install python libraries ===
183 | COPY --from=tensorflow-odf /tensorflow-odf/object_detection-*.whl /root/python-packages/
184 | COPY install_files/requirements.txt /root/python-packages/
185 | RUN pip3 install -r /root/python-packages/requirements.txt
186 |
187 | # === Set some environment variables for TensorFlow. ===
188 | ENV FOR_DISABLE_CONSOLE_CTRL_HANDLER 1
189 | ENV TF_CPP_MIN_LOG_LEVEL 3
190 | RUN env | grep '^FOR_DISABLE_CONSOLE_CTRL_HANDLER=\|^TF_CPP_MIN_LOG_LEVEL=' >> /etc/environment
191 |
192 | # === Install ASAP ===
193 | RUN wget https://github.com/computationalpathologygroup/ASAP/releases/download/ASAP-2.1/ASAP-2.1-py39-Ubuntu2004.deb -P /root/
194 | RUN apt-get update && \
195 | apt-get install --assume-yes /root/ASAP-2.1-py39-Ubuntu2004.deb && \
196 | ldconfig && \
197 | SITE_PACKAGES=`python3 -c "import sysconfig; print(sysconfig.get_paths()['purelib'])"` && \
198 | printf "/opt/ASAP/bin/\n" > "${SITE_PACKAGES}/asap.pth" && \
199 | #printf "/opt/ASAP/bin/\n" > "/usr/lib/python3.9/site-packages/asap.pth" && \
200 | apt-get clean && \
201 | rm -rf /var/lib/apt/lists/*
202 |
203 | # === Install latest Pixman ===
204 | # This should fix the bug described here (https://github.com/TissueImageAnalytics/tiatoolbox/issues/133)
205 | RUN sudo apt-get install meson && \
206 | wget https://www.cairographics.org/releases/pixman-0.42.2.tar.gz && \
207 | tar -xf pixman-0.42.2.tar.gz && \
208 | cd pixman-0.42.2 && \
209 | mkdir build && \
210 | cd build && \
211 | meson --prefix=/usr --buildtype=release && \
212 | ninja && \
213 | sudo ninja install
214 |
215 | # === Get latest pre-release of PythoStitcher ===
216 | WORKDIR /home/user
217 | ARG PYTHOSTITCHER_VERSION
218 | RUN wget https://github.com/computationalpathologygroup/pythostitcher/archive/refs/tags/${PYTHOSTITCHER_VERSION}.tar.gz && \
219 | tar xf ${PYTHOSTITCHER_VERSION}.tar.gz
220 |
221 | # === Download JigsawNet model weights in PythoStitcher directory ===
222 | # Download JigsawNet weights from Google Drive
223 | RUN pip3 install gdown
224 | RUN gdown --id 1ogT0jPUqPBOcDnUYJHs0emCItxJqveQS -O JigsawCNN_checkpoint.zip && \
225 | unzip -o /home/user/JigsawCNN_checkpoint.zip -d /home/user/pythostitcher-${PYTHOSTITCHER_VERSION}/model_weights/
226 |
227 | # === Configure stop signal. ===
228 | STOPSIGNAL SIGINT
229 |
230 | # === Expose ports for ssh, tensorboard, and jupyter. ===
231 | EXPOSE 22 6006 8888
232 |
233 | # === Overwrite NVIDIA's authorship label ===
234 | LABEL maintainer="DNSCHOUTEN"
235 |
236 | # === Set entrypoint to run PythoStitcher ===
237 | USER root
238 | WORKDIR /home/user/pythostitcher-${PYTHOSTITCHER_VERSION}/src
239 | RUN cd /home/user/pythostitcher-${PYTHOSTITCHER_VERSION}/src
240 |
241 | COPY install_files/run.sh /root/run.sh
242 | ENTRYPOINT ["/bin/bash", "/root/run.sh"]
243 |
--------------------------------------------------------------------------------
/build/README.txt:
--------------------------------------------------------------------------------
1 | This folder provides the build information to build your own PythoStitcher container locally. The install_files folder contains two components:
2 | 1. requirements.txt - this is a precompiled list with dependencies which are tested to be compatible
3 | 2. run.sh - this is the bash file to execute when you start your Docker
4 |
5 | The provided run.sh file will give you two options for running the PythoStitcher Docker:
6 | 1. Automatically. This mode engages when you provide the docker with the PythoStitcher input arguments mentioned on the Readme of the repository. It will then just run the packaged PythoStitcher code directly and is the recommended way of using PythoStitcher.
  7 | 2. Interactive. If you run the PythoStitcher Docker without any input arguments, it will launch an interactive instance which you can couple to your IDE of choice. This allows you to run a local, modified version of PythoStitcher according to your needs. This is the advanced way of using the container and is only recommended if you need to make alterations to the code.
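For illustration, assuming the prebuilt image from the main README, the two modes could be invoked as follows (paths are placeholders):
1. Automatic:    docker run -v /home/user:/home/user ghcr.io/computationalpathologygroup/pythostitcher --datadir "/home/user/data/patient_x" --savedir "/home/user/results" --resolution 0.25
2. Interactive:  docker run -p 8888:8888 -v /home/user:/home/user ghcr.io/computationalpathologygroup/pythostitcher
   (without input arguments the container starts an SSH daemon and a JupyterLab server on port 8888, see run.sh)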
--------------------------------------------------------------------------------
/build/install_files/requirements.txt:
--------------------------------------------------------------------------------
1 | #
2 | # This file is autogenerated by pip-compile with python 3.9
3 | # To update, run:
4 | #
5 | # pip-compile --find-links=https://download.pytorch.org/whl/torch_stable.html requirements.in
6 | #
7 | --find-links https://download.pytorch.org/whl/torch_stable.html
8 |
9 | absl-py==1.0.0
10 | # via
11 | # tensorboard
12 | # tensorflow
13 | # tensorflow-datasets
14 | # tensorflow-gpu
15 | # tensorflow-metadata
16 | # tf-slim
17 | albumentations==1.1.0
18 | # via -r requirements.in
19 | anyio==3.6.1
20 | # via
21 | # httpcore
22 | # jupyter-server
23 | apache-beam==2.38.0
24 | # via object-detection
25 | argon2-cffi==21.3.0
26 | # via
27 | # jupyter-server
28 | # notebook
29 | argon2-cffi-bindings==21.2.0
30 | # via argon2-cffi
31 | arrow==1.2.2
32 | # via
33 | # isoduration
34 | # jinja2-time
35 | asttokens==2.0.5
36 | # via stack-data
37 | astunparse==1.6.3
38 | # via
39 | # tensorflow
40 | # tensorflow-gpu
41 | attrs==21.4.0
42 | # via
43 | # jsonschema
44 | # pytest
45 | avro-python3==1.10.2
46 | # via object-detection
47 | babel==2.10.1
48 | # via jupyterlab-server
49 | backcall==0.2.0
50 | # via ipython
51 | beautifulsoup4==4.11.1
52 | # via nbconvert
53 | binaryornot==0.4.4
54 | # via cookiecutter
55 | bleach
56 | # via nbconvert
57 | boto3==1.23.3
58 | # via neptune-client
59 | botocore==1.26.3
60 | # via
61 | # boto3
62 | # s3transfer
63 | bravado==11.0.3
64 | # via neptune-client
65 | bravado-core==5.17.0
66 | # via bravado
67 | cachetools==5.1.0
68 | # via google-auth
69 | captum==0.5.0
70 | # via -r requirements.in
71 | certifi==2022.5.18
72 | # via
73 | # httpcore
74 | # httpx
75 | # kaggle
76 | # requests
77 | # sentry-sdk
78 | cffi==1.15.0
79 | # via
80 | # argon2-cffi-bindings
81 | # pyvips
82 | chardet==4.0.0
83 | # via binaryornot
84 | charset-normalizer==2.0.12
85 | # via
86 | # httpx
87 | # requests
88 | click==8.1.3
89 | # via
90 | # cookiecutter
91 | # evalutils
92 | # gcapi
93 | # neptune-client
94 | # nltk
95 | # panimg
96 | # wandb
97 | cloudpickle==2.0.0
98 | # via apache-beam
99 | colorama==0.4.4
100 | # via sacrebleu
101 | construct==2.10.68
102 | # via panimg
103 | contextlib2==21.6.0
104 | # via object-detection
105 | cookiecutter==1.7.3
106 | # via evalutils
107 | crcmod==1.7
108 | # via apache-beam
109 | cycler==0.11.0
110 | # via
111 | # lvis
112 | # matplotlib
113 | cython==0.29.30
114 | # via
115 | # lvis
116 | # object-detection
117 | # tf-models-official
118 | debugpy==1.6.0
119 | # via ipykernel
120 | decorator==5.1.1
121 | # via
122 | # ipdb
123 | # ipython
124 | defusedxml==0.7.1
125 | # via nbconvert
126 | dill==0.3.1.1
127 | # via
128 | # apache-beam
129 | # tensorflow-datasets
130 | dm-tree==0.1.7
131 | # via tensorflow-model-optimization
132 | docker-pycreds==0.4.0
133 | # via wandb
134 | docopt==0.6.2
135 | # via hdfs
136 | entrypoints==0.4
137 | # via
138 | # jupyter-client
139 | # nbconvert
140 | et-xmlfile==1.1.0
141 | # via openpyxl
142 | evalutils==0.3.1
143 | # via -r requirements.in
144 | executing==0.8.3
145 | # via stack-data
146 | fastavro==1.4.12
147 | # via apache-beam
148 | fastjsonschema==2.15.3
149 | # via nbformat
150 | fire==0.4.0
151 | # via keras2onnx
152 | flatbuffers==1.12
153 | # via
154 | # onnxruntime-gpu
155 | # tensorflow
156 | # tensorflow-gpu
157 | fonttools==4.33.3
158 | # via matplotlib
159 | fqdn==1.5.1
160 | # via jsonschema
161 | future==0.18.2
162 | # via neptune-client
163 | gast==0.4.0
164 | # via
165 | # tensorflow
166 | # tensorflow-gpu
167 | gcapi==0.7.0
168 | # via -r requirements.in
169 | gin-config==0.5.0
170 | # via tf-models-official
171 | gitdb==4.0.9
172 | # via gitpython
173 | gitpython==3.1.27
174 | # via
175 | # -r requirements.in
176 | # neptune-client
177 | # wandb
178 | google-api-core==2.8.0
179 | # via google-api-python-client
180 | google-api-python-client==2.48.0
181 | # via tf-models-official
182 | google-auth==2.6.6
183 | # via
184 | # google-api-core
185 | # google-api-python-client
186 | # google-auth-httplib2
187 | # google-auth-oauthlib
188 | # tensorboard
189 | google-auth-httplib2==0.1.0
190 | # via google-api-python-client
191 | google-auth-oauthlib==0.4.6
192 | # via tensorboard
193 | google-pasta==0.2.0
194 | # via
195 | # tensorflow
196 | # tensorflow-gpu
197 | googleapis-common-protos==1.56.1
198 | # via
199 | # google-api-core
200 | # tensorflow-metadata
201 | grpcio==1.46.1
202 | # via
203 | # apache-beam
204 | # tensorboard
205 | # tensorflow
206 | # tensorflow-gpu
207 | h11==0.12.0
208 | # via httpcore
209 | h5py==3.6.0
210 | # via
211 | # tensorflow
212 | # tensorflow-gpu
213 | hdfs==2.7.0
214 | # via apache-beam
215 | httpcore==0.14.7
216 | # via httpx
217 | httplib2==0.19.1
218 | # via
219 | # apache-beam
220 | # google-api-python-client
221 | # google-auth-httplib2
222 | # oauth2client
223 | httpx==0.22.0
224 | # via gcapi
225 | idna==3.3
226 | # via
227 | # anyio
228 | # jsonschema
229 | # requests
230 | # rfc3986
231 | imageio==2.19.2
232 | # via
233 | # -r requirements.in
234 | # evalutils
235 | # scikit-image
236 | importlib-metadata==4.11.3
237 | # via
238 | # jupyterlab-server
239 | # markdown
240 | iniconfig==1.1.1
241 | # via pytest
242 | ipdb==0.13.9
243 | # via -r requirements.in
244 | ipykernel==6.13.0
245 | # via
246 | # ipywidgets
247 | # jupyter
248 | # jupyter-console
249 | # notebook
250 | # qtconsole
251 | ipython==8.3.0
252 | # via
253 | # ipdb
254 | # ipykernel
255 | # ipywidgets
256 | # jupyter-console
257 | # jupyterlab
258 | ipython-genutils==0.2.0
259 | # via
260 | # ipywidgets
261 | # notebook
262 | # qtconsole
263 | ipywidgets==7.7.0
264 | # via jupyter
265 | isoduration==20.11.0
266 | # via jsonschema
267 | jedi==0.18.1
268 | # via ipython
269 | jinja2==3.1.2
270 | # via
271 | # cookiecutter
272 | # jinja2-time
273 | # jupyter-server
274 | # jupyterlab
275 | # jupyterlab-server
276 | # nbconvert
277 | # notebook
278 | jinja2-time==0.2.0
279 | # via cookiecutter
280 | jmespath==1.0.0
281 | # via
282 | # boto3
283 | # botocore
284 | joblib==1.1.0
285 | # via
286 | # nltk
287 | # scikit-learn
288 | json5==0.9.8
289 | # via jupyterlab-server
290 | jsonpointer==2.3
291 | # via jsonschema
292 | jsonref==0.2
293 | # via bravado-core
294 | jsonschema[format,format_nongpl]==4.5.1
295 | # via
296 | # bravado-core
297 | # gcapi
298 | # jupyterlab-server
299 | # nbformat
300 | # swagger-spec-validator
301 | jupyter==1.0.0
302 | # via -r requirements.in
303 | jupyter-client==7.3.1
304 | # via
305 | # ipykernel
306 | # jupyter-console
307 | # jupyter-server
308 | # nbclient
309 | # notebook
310 | # qtconsole
311 | jupyter-console==6.4.3
312 | # via jupyter
313 | jupyter-core==4.10.0
314 | # via
315 | # jupyter-client
316 | # jupyter-server
317 | # jupyterlab
318 | # nbconvert
319 | # nbformat
320 | # notebook
321 | # qtconsole
322 | jupyter-server==1.17.0
323 | # via
324 | # jupyterlab
325 | # jupyterlab-server
326 | # nbclassic
327 | # notebook-shim
328 | jupyterlab==3.4.2
329 | # via -r requirements.in
330 | jupyterlab-pygments==0.2.2
331 | # via nbconvert
332 | jupyterlab-server==2.14.0
333 | # via jupyterlab
334 | jupyterlab-widgets==1.1.0
335 | # via ipywidgets
336 | kaggle==1.5.12
337 | # via tf-models-official
338 | keras==2.9.0
339 | # via
340 | # -r requirements.in
341 | # object-detection
342 | # tensorflow
343 | # tensorflow-gpu
344 | keras-preprocessing==1.1.2
345 | # via
346 | # tensorflow
347 | # tensorflow-gpu
348 | keras2onnx==1.7.0
349 | # via -r requirements.in
350 | kiwisolver==1.4.2
351 | # via
352 | # lvis
353 | # matplotlib
354 | libclang==14.0.1
355 | # via
356 | # tensorflow
357 | # tensorflow-gpu
358 | lvis==0.5.3
359 | # via object-detection
360 | lxml==4.8.0
361 | # via object-detection
362 | markdown==3.3.7
363 | # via tensorboard
364 | markupsafe==2.1.1
365 | # via
366 | # jinja2
367 | # nbconvert
368 | matplotlib==3.5.2
369 | # via
370 | # -r requirements.in
371 | # captum
372 | # lvis
373 | # object-detection
374 | # pycocotools
375 | # seaborn
376 | # tf-models-official
377 | matplotlib-inline==0.1.3
378 | # via
379 | # ipykernel
380 | # ipython
381 | mistune==0.8.4
382 | # via nbconvert
383 | mock==4.0.3
384 | # via -r requirements.in
385 | monai==0.8.1
386 | # via -r requirements.in
387 | monotonic==1.6
388 | # via bravado
389 | msgpack==1.0.3
390 | # via
391 | # bravado
392 | # bravado-core
393 | nbclassic==0.3.7
394 | # via jupyterlab
395 | nbclient==0.6.3
396 | # via nbconvert
397 | nbconvert==6.5.0
398 | # via
399 | # jupyter
400 | # jupyter-server
401 | # notebook
402 | nbformat==5.4.0
403 | # via
404 | # ipywidgets
405 | # jupyter-server
406 | # nbclient
407 | # nbconvert
408 | # notebook
409 | neptune-client==0.16.2
410 | # via -r requirements.in
411 | nest-asyncio==1.5.5
412 | # via
413 | # ipykernel
414 | # jupyter-client
415 | # nbclient
416 | # notebook
417 | networkx==2.8.1
418 | # via scikit-image
419 | nibabel==3.2.2
420 | # via -r requirements.in
421 | nltk==3.7
422 | # via -r requirements.in
423 | notebook==6.4.11
424 | # via
425 | # jupyter
426 | # nbclassic
427 | # widgetsnbextension
428 | notebook-shim==0.1.0
429 | # via nbclassic
430 | numpy==1.22.3
431 | # via
432 | # -r requirements.in
433 | # albumentations
434 | # apache-beam
435 | # captum
436 | # evalutils
437 | # h5py
438 | # imageio
439 | # keras-preprocessing
440 | # keras2onnx
441 | # lvis
442 | # matplotlib
443 | # monai
444 | # nibabel
445 | # onnx
446 | # onnxconverter-common
447 | # onnxruntime-gpu
448 | # opencv-python
449 | # opencv-python-headless
450 | # opt-einsum
451 | # pandas
452 | # panimg
453 | # pyarrow
454 | # pycocotools
455 | # pylibjpeg
456 | # pylibjpeg-libjpeg
457 | # pylibjpeg-openjpeg
458 | # pylibjpeg-rle
459 | # pywavelets
460 | # qudida
461 | # rdp
462 | # sacrebleu
463 | # scikit-image
464 | # scikit-learn
465 | # scipy
466 | # seaborn
467 | # seqeval
468 | # tensorboard
469 | # tensorflow
470 | # tensorflow-datasets
471 | # tensorflow-gpu
472 | # tensorflow-hub
473 | # tensorflow-model-optimization
474 | # tf-models-official
475 | # tifffile
476 | # torchvision
477 | oauth2client==4.1.3
478 | # via
479 | # apache-beam
480 | # tf-models-official
481 | oauthlib==3.2.0
482 | # via
483 | # neptune-client
484 | # requests-oauthlib
485 | object-detection @ file:///root/python-packages/object_detection-0.1-py3-none-any.whl
486 | # via -r requirements.in
487 | onnx==1.11.0
488 | # via
489 | # keras2onnx
490 | # onnxconverter-common
491 | onnxconverter-common==1.9.0
492 | # via keras2onnx
493 | onnxruntime-gpu==1.11.1
494 | # via -r requirements.in
495 | opencv-python==4.5.5.64
496 | # via
497 | # -r requirements.in
498 | # lvis
499 | opencv-python-headless==4.5.5.64
500 | # via
501 | # albumentations
502 | # qudida
503 | # tf-models-official
504 | openpyxl==3.0.9
505 | # via -r requirements.in
506 | openslide-python==1.1.2
507 | # via panimg
508 | opt-einsum==3.3.0
509 | # via
510 | # tensorflow
511 | # tensorflow-gpu
512 | orjson==3.6.8
513 | # via apache-beam
514 | packaging==21.3
515 | # via
516 | # ipykernel
517 | # jupyter-server
518 | # jupyterlab
519 | # jupyterlab-server
520 | # matplotlib
521 | # nbconvert
522 | # neptune-client
523 | # nibabel
524 | # pytest
525 | # qtpy
526 | # scikit-image
527 | # tensorflow
528 | # tensorflow-gpu
529 | pandas==1.4.2
530 | # via
531 | # -r requirements.in
532 | # evalutils
533 | # neptune-client
534 | # object-detection
535 | # seaborn
536 | # tf-models-official
537 | pandocfilters==1.5.0
538 | # via nbconvert
539 | panimg==0.8.1
540 | # via -r requirements.in
541 | parso==0.8.3
542 | # via jedi
543 | pathtools==0.1.2
544 | # via wandb
545 | pexpect==4.8.0
546 | # via ipython
547 | pickleshare==0.7.5
548 | # via ipython
549 | pillow==9.1.1
550 | # via
551 | # -r requirements.in
552 | # imageio
553 | # matplotlib
554 | # neptune-client
555 | # object-detection
556 | # openslide-python
557 | # panimg
558 | # scikit-image
559 | # tf-models-official
560 | # torchvision
561 | pluggy==1.0.0
562 | # via pytest
563 | portalocker==2.4.0
564 | # via sacrebleu
565 | poyo==0.5.0
566 | # via cookiecutter
567 | prometheus-client==0.14.1
568 | # via
569 | # jupyter-server
570 | # notebook
571 | promise==2.3
572 | # via
573 | # tensorflow-datasets
574 | # wandb
575 | prompt-toolkit==3.0.29
576 | # via
577 | # ipython
578 | # jupyter-console
579 | proto-plus==1.20.3
580 | # via apache-beam
581 | protobuf==3.20.1
582 | # via
583 | # apache-beam
584 | # google-api-core
585 | # googleapis-common-protos
586 | # keras2onnx
587 | # onnx
588 | # onnxconverter-common
589 | # onnxruntime-gpu
590 | # proto-plus
591 | # tensorboard
592 | # tensorflow
593 | # tensorflow-datasets
594 | # tensorflow-gpu
595 | # tensorflow-hub
596 | # tensorflow-metadata
597 | # wandb
598 | psutil==5.9.0
599 | # via
600 | # -r requirements.in
601 | # ipykernel
602 | # neptune-client
603 | # tf-models-official
604 | # wandb
605 | ptyprocess==0.7.0
606 | # via
607 | # pexpect
608 | # terminado
609 | pure-eval==0.2.2
610 | # via stack-data
611 | py==1.11.0
612 | # via pytest
613 | py-cpuinfo==8.0.0
614 | # via tf-models-official
615 | pyarrow==6.0.1
616 | # via apache-beam
617 | pyasn1==0.4.8
618 | # via
619 | # oauth2client
620 | # pyasn1-modules
621 | # rsa
622 | pyasn1-modules==0.2.8
623 | # via
624 | # google-auth
625 | # oauth2client
626 | pycparser==2.21
627 | # via cffi
628 | pydantic==1.9.1
629 | # via panimg
630 | pydicom==2.3.0
631 | # via
632 | # -r requirements.in
633 | # panimg
634 | pydot==1.4.2
635 | # via apache-beam
636 | pygad==2.18
637 | # manual add
638 | pygments==2.12.0
639 | # via
640 | # ipython
641 | # jupyter-console
642 | # nbconvert
643 | # qtconsole
644 | pyjwt==2.4.0
645 | # via neptune-client
646 | pylibjpeg==1.4.0
647 | # via panimg
648 | pylibjpeg-libjpeg==1.2.0
649 | # via
650 | # -r requirements.in
651 | # panimg
652 | pylibjpeg-openjpeg==1.1.1
653 | # via
654 | # -r requirements.in
655 | # panimg
656 | pylibjpeg-rle==1.3.0
657 | # via panimg
658 | pymongo==3.12.3
659 | # via apache-beam
660 | pyparsing==2.4.7
661 | # via
662 | # httplib2
663 | # lvis
664 | # matplotlib
665 | # packaging
666 | # pydot
667 | pyrsistent==0.18.1
668 | # via jsonschema
669 | pytest==7.1.2
670 | # via -r requirements.in
671 | python-dateutil==2.8.2
672 | # via
673 | # apache-beam
674 | # arrow
675 | # botocore
676 | # bravado
677 | # bravado-core
678 | # jupyter-client
679 | # kaggle
680 | # lvis
681 | # matplotlib
682 | # pandas
683 | # wandb
684 | python-slugify==6.1.2
685 | # via
686 | # cookiecutter
687 | # kaggle
688 | pytz==2022.1
689 | # via
690 | # apache-beam
691 | # babel
692 | # bravado-core
693 | # pandas
694 | pyvips==2.2.0
695 | # via panimg
696 | pywavelets==1.3.0
697 | # via scikit-image
698 | pyyaml==5.4.1
699 | # via
700 | # albumentations
701 | # bravado
702 | # bravado-core
703 | # swagger-spec-validator
704 | # tf-models-official
705 | # wandb
706 | pyzmq==23.0.0
707 | # via
708 | # jupyter-client
709 | # jupyter-server
710 | # notebook
711 | # qtconsole
712 | qtconsole==5.3.0
713 | # via jupyter
714 | qtpy==2.1.0
715 | # via qtconsole
716 | qudida==0.0.4
717 | # via albumentations
718 | rdp==0.8
719 | # via -r requirements.in
720 | regex==2022.4.24
721 | # via
722 | # nltk
723 | # sacrebleu
724 | requests==2.27.1
725 | # via
726 | # apache-beam
727 | # bravado
728 | # cookiecutter
729 | # google-api-core
730 | # hdfs
731 | # jupyterlab-server
732 | # kaggle
733 | # keras2onnx
734 | # neptune-client
735 | # requests-oauthlib
736 | # tensorboard
737 | # tensorflow-datasets
738 | # torchvision
739 | # wandb
740 | requests-oauthlib==1.3.1
741 | # via
742 | # google-auth-oauthlib
743 | # neptune-client
744 | rfc3339-validator==0.1.4
745 | # via jsonschema
746 | rfc3986[idna2008]==1.5.0
747 | # via httpx
748 | rfc3986-validator==0.1.1
749 | # via jsonschema
750 | rfc3987==1.3.8
751 | # via jsonschema
752 | rsa==4.8
753 | # via
754 | # google-auth
755 | # oauth2client
756 | s3transfer==0.5.2
757 | # via boto3
758 | sacrebleu==2.0.0
759 | # via tf-models-official
760 | scikit-image==0.19.2
761 | # via
762 | # -r requirements.in
763 | # albumentations
764 | scikit-learn==1.1.0
765 | # via
766 | # -r requirements.in
767 | # evalutils
768 | # qudida
769 | # seqeval
770 | scipy==1.8.1
771 | # via
772 | # -r requirements.in
773 | # albumentations
774 | # evalutils
775 | # object-detection
776 | # scikit-image
777 | # scikit-learn
778 | # seaborn
779 | # tf-models-official
780 | seaborn==0.11.2
781 | # via -r requirements.in
782 | send2trash==1.8.0
783 | # via
784 | # jupyter-server
785 | # notebook
786 | sentencepiece==0.1.96
787 | # via tf-models-official
788 | sentry-sdk==1.5.12
789 | # via wandb
790 | seqeval==1.2.2
791 | # via tf-models-official
792 | setproctitle==1.2.3
793 | # via wandb
794 | shapely==1.8.2
795 | # via -r requirements.in
796 | shortuuid==1.0.9
797 | # via wandb
798 | simpleitk==2.1.1.2
799 | # via
800 | # -r requirements.in
801 | # evalutils
802 | # panimg
803 | simplejson==3.17.6
804 | # via
805 | # bravado
806 | # bravado-core
807 | six==1.16.0
808 | # via
809 | # -r requirements.in
810 | # absl-py
811 | # asttokens
812 | # astunparse
813 | # bleach
814 | # bravado
815 | # bravado-core
816 | # cookiecutter
817 | # docker-pycreds
818 | # fire
819 | # google-auth
820 | # google-auth-httplib2
821 | # google-pasta
822 | # grpcio
823 | # hdfs
824 | # kaggle
825 | # keras-preprocessing
826 | # lvis
827 | # neptune-client
828 | # oauth2client
829 | # object-detection
830 | # promise
831 | # python-dateutil
832 | # rfc3339-validator
833 | # swagger-spec-validator
834 | # tensorflow
835 | # tensorflow-datasets
836 | # tensorflow-gpu
837 | # tensorflow-model-optimization
838 | # tf-models-official
839 | # wandb
840 | smmap==5.0.0
841 | # via gitdb
842 | sniffio==1.2.0
843 | # via
844 | # anyio
845 | # httpcore
846 | # httpx
847 | soupsieve==2.3.2.post1
848 | # via beautifulsoup4
849 | stack-data==0.2.0
850 | # via ipython
851 | swagger-spec-validator==2.7.4
852 | # via
853 | # bravado-core
854 | # neptune-client
855 | tabulate==0.8.9
856 | # via sacrebleu
857 | tensorboard==2.9.0
858 | # via
859 | # -r requirements.in
860 | # tensorflow
861 | # tensorflow-gpu
862 | tensorboard-data-server==0.6.1
863 | # via tensorboard
864 | tensorboard-plugin-wit==1.8.1
865 | # via tensorboard
866 | tensorflow==2.9.0
867 | # via
868 | # tensorflow-text
869 | # tf-models-official
870 | tensorflow-addons==0.16.1
871 | # via tf-models-official
872 | tensorflow-datasets==4.5.2
873 | # via tf-models-official
874 | tensorflow-estimator==2.9.0
875 | # via
876 | # tensorflow
877 | # tensorflow-gpu
878 | tensorflow-gpu==2.9.0
879 | # via -r requirements.in
880 | tensorflow-hub==0.12.0
881 | # via
882 | # tensorflow-text
883 | # tf-models-official
884 | tensorflow-io==0.26.0
885 | # via object-detection
886 | tensorflow-io-gcs-filesystem==0.26.0
887 | # via
888 | # tensorflow
889 | # tensorflow-gpu
890 | # tensorflow-io
891 | tensorflow-metadata==1.8.0
892 | # via tensorflow-datasets
893 | tensorflow-model-optimization==0.7.2
894 | # via tf-models-official
895 | tensorflow-text==2.9.0
896 | # via tf-models-official
897 | termcolor==1.1.0
898 | # via
899 | # fire
900 | # tensorflow
901 | # tensorflow-datasets
902 | # tensorflow-gpu
903 | terminado==0.15.0
904 | # via
905 | # jupyter-server
906 | # notebook
907 | text-unidecode==1.3
908 | # via python-slugify
909 | tf-models-official==2.9.0
910 | # via object-detection
911 | tf-slim==1.1.0
912 | # via
913 | # object-detection
914 | # tf-models-official
915 | threadpoolctl==3.1.0
916 | # via scikit-learn
917 | tifffile==2022.5.4
918 | # via
919 | # -r requirements.in
920 | # panimg
921 | # scikit-image
922 | tinycss2==1.1.1
923 | # via nbconvert
924 | toml==0.10.2
925 | # via ipdb
926 | tomli==2.0.1
927 | # via pytest
928 | torch==1.11.0+cu113
929 | # via
930 | # -r requirements.in
931 | # captum
932 | # monai
933 | # torchvision
934 | torchstain==1.3.0
935 | # manual add
936 | torchvision==0.12.0+cu113
937 | # via -r requirements.in
938 | tornado==6.1
939 | # via
940 | # ipykernel
941 | # jupyter-client
942 | # jupyter-server
943 | # jupyterlab
944 | # notebook
945 | # terminado
946 | tqdm==4.64.0
947 | # via
948 | # -r requirements.in
949 | # kaggle
950 | # nltk
951 | # tensorflow-datasets
952 | traitlets==5.2.1.post0
953 | # via
954 | # ipykernel
955 | # ipython
956 | # ipywidgets
957 | # jupyter-client
958 | # jupyter-core
959 | # jupyter-server
960 | # matplotlib-inline
961 | # nbclient
962 | # nbconvert
963 | # nbformat
964 | # notebook
965 | # qtconsole
966 | typeguard==2.13.3
967 | # via tensorflow-addons
968 | typing-extensions==4.2.0
969 | # via
970 | # apache-beam
971 | # bravado
972 | # onnx
973 | # pydantic
974 | # qudida
975 | # tensorflow
976 | # tensorflow-gpu
977 | # torch
978 | # torchvision
979 | uri-template==1.2.0
980 | # via jsonschema
981 | uritemplate==4.1.1
982 | # via google-api-python-client
983 | urllib3==1.26.9
984 | # via
985 | # botocore
986 | # kaggle
987 | # neptune-client
988 | # requests
989 | # sentry-sdk
990 | wandb==0.12.16
991 | # via -r requirements.in
992 | wcwidth==0.2.5
993 | # via prompt-toolkit
994 | webcolors==1.11.1
995 | # via jsonschema
996 | webencodings==0.5.1
997 | # via
998 | # bleach
999 | # tinycss2
1000 | websocket-client==1.3.2
1001 | # via
1002 | # jupyter-server
1003 | # neptune-client
1004 | werkzeug==2.1.2
1005 | # via tensorboard
1006 | wheel==0.37.1
1007 | # via
1008 | # astunparse
1009 | # tensorboard
1010 | widgetsnbextension==3.6.0
1011 | # via ipywidgets
1012 | wrapt==1.14.1
1013 | # via
1014 | # tensorflow
1015 | # tensorflow-gpu
1016 | xlrd==2.0.1
1017 | # via -r requirements.in
1018 | zipp==3.8.0
1019 | # via importlib-metadata
1020 |
1021 | # The following packages are considered to be unsafe in a requirements file:
1022 | # setuptools
--------------------------------------------------------------------------------
/build/install_files/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Preserve environment variables (slurm and some nvidia variables are set at runtime)
4 | env | grep '^SLURM_\|^NVIDIA_' >> /etc/environment
5 |
6 | # Disable python buffer for commands that are executed as user "user"
7 | echo "PYTHONUNBUFFERED=1" >> /etc/environment
8 |
9 | # Check if extra arguments were given and execute it as a command.
10 | if [ -z "$2" ]; then
11 | # Print the command for logging.
12 | printf "No extra arguments given, running jupyter and sshd\n\n"
13 |
14 | # Start the SSH daemon and a Jupyter notebook.
15 | /usr/sbin/sshd
16 | sudo --user=user --set-home /bin/bash -c '/usr/local/bin/jupyter lab --ip=0.0.0.0 --port=8888 --no-browser --NotebookApp.token='
17 | else
18 | # Print the command for logging.
19 | printf "Executing command: %s\n\n" "$*"
20 |
21 | # Execute the passed command.
22 | sudo --user=user --set-home python3 /home/user/pythostitcher-0.3.2/src/main.py "${@}"
23 | fi
24 |
--------------------------------------------------------------------------------
/config/parameter_config.json:
--------------------------------------------------------------------------------
1 | {
2 | "top_k": 6,
3 | "resolutions": [0.05, 0.15, 0.45, 1],
4 | "image_level": 7,
5 | "pad_fraction": 0.5,
6 | "slice_idx": "slice_0",
7 | "my_level": 45,
8 |
9 | "n_solutions": 40,
10 | "n_generations": [400, 200, 200, 100],
11 | "n_parents": 2,
12 | "n_mating": 6,
13 | "p_crossover": 0.5,
14 | "crossover_type": "scattered",
15 | "p_mutation": 0.25,
16 | "mutation_type": "random",
17 | "parent_selection": "rank",
18 | "early_stopping": "saturate_50",
19 |
20 | "resolution_scaling": [1, 2, 10, 20],
21 | "nbins": 16,
22 | "hist_sizes": [4, 8, 20, 80],
23 | "outer_point_weight": 0.5,
24 | "overlap_weight": 100,
25 | "distance_scaling_hor_required": true,
26 | "distance_scaling_ver_required": true,
27 |
28 | "translation_range": [0.05, 0.025, 0.015, 0.01],
29 | "angle_range": [10, 5, 3, 1],
30 |
31 | "weights_fragment_classifier": "model_weights/fragment_classifier/best_weights.hdf5",
32 | "alignment_score": "jigsawnet",
33 | "max_expand_threshold": 25,
34 | "bg_color": [0, 0, 0],
35 |
36 | "weights_jigsawnet": "model_weights/JigsawCNN_checkpoint",
37 | "JSN_Hyperparameters": {
38 | "width": 160,
39 | "height": 160,
40 | "depth": 3,
41 | "batch_size": 64,
42 | "weight_decay": 1e-4,
43 | "learning_rate": 1e-4,
44 | "total_training_step": 30000,
45 | "learner_num": 5
46 | }
47 |
48 | }
--------------------------------------------------------------------------------
/force_config.txt:
--------------------------------------------------------------------------------
1 | image1:left
2 | image2:right
--------------------------------------------------------------------------------
/img/figure2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/computationalpathologygroup/pythostitcher/ab2e573a141803a076ffd153b0cc747129c294eb/img/figure2.png
--------------------------------------------------------------------------------
/img/tform_progression.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/computationalpathologygroup/pythostitcher/ab2e573a141803a076ffd153b0cc747129c294eb/img/tform_progression.gif
--------------------------------------------------------------------------------
/model_weights/JigsawCNN_checkpoint/README.txt:
--------------------------------------------------------------------------------
  1 | The weights for JigsawNet were too large to be put on GitHub. Therefore, these weights come packaged with the Docker container used to run PythoStitcher.
--------------------------------------------------------------------------------
/model_weights/fragment_classifier/best_weights.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/computationalpathologygroup/pythostitcher/ab2e573a141803a076ffd153b0cc747129c294eb/model_weights/fragment_classifier/best_weights.hdf5
--------------------------------------------------------------------------------
/shredder/line_utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | def interpolate_contour(contour):
5 | """
6 | Function to interpolate a contour which is represented by a set of points.
7 | Example:
8 | contour = [[0, 1], [1, 5], [2, 10]]
9 | new_contour = [[0, 1], [0, 2], [0, 3], [1, 4], [1, 5], [1, 6] etc.]
10 | """
11 |
12 | assert type(contour) == np.ndarray, "contour must be of type numpy array"
13 | assert len(contour.shape) == 2, "contour must be 2-dimensional"
14 |
15 | for i in range(len(contour) - 1):
16 |
17 | # Get x and y values to interpolate on
18 | xvals = np.array([contour[i, 0], contour[i + 1, 0]]).astype("int")
19 | yvals = np.array([contour[i, 1], contour[i + 1, 1]]).astype("int")
20 |
21 | # Create steps of size 1
22 | max_dif = np.max([np.abs(xvals[1] - xvals[0]), np.abs(yvals[1] - yvals[0])])
23 | new_xvals = np.linspace(xvals[0], xvals[1], num=max_dif).astype("int")
24 | new_yvals = np.linspace(yvals[0], yvals[1], num=max_dif).astype("int")
25 |
26 | # Get interpolated contour
27 | interp_contour = np.array([new_xvals, new_yvals]).T
28 |
29 | # Add interpolated values to new contour
30 | if i == 0:
31 | new_contour = interp_contour
32 | else:
33 | new_contour = np.vstack([new_contour, interp_contour])
34 |
35 | return new_contour
36 |
37 |
38 | def apply_im_tform_to_coords(coords, fragment, downscale, rot_k):
39 | """
40 | Convenience function to apply a 90 degree image rotation to coordinates. You could
41 | of course do this through coordinate transform, but this is overly complex due to
42 | changing centers of rotation and image shifts. This function just converts the coords
43 | to a binary image, rotates the image and extracts the coords.
44 | """
45 |
46 | # Downscale coords for efficiency
47 | coords_ds = (coords / downscale).astype("int")
48 |
49 | # Clip coords to prevent out of bounds indexing due to rounding errors
50 | coords_image_dims = (int(fragment.width / downscale),
51 | int(fragment.height / downscale))
52 | coords_ds_x = np.clip(coords_ds[:, 0], 0, coords_image_dims[0]-1)
53 | coords_ds_y = np.clip(coords_ds[:, 1], 0, coords_image_dims[1]-1)
54 |
55 | # Convert to image
56 | coords_image = np.zeros((coords_image_dims))
57 | coords_image[coords_ds_x, coords_ds_y] = 1
58 |
59 | # Rot image and extract coords
60 | coords_image = np.rot90(coords_image, rot_k, (0, 1))
61 | r, c = np.nonzero(coords_image)
62 | coords_image_rot = np.vstack([r, c]).T
63 | coords_image_rot = (coords_image_rot * downscale).astype("int")
64 |
65 | # Sort coords by x or y values depending on line direction
66 | if np.std(coords_ds[:, 0]) > np.std(coords_ds[:, 1]):
67 | coords_image_rot_sort = sorted(coords_image_rot, key=lambda x: x[0])
68 | coords_image_rot_sort = np.array(coords_image_rot_sort)
69 | else:
70 | coords_image_rot_sort = sorted(coords_image_rot, key=lambda x: x[1])
71 | coords_image_rot_sort = np.array(coords_image_rot_sort)
72 |
73 | return coords_image_rot_sort
74 |
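# Illustrative usage sketch (not part of the original module): interpolate_contour
# densifies a sparse contour so that consecutive points lie roughly one pixel apart
# along the dominant axis.
if __name__ == "__main__":
    demo_contour = np.array([[0, 1], [1, 5], [2, 10]])
    dense_contour = interpolate_contour(demo_contour)
    print(dense_contour)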
--------------------------------------------------------------------------------
/shredder/main.py:
--------------------------------------------------------------------------------
1 | import multiresolutionimageinterface as mir
2 | import pathlib
3 | import argparse
4 | import tqdm
5 | import numpy as np
6 | import pyvips
7 | import matplotlib.pyplot as plt
8 | import cv2
9 | import copy
10 | import json
11 |
12 | from shapely.geometry import LineString, Point
13 |
14 | from line_utils import interpolate_contour, apply_im_tform_to_coords
15 |
16 |
17 | def collect_arguments():
18 | """
 19 |     Parse the command line arguments
20 | """
21 |
22 | # Parse arguments
23 | parser = argparse.ArgumentParser(
 24 |         description="shred whole mount images into fragments"
25 | )
26 | parser.add_argument(
27 | "--datadir", required=True, type=pathlib.Path, help="Path with the tiffs to shred"
28 | )
29 | parser.add_argument(
30 | "--maskdir", required=True, type=pathlib.Path, help="Path with the tissuemasks"
31 | )
32 | parser.add_argument(
33 | "--savedir", required=True, type=pathlib.Path, help="Path to save the shreds"
34 | )
35 | parser.add_argument(
36 | "--rotation", required=False, type=int, default=5, help="Random rotation of the whole"
37 | "mount before shredding"
38 | )
39 | parser.add_argument(
40 | "--fragments", required=False, type=int, default=4, help="Number of fragments to shred to"
41 | )
42 | args = parser.parse_args()
43 |
44 | # Extract arguments
45 | data_dir = pathlib.Path(args.datadir)
46 | mask_dir = pathlib.Path(args.maskdir)
47 | save_dir = pathlib.Path(args.savedir)
48 | rotation = args.rotation
49 | n_fragments = args.fragments
50 |
 51 |     assert data_dir.is_dir(), "provided data location doesn't exist"
52 | assert mask_dir.is_dir(), "provided mask location doesn't exist"
53 | assert rotation in np.arange(0, 26), "rotation must be in range [0, 25]"
54 | assert n_fragments in [2, 4], "number of fragments must be either 2 or 4"
55 |
56 | if not save_dir.is_dir():
57 | save_dir.mkdir(parents=True)
58 |
59 | print(
60 | f"\nRunning job with following parameters:"
61 | f"\n - Data dir: {data_dir}"
62 | f"\n - Tissue mask dir: {mask_dir}"
63 | f"\n - Save dir: {save_dir}"
64 | f"\n - Rotation: {rotation}"
65 | f"\n - Number of fragments: {n_fragments}"
66 | f"\n"
67 | )
68 |
69 | return data_dir, mask_dir, save_dir, rotation, n_fragments
70 |
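# Illustrative command line invocation (the paths below are placeholders):
#   python3 main.py --datadir /data/wholemounts --maskdir /data/tissuemasks \
#       --savedir /data/shredded --rotation 5 --fragments 4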
71 |
72 | class Shredder:
73 |
74 | def __init__(self, case, mask_dir, save_dir, rotation, n_fragments):
75 |
76 | self.case = case
77 | self.mask_path = mask_dir.joinpath(f"{self.case.stem}.tif")
78 | self.savedir = save_dir.joinpath(self.case.stem)
79 | self.rotation = rotation
80 | self.n_fragments = n_fragments
81 | self.lowres_level = 6
82 | self.pad_factor = 0.3
83 | self.n_samples = 10
84 | self.noise = 20
85 | self.step = 50
86 |
87 | self.parameters = {"rotation" : self.rotation, "n_fragments" : self.n_fragments}
88 |
89 | if not self.savedir.is_dir():
90 | self.savedir.mkdir(parents=True)
91 | self.savedir.joinpath("raw_images").mkdir()
92 | self.savedir.joinpath("raw_masks").mkdir()
93 |
94 | return
95 |
96 | def load_images(self):
97 | """
98 | Load the pyramidal image and get a downsampled image from it
99 | """
100 |
101 | # Get low resolution image
102 | self.opener = mir.MultiResolutionImageReader()
103 | self.mir_image = self.opener.open(str(self.case))
104 |
105 | self.ds_factor = int(self.mir_image.getLevelDownsample(self.lowres_level))
106 | self.lowres_image_dims = self.mir_image.getLevelDimensions(self.lowres_level)
107 | self.lowres_image = self.mir_image.getUCharPatch(
108 | 0,
109 | 0,
110 | *self.lowres_image_dims,
111 | self.lowres_level
112 | )
113 |
114 | # Remove paraffin for better tissue masking later
115 | self.lowres_image_hsv = cv2.cvtColor(self.lowres_image, cv2.COLOR_RGB2HSV)
116 | sat_thres = 15
117 | self.sat_mask = self.lowres_image_hsv[:, :, 1] < sat_thres
118 | self.lowres_image[self.sat_mask] = 255
119 |
120 | return
121 |
122 | def get_mask(self):
123 | """
124 | Get the postprocessed mask of the downsampled image
125 | """
126 |
127 | # Retrieve mask
128 | self.lowres_mask = np.all(self.lowres_image != [255, 255, 255], axis=2)
129 | self.lowres_mask = (self.lowres_mask * 255).astype("uint8")
130 |
131 | ### Flood fill the mask to remove holes
132 | # 1. Get enlarged version as we want to floodfill the background
133 | self.temp_pad = int(0.05 * self.lowres_mask.shape[0])
134 | self.lowres_mask = np.pad(
135 | self.lowres_mask,
136 | [[self.temp_pad, self.temp_pad], [self.temp_pad, self.temp_pad]],
137 | mode="constant",
138 | constant_values=0,
139 | )
140 |
141 | # Slightly increase mask size, required later on
142 | strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20, 20))
143 | self.lowres_mask = cv2.dilate(self.lowres_mask, strel)
144 |
145 | # 2. Get floodfilled background
146 | seedpoint = (0, 0)
147 | self.floodfill_mask = np.zeros(
148 | (self.lowres_mask.shape[0] + 2, self.lowres_mask.shape[1] + 2)
149 | )
150 | self.floodfill_mask = self.floodfill_mask.astype("uint8")
151 | _, _, self.lowres_mask, _ = cv2.floodFill(
152 | self.lowres_mask,
153 | self.floodfill_mask,
154 | seedpoint,
155 | 255
156 | )
157 |
158 | # 3. Convert back to foreground using array slicing and inversion
159 | self.lowres_mask = (
160 | 1 - self.lowres_mask[self.temp_pad + 1: -(self.temp_pad + 1), self.temp_pad + 1: -(
161 | self.temp_pad + 1)]
162 | )
163 |
164 | # Get largest connected component to remove small islands
165 | num_labels, labeled_mask, stats, _ = cv2.connectedComponentsWithStats(
166 | self.lowres_mask, connectivity=8
167 | )
168 | largest_cc_label = np.argmax(stats[1:, -1]) + 1
169 | self.lowres_mask = ((labeled_mask == largest_cc_label) * 255).astype("uint8")
170 |
171 | return
172 |
173 | def process(self):
174 | """
175 | Method to get some image characteristics
176 | """
177 |
178 | ### First figure out the rotation of the image
179 | # 1. Get temporary enlarged mask
180 | temp_pad = int(self.pad_factor * np.min(self.lowres_mask.shape))
181 | temp_mask = np.pad(
182 | self.lowres_mask,
183 | [[temp_pad, temp_pad], [temp_pad, temp_pad]],
184 | mode="constant",
185 | constant_values=0,
186 | )
187 |
188 | # 2. Get largest contour
189 | cnt, _ = cv2.findContours(
190 | temp_mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE
191 | )
192 | cnt = np.squeeze(max(cnt, key=cv2.contourArea))
193 |
194 | # 3. Get rotation angle and apply rotation
195 | bbox = cv2.minAreaRect(cnt)
196 | angle = bbox[2]
197 | if angle > 45:
198 | angle = angle-90
199 |
200 | # Create some extra noise in angle to create some imperfect cuts
201 | angle_noise = np.random.randint(-self.rotation, self.rotation)
202 | self.angle = int(angle + angle_noise)
203 | rot_center = (0, 0)
204 | rot_mat = cv2.getRotationMatrix2D(center=rot_center, angle=self.angle, scale=1)
205 | temp_mask = cv2.warpAffine(
206 | src=temp_mask, M=rot_mat, dsize=temp_mask.shape[::-1]
207 | )
208 | temp_mask = ((temp_mask > 128)*255).astype("uint8")
209 |
210 | # 4. Crop back to original size
211 | self.r, self.c = np.nonzero(temp_mask)
212 | self.lowres_mask = temp_mask[
213 | np.min(self.r):np.max(self.r), np.min(self.c):np.max(self.c)
214 | ]
215 |
216 | return
217 |
218 | def get_shred_parameters(self):
219 | """
220 | Method to get 4 equal masks
221 | """
222 |
223 | ### GET SHREDDING LINES ###
224 |
225 | # Get self.offset
226 | self.offset = 5
227 |
228 | # Get outer points of vertical shred line
229 | v_start = [int(0.5*self.lowres_mask.shape[1]), -self.offset]
230 | v_end = [int(0.5*self.lowres_mask.shape[1]), self.lowres_mask.shape[0]+self.offset-1]
231 |
232 | # Get the shredding line with some noise
233 | self.v_line_y = np.arange(v_start[1], v_end[1]+self.step, step=self.step)
234 | self.v_line_x = [v_start[0]]
235 | while len(self.v_line_x) < len(self.v_line_y):
236 | self.v_line_x.append(self.v_line_x[-1] + np.random.randint(-self.noise, self.noise))
237 |
238 | self.parameters["step_size"] = self.step
239 | self.parameters["edge_curvature"] = self.noise
240 |
241 | # Get outer points of horizontal shred line
242 | h_start = [-self.offset, int(0.5*self.lowres_mask.shape[0])]
243 | h_end = [self.lowres_mask.shape[1]+self.offset-1, int(0.5*self.lowres_mask.shape[0])]
244 |
245 | # Get the shredding line with some noise
246 | self.h_line_x = np.arange(h_start[0], h_end[0]+self.step, step=self.step)
247 | self.h_line_y = [h_start[1]]
248 | while len(self.h_line_y) < len(self.h_line_x):
249 | self.h_line_y.append(self.h_line_y[-1] + np.random.randint(-self.noise, self.noise))
250 |
251 | ### \ GET SHREDDING LINES ###
252 |
253 | ### GET INTERSECTION ###
254 |
255 | # Convert to shapely format
256 | v_line_points = [Point(x, y) for x, y in zip(self.v_line_x, self.v_line_y)]
257 | v_line = LineString(v_line_points)
258 |
259 | h_line_points = [Point(x, y) for x, y in zip(self.h_line_x, self.h_line_y)]
260 | h_line = LineString(h_line_points)
261 |
262 | # Compute intersection
263 | intersection_point = v_line.intersection(h_line)
264 | self.intersection = [int(intersection_point.x), int(intersection_point.y)]
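# Assumes the noisy vertical and horizontal lines cross exactly once so that shapely
# returns a single Point; multiple crossings would yield a MultiPoint without .x/.y.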
265 |
266 | ### \ GET INTERSECTION ###
267 |
268 | ### Get final version for applying to the image
269 | # Interpolate for fair sampling later
270 | self.h_line = np.array([self.h_line_x, self.h_line_y]).T
271 | self.h_line_temp = interpolate_contour(self.h_line)
272 |
273 | # Only retain points in the mask
274 | self.h_line_temp = [i for i in self.h_line_temp if all(
275 | [0 128) * 255).astype("uint8")
343 |
344 | ### \\\ APPLY SHRED PARAMETERS TO IMAGE ###
345 |
346 | ### GET SHREDDED IMAGE FRAGMENTS ###
347 | seed_offset = 100
348 |
349 | if self.n_fragments == 2:
350 | seed_points = np.array([
351 | [self.intersection[0] - seed_offset, self.intersection[1]],
352 | [self.intersection[0] + seed_offset, self.intersection[1]]
353 | ])
354 | elif self.n_fragments == 4:
355 | seed_points = np.array([
356 | [self.intersection[0] - seed_offset, self.intersection[1] - seed_offset],
357 | [self.intersection[0] - seed_offset, self.intersection[1] + seed_offset],
358 | [self.intersection[0] + seed_offset, self.intersection[1] - seed_offset],
359 | [self.intersection[0] + seed_offset, self.intersection[1] + seed_offset],
360 | ])
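# Each seed point is offset from the line intersection so that it falls inside a
# different fragment of the shredded mask (left/right for 2 fragments, one per
# quadrant for 4 fragments).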
361 |
362 | # Get individual fragments based on connected component labeling
363 | self.mask_fragments = []
364 |
365 | num_labels, self.labeled_mask, stats, _ = cv2.connectedComponentsWithStats(
366 | self.shredded_mask, connectivity=8
367 | )
368 |
369 | # Get lists with the sampled points
370 | self.all_set_a = [self.h_line_left if i[0]0.5!).
141 | # By lowering the threshold we introduce more false positives which may
142 | # be required in our case as stitches will never be near perfect.
143 | correct_threshold = 1e-20
144 |
145 | # Create filtered alignments file to write results
146 | with open(
147 | parameters["save_dir"].joinpath("configuration_detection", "filtered_alignments.txt"), "w"
148 | ) as f1:
149 | for v1, v2, prob, trans in zip(
150 | all_inference_v1, all_inference_v2, all_inference_prob, all_inference_trans
151 | ):
152 | if prob > correct_threshold:
153 | f1.write("%d\t%d\t%f\t0\n" % (v1, v2, prob))
154 | f1.write(
155 | "%f %f %f\n%f %f %f\n0 0 1\n"
156 | % (trans[0, 0], trans[0, 1], trans[0, 2], trans[1, 0], trans[1, 1], trans[1, 2])
157 | )
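# Each retained pair is written as a "v1 v2 probability 0" header line followed by its
# 2x3 affine transform, padded to a 3x3 matrix with a fixed "0 0 1" bottom row.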
158 | f1.close()
159 |
160 | # Save figure with all probabilities
161 | plt.figure(figsize=(8, 14))
162 | plt.suptitle("Image pairs with JigsawNet score and attention box\n", fontsize=20)
163 | for c, (im, pred, bbox) in enumerate(
164 | zip(all_inference_images, all_inference_prob, all_inference_bbox), 1
165 | ):
166 |
167 | # Get bbox with attention
168 | im_size = resized_path_img.shape[0]
169 | [new_min_row_ratio, new_min_col_ratio, new_max_row_ratio, new_max_col_ratio] = bbox
170 | bbox_min_col = im_size * new_min_col_ratio
171 | bbox_max_col = im_size * new_max_col_ratio
172 | bbox_min_row = im_size * new_min_row_ratio
173 | bbox_max_row = im_size * new_max_row_ratio
174 | bbox_coords = np.array(
175 | [
176 | [bbox_min_col, bbox_min_row],
177 | [bbox_min_col, bbox_max_row],
178 | [bbox_max_col, bbox_max_row],
179 | [bbox_max_col, bbox_min_row],
180 | ]
181 | )
182 | bbox_closed = np.vstack([bbox_coords, bbox_coords[0]])
183 |
184 | plt.subplot(6, 4, c)
185 | plt.title(f"pred: {pred:.4f}")
186 | plt.imshow(im)
187 | plt.plot(bbox_closed[:, 0], bbox_closed[:, 1], c="g", linewidth=3)
188 | plt.axis("off")
189 | plt.tight_layout()
190 | plt.savefig(
191 | parameters["save_dir"].joinpath(
192 | "configuration_detection", "checks", f"jigsawnet_pred_expand{max_expand_threshold}.png"
193 | )
194 | )
195 | plt.close()
196 |
197 | # Reset graphs required when performing reassembly for multiple cases
198 | tf.compat.v1.reset_default_graph()
199 | parameters["log"].setLevel(logging.WARNING)
200 |
201 | del net, evaluator
202 |
203 | return
204 |
205 |
206 | def SingleTest(checkpoint_root, K, net, is_training=False):
207 | """
208 | Main function for evaluating own image data.
209 |
210 | Inputs:
211 | - checkpoint_root: path to weights
212 | - K: number of models in ensemble
213 | - net: the model to use (jigsawnet)
214 | - is_training: whether the model is in training mode
215 |
216 | Output:
217 | - Jigsawnet prediction
218 | """
219 |
220 | input = tf.keras.Input(
221 | shape=[net.params["height"], net.params["width"], net.params["depth"]], dtype=tf.float32
222 | )
223 | roi_box = tf.keras.Input(shape=[4], dtype=tf.float32)
224 |
225 | logits = net._inference(input, roi_box, is_training)
226 | probability = tf.nn.softmax(logits)
227 |
228 | # Get all models and restore the weights
229 | sessions = []
230 | saver = tf.compat.v1.train.Saver(max_to_keep=10)
231 |
232 | for i in range(K):
233 | check_point = os.path.join(checkpoint_root, "g%d" % i)
234 | sess = tf.compat.v1.Session()
235 | sess_init_op = tf.group(
236 | tf.compat.v1.global_variables_initializer(), tf.compat.v1.local_variables_initializer()
237 | )
238 | sess.run(sess_init_op)
239 | saver.restore(sess, tf.train.latest_checkpoint(check_point))
240 | sessions.append(sess)
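# One session is kept per ensemble member, each restoring its own checkpoint from the
# g0..g{K-1} subdirectories of the checkpoint root.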
241 |
242 | # Inference on JigsawNet. Note how this is only performed with batch_size=1. Perhaps
243 | # quite some potential to speed this up.
244 | while not net.close:
245 | if len(np.shape(net.evaluate_image)) < 4:
246 | net.evaluate_image = np.reshape(
247 | net.evaluate_image,
248 | [1, net.params["height"], net.params["width"], net.params["depth"]],
249 | )
250 | if len(np.shape(net.roi_box)) < 2:
251 | net.roi_box = np.reshape(net.roi_box, [1, 4])
252 |
253 | # Save predictions and probabilities
254 | preds = []
255 | probs = [] # correct and incorrect probability
256 | for i in range(K):
257 | pred, prob = sessions[i].run(
258 | [net.pred, probability], feed_dict={input: net.evaluate_image, roi_box: net.roi_box}
259 | )
260 | pred = pred[0]
261 | prob = prob[0]
262 | preds.append(pred)
263 | probs.append(prob)
264 | yield preds, probs
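# SingleTest acts as a generator: every iteration consumes the current net.evaluate_image
# and net.roi_box and yields the predictions/probabilities of all K ensemble members,
# until the caller sets net.close.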
265 |
266 | # Close sessions after inference
267 | for sess in sessions:
268 | sess.close()
269 |
270 | tf.compat.v1.reset_default_graph()
271 |
272 | return
273 |
--------------------------------------------------------------------------------
/src/assembly_utils/pairwise_alignment.py:
--------------------------------------------------------------------------------
1 | from .pairwise_alignment_utils import *
2 | from .fragment_classifier import Classifier
3 |
4 |
5 | def run_pairwise_alignment(parameters):
6 | """
7 | Pairwise alignment function to compute pairs of corresponding images which serve
8 | as the input for the JigsawNet model.
9 | """
10 |
11 | # Get all fragment filenames
12 | fragment_names = sorted(
13 | [i.name for i in parameters["save_dir"].joinpath("preprocessed_images").iterdir()]
14 | )
15 | assert len(fragment_names) > 0, "no fragments were found in the given directory"
16 |
17 | # Insert some more variables
18 | parameters["pa_fragment_names"] = fragment_names
19 | parameters["pa_resolution"] = [0.1]
20 |
21 | # Create fragment list .txt file
22 | with open(
23 | parameters["save_dir"].joinpath("configuration_detection", "fragment_list.txt"), "w"
24 | ) as f:
25 | for name in fragment_names:
26 | f.write(f"{name}\n")
27 |
28 | # Create background colour .txt file
29 | with open(parameters["save_dir"].joinpath("configuration_detection", "bg_color.txt"), "w") as f:
30 | f.write("0 0 0")
31 |
32 | # Get fragment classifier model
33 | classifier = Classifier(weights=parameters["weights_fragment_classifier"])
34 | classifier.build_model()
35 | parameters["fragment_classifier"] = classifier
36 |
37 | # Fetch all fragments
38 | fragments = []
39 | for fragment in fragment_names:
40 | parameters["fragment_name"] = fragment
41 | fragments.append(Fragment(kwargs=parameters))
42 |
43 | # Preprocess all images
44 | parameters["log"].log(parameters["my_level"], " - identifying stitch edges")
45 | for f in fragments:
46 | f.read_images()
47 | f.process_images()
48 | f.classify_stitch_edges()
49 | f.save_images()
50 | f.get_stitch_edges()
51 | if f.require_landmark_computation:
52 | f.save_landmark_points()
53 |
54 | plot_stitch_edge_classification(fragments=fragments, parameters=parameters)
55 |
56 | # Find matching pairs
57 | parameters["log"].log(parameters["my_level"], f" - computing pairwise alignment")
58 | explore_pairs(fragments=fragments, parameters=parameters)
59 |
60 | return
61 |
--------------------------------------------------------------------------------
/src/main.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import json
3 | import logging
4 | import os
5 | import pathlib
6 |
7 | from assembly_utils.detect_configuration import detect_configuration
8 | from preprocessing_utils.prepare_data import prepare_data
9 | from pythostitcher_utils.fragment_class import Fragment
10 | from pythostitcher_utils.full_resolution import generate_full_res
11 | from pythostitcher_utils.get_resname import get_resname
12 | from pythostitcher_utils.optimize_stitch import optimize_stitch
13 | from pythostitcher_utils.preprocess import preprocess
14 |
15 | os.environ["VIPS_CONCURRENCY"] = "20"
16 |
17 |
18 | def load_parameter_configuration(data_dir, save_dir, output_res):
19 | """
20 | Convenience function to load all the PythoStitcher parameters and pack them up
21 | in a dictionary for later use.
22 | """
23 |
24 | # Verify its existence
25 | config_file = pathlib.Path().absolute().parent.joinpath("config/parameter_config.json")
26 | assert config_file.exists(), "parameter config file not found"
27 |
28 | # Load main parameter config
29 | with open(config_file) as f:
30 | parameters = json.load(f)
31 |
32 | # Convert model weight paths to absolute paths
33 | parameters["weights_fragment_classifier"] = (
34 | pathlib.Path().absolute().parent.joinpath(parameters["weights_fragment_classifier"])
35 | )
36 | parameters["weights_jigsawnet"] = (
37 | pathlib.Path().absolute().parent.joinpath(parameters["weights_jigsawnet"])
38 | )
39 |
40 | # Insert parsed arguments
41 | parameters["data_dir"] = data_dir
42 | parameters["save_dir"] = save_dir
43 | parameters["patient_idx"] = data_dir.name
44 | parameters["output_res"] = output_res
45 | parameters["fragment_names"] = sorted([
46 | i.name for i in data_dir.joinpath("raw_images").iterdir() if not i.is_dir()
47 | ])
48 | parameters["n_fragments"] = len(parameters["fragment_names"])
49 | parameters["resolution_scaling"] = [
50 | i / parameters["resolutions"][0] for i in parameters["resolutions"]
51 | ]
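# resolution_scaling expresses every resolution relative to the first one; e.g. with
# hypothetical config values [0.05, 0.1, 0.25, 1.0] the scaling becomes [1, 2, 5, 20].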
52 |
53 | parameters["raw_image_names"] = sorted(
54 | [i.name for i in data_dir.joinpath("raw_images").iterdir() if not i.is_dir()]
55 | )
56 | if data_dir.joinpath("raw_masks").is_dir():
57 | parameters["raw_mask_names"] = sorted(
58 | [i.name for i in data_dir.joinpath("raw_masks").iterdir()]
59 | )
60 | else:
61 | parameters["raw_mask_names"] = [None] * len(parameters["raw_image_names"])
62 |
63 | # Some assertions
64 | assert parameters["n_fragments"] in [
65 | 2, 4,
66 | ], "pythostitcher only supports stitching 2/4 fragments"
67 |
68 | # Make directories for later saving
69 | dirnames = [
70 | pathlib.Path(parameters["save_dir"]),
71 | pathlib.Path(parameters["save_dir"]).joinpath("configuration_detection", "checks"),
72 | ]
73 |
74 | for d in dirnames:
75 | if not d.is_dir():
76 | d.mkdir(parents=True)
77 |
78 | return parameters
79 |
80 |
81 | def collect_arguments():
82 | """
83 | Function to parse arguments into main function
84 | """
85 |
86 | # Parse arguments
87 | parser = argparse.ArgumentParser(
88 | description="Stitch histopathology images into a pseudo whole-mount image"
89 | )
90 | parser.add_argument(
91 | "--datadir",
92 | required=True,
93 | type=pathlib.Path,
94 | help="Path to the case to stitch"
95 | )
96 | parser.add_argument(
97 | "--savedir",
98 | required=True,
99 | type=pathlib.Path,
100 | help="Directory to save the results",
101 | )
102 | parser.add_argument(
103 | "--resolution",
104 | required=True,
105 | default=1,
106 | type=float,
107 | help="Output resolution (µm/pixel) of the reconstructed image. Should be roughly "
108 | "in range of 0.25-16 with a factor 2 between steps (0.25-0.50-1.00 etc).",
109 | )
110 | args = parser.parse_args()
111 |
112 | # Extract arguments
113 | data_dir = pathlib.Path(args.datadir)
114 | resolution = args.resolution
115 |
116 | assert data_dir.is_dir(), "provided patient directory doesn't exist"
117 | assert resolution > 0, "output resolution must be positive"
118 |
119 | if not data_dir.joinpath("raw_images").is_dir():
120 | mode = "batch"
121 | save_dir = pathlib.Path(args.savedir)
122 | else:
123 | mode = "single"
124 | save_dir = pathlib.Path(args.savedir).joinpath(data_dir.name)
125 |
126 | return data_dir, save_dir, resolution, mode
127 |
128 |
129 | def run_case(data_dir, save_dir, output_res):
130 | """
131 | PythoStitcher is an automated and robust program for stitching prostate tissue
132 | fragments into a whole histological section.
133 |
134 | Original paper: https://www.nature.com/articles/srep29906
135 | Original Matlab code by Greg Penzias, 2016
136 | Python implementation by Daan Schouten, 2022
137 |
138 | Please see the data directory for how to structure the input images for
139 | Pythostitcher. The general structure is as follows, where Patient_identifier denotes
140 | any (anonymized) patient identifier. The current version requires either two or four
141 | fragments, support for other numbers of fragments may be added in a future version.
142 |
143 | ___________________________
144 | /data
145 | /{Patient_identifier}
146 | /raw_images
147 | {fragment_name}.mrxs
148 | {fragment_name}.mrxs
149 | /raw_masks
150 | {fragment_name}.tif
151 | {fragment_name}.tif
152 | ___________________________
153 |
154 | """
155 |
156 | # Sanity checks
157 | assert data_dir.joinpath("raw_images").is_dir(), "patient has no 'raw_images' directory"
158 | assert (
159 | len(list(data_dir.joinpath("raw_images").iterdir())) > 0
160 | ), "no images found in 'raw_images' directory"
161 |
162 | print(
163 | f"\nRunning job with following parameters:"
164 | f"\n - Data dir: {data_dir}"
165 | f"\n - Save dir: {save_dir}"
166 | f"\n - Output resolution: {output_res} µm/pixel\n"
167 | )
168 |
169 | # Collect arguments
170 | parameters = load_parameter_configuration(data_dir, save_dir, output_res)
171 |
172 | # Initiate logging file
173 | logfile = save_dir.joinpath("pythostitcher_log.txt")
174 | if logfile.exists():
175 | logfile.unlink()
176 |
177 | logging.basicConfig(
178 | filename=logfile,
179 | level=logging.WARNING,
180 | format="%(asctime)s %(message)s",
181 | datefmt="%Y-%m-%d %H:%M:%S",
182 | force=True
183 | )
184 | logging.addLevelName(parameters["my_level"], "output")
185 | log = logging.getLogger(f"{data_dir.name}")
186 |
187 | parameters["log"] = log
188 | parameters["log"].log(parameters["my_level"], f"Running job with following parameters:")
189 | parameters["log"].log(parameters["my_level"], f" - Data dir: {parameters['data_dir']}")
190 | parameters["log"].log(parameters["my_level"], f" - Save dir: {parameters['save_dir']}")
191 | parameters["log"].log(
192 | parameters["my_level"], f" - Output resolution: {parameters['output_res']}\n"
193 | )
194 |
195 | if not data_dir.joinpath("raw_masks").is_dir():
196 | parameters["log"].log(
197 | parameters["my_level"],
198 | f"WARNING: PythoStitcher did not find any raw tissuemasks. If you intend to use "
199 | f"PythoStitcher with pregenerated tissuemasks, please put these files in "
200 | f"[{data_dir.joinpath('raw_masks')}]. If no tissuemasks are supplied, "
201 | f"PythoStitcher will use a generic tissue segmentation which may not perform "
202 | f"as well for your use case. In addition, PythoStitcher will not "
203 | f"be able to generate the full resolution end result.",
204 | )
205 |
206 | ### MAIN PYTHOSTITCHER ###
207 | # Preprocess data
208 | prepare_data(parameters=parameters)
209 |
210 | # Detect configuration of fragments. Return the 3 most likely configurations in order
211 | # of likelihood.
212 | solutions = detect_configuration(parameters=parameters)
213 |
214 | # Loop over all solutions
215 | for count_sol, sol in enumerate(solutions, 1):
216 | parameters["log"].log(parameters["my_level"], f"### Exploring solution {count_sol} ###")
217 | parameters["detected_configuration"] = sol
218 | parameters["num_sol"] = count_sol
219 | parameters["sol_save_dir"] = parameters["save_dir"].joinpath(f"sol_{count_sol}")
220 |
221 | for count_res, res in enumerate(parameters["resolutions"]):
222 |
223 | # Set current iteration
224 | parameters["iteration"] = count_res
225 | parameters["res_name"] = get_resname(res)
226 | parameters["fragment_names"] = [sol[i].lower() for i in sorted(sol)]
227 |
228 | fragments = []
229 | for im_path, fragment_name in sol.items():
230 | fragments.append(
231 | Fragment(im_path=im_path, fragment_name=fragment_name, kwargs=parameters)
232 | )
233 |
234 | # Preprocess all images to a usable format for PythoStitcher
235 | preprocess(fragments=fragments, parameters=parameters)
236 |
237 | # Get optimal stitch using a genetic algorithm
238 | optimize_stitch(parameters=parameters)
239 |
240 | # Generate full resolution blended image
241 | generate_full_res(parameters=parameters, log=log)
242 |
243 | parameters["log"].log(
244 | parameters["my_level"], f"### Succesfully stitched solution {count_sol} ###\n",
245 | )
246 |
247 | parameters["log"].log(
248 | parameters["my_level"], f"\nPythoStitcher completed!",
249 | )
250 | del parameters, log
251 |
252 | return
253 |
254 |
255 | def main():
256 | """
257 | Main function to run PythoStitcher. PythoStitcher will automatically figure out if
258 | the provided data directory contains multiple patients. If so, it will initiate
259 | batch mode. Otherwise it will run in single mode.
260 | """
261 |
262 | # Get arguments and determine single/batch mode
263 | data_dir, save_dir, output_res, mode = collect_arguments()
264 |
265 | # Run PythoStitcher for a single case or a batch of cases.
266 | if mode == "single":
267 | run_case(data_dir, save_dir, output_res)
268 | elif mode == "batch":
269 |
270 | # Extract valid patients
271 | patients = sorted([i for i in data_dir.iterdir() if i.joinpath("raw_images").is_dir()])
272 |
273 | # Filter patients that have already been stitched
274 | patients = sorted([i for i in patients if not save_dir.joinpath(i.name, "sol_1").is_dir()])
275 | print(f"\n### Identified {len(patients)} cases. ###")
276 |
277 | for pt in patients:
278 |
279 | pt_data_dir = data_dir.joinpath(pt.name)
280 | pt_save_dir = save_dir.joinpath(pt.name)
281 | run_case(pt_data_dir, pt_save_dir, output_res)
282 |
283 | return
284 |
285 |
286 | if __name__ == "__main__":
287 | main()
288 |
--------------------------------------------------------------------------------
/src/preprocessing_utils/prepare_data.py:
--------------------------------------------------------------------------------
1 | import multiresolutionimageinterface as mir
2 | import numpy as np
3 | import cv2
4 | import matplotlib.pyplot as plt
5 | import torchstain
6 | from torchvision import transforms
7 | from scipy import ndimage
8 |
9 |
10 | class Processor:
11 | """
12 | This class will help with preprocessing your data in PythoStitcher. You don't need
13 | to execute this script yourself, PythoStitcher will automatically perform the
14 | preprocessing. If this script throws any error, double check that your data:
15 | - is in .tif or .mrxs format
16 | - has multiple resolution layers (pyramidal)
17 | """
18 | def __init__(self, image_file, mask_file, save_dir, level, count):
19 |
20 | assert isinstance(level, int), "level must be an integer"
21 |
22 | self.image_filename = image_file
23 | self.mask_filename = mask_file
24 | self.mask_provided = bool(mask_file)
25 | self.save_dir = save_dir
26 | self.new_level = level
27 | self.count = count
28 |
29 | self.opener = mir.MultiResolutionImageReader()
30 |
31 | return
32 |
33 | def load(self):
34 | """
35 | Function to load and downsample the raw mask to the provided level. If no mask
36 | is provided, it will be created through some simple processing.
37 | """
38 |
39 | # Load raw image
40 | self.raw_image = self.opener.open(str(self.image_filename))
41 | assert self.raw_image.valid(), "Loaded image was not valid"
42 |
43 | # Load raw mask if available
44 | if self.mask_provided:
45 | self.raw_mask = self.opener.open(str(self.mask_filename))
46 | assert self.raw_mask.valid(), "Loaded mask was not valid"
47 |
48 | # Use a plausible default value for the image level [issue #4]
49 | if self.new_level >= self.raw_image.getNumberOfLevels():
50 | print(f"The image_level parameter in the parameter_config.json file ({self.new_level}) "
51 | f"is larger than the number of available levels in the image "
52 | f"({self.raw_image.getNumberOfLevels()}). Using {self.raw_image.getNumberOfLevels() - 2} "
53 | f"as default, consider modifying the config file.")
54 | self.new_level = self.raw_image.getNumberOfLevels() - 2
55 |
56 | # Get downsampled image
57 | self.new_dims = self.raw_image.getLevelDimensions(self.new_level)
58 | self.image = self.raw_image.getUCharPatch(0, 0, *self.new_dims, self.new_level)
59 |
60 | # Get downsampled mask with same dimensions as downsampled image
61 | if self.mask_provided:
62 | mask_dims = [
63 | self.raw_mask.getLevelDimensions(i)
64 | for i in range(self.raw_mask.getNumberOfLevels())
65 | ]
66 | mask_level = mask_dims.index(self.new_dims)
67 | self.mask = self.raw_mask.getUCharPatch(0, 0, *self.new_dims, mask_level)
68 | if len(self.mask.shape) > 2:
69 | self.mask = np.squeeze(np.mean(self.mask, axis=2))
70 | self.mask = (((self.mask / np.max(self.mask)) > 0.5) * 255).astype("uint8")
71 |
72 | else:
73 | raise ValueError("PythoStitcher requires a tissue mask for stitching")
74 |
75 | return
76 |
77 | def get_otsu_mask(self):
78 | """
79 | Method to get the mask using Otsu thresholding. This mask will be combined with
80 | the tissue segmentation mask in order to filter out the fatty tissue.
81 | """
82 |
83 | # Convert to HSV space and perform Otsu thresholding
84 | self.image_hsv = cv2.cvtColor(self.image, cv2.COLOR_RGB2HSV)
85 | self.image_hsv = cv2.medianBlur(self.image_hsv[:, :, 1], 7)
86 | _, self.otsu_mask = cv2.threshold(self.image_hsv, 0, 255, cv2.THRESH_OTSU +
87 | cv2.THRESH_BINARY)
88 | self.otsu_mask = (self.otsu_mask / np.max(self.otsu_mask)).astype("uint8")
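# Otsu thresholding is applied to the median-blurred saturation channel, where stained
# tissue is saturated and the white slide background is not; the result is scaled to
# {0, 1} so it can later be multiplied with the tissue segmentation mask.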
89 |
90 | # Postprocess the mask a bit
91 | kernel = cv2.getStructuringElement(shape=cv2.MORPH_ELLIPSE, ksize=(8, 8))
92 | pad = int(0.1 * self.otsu_mask.shape[0])
93 | self.otsu_mask = np.pad(
94 | self.otsu_mask,
95 | [[pad, pad], [pad, pad]],
96 | mode="constant",
97 | constant_values=0
98 | )
99 | self.otsu_mask = cv2.morphologyEx(
100 | src=self.otsu_mask, op=cv2.MORPH_CLOSE, kernel=kernel, iterations=3
101 | )
102 | self.otsu_mask = self.otsu_mask[pad:-pad, pad:-pad]
103 |
104 | return
105 |
106 | def get_tissueseg_mask(self):
107 | """
108 | Function to postprocess the tissue segmentation mask. This mainly consists
109 | of getting the largest component and then cleaning up this mask.
110 | """
111 |
112 | # Get information on all connected components in the mask
113 | num_labels, labeled_im, stats, _ = cv2.connectedComponentsWithStats(
114 | self.mask, connectivity=8
115 | )
116 |
117 | # Background gets counted as a label, therefore an empty image will have 1 label.
118 | assert num_labels > 1, "mask is empty"
119 |
120 | # The largest label (excluding background) is the mask.
121 | largest_cc_label = np.argmax(stats[1:, -1]) + 1
122 | self.mask = ((labeled_im == largest_cc_label) * 255).astype("uint8")
123 |
124 | # Temporarily enlarge mask for better closing
125 | pad = int(0.1 * self.mask.shape[0])
126 | self.mask = np.pad(
127 | self.mask,
128 | [[pad, pad], [pad, pad]],
129 | mode="constant",
130 | constant_values=0
131 | )
132 |
133 | # Closing operation to close some holes on the mask border
134 | kernel = cv2.getStructuringElement(shape=cv2.MORPH_ELLIPSE, ksize=(10, 10))
135 | self.mask = cv2.morphologyEx(src=self.mask, op=cv2.MORPH_CLOSE, kernel=kernel, iterations=2)
136 |
137 | # Flood fill to remove holes inside mask. The floodfill mask is required by opencv
138 | seedpoint = (0, 0)
139 | floodfill_mask = np.zeros((self.mask.shape[0] + 2, self.mask.shape[1] + 2)).astype("uint8")
140 | _, _, self.mask, _ = cv2.floodFill(self.mask, floodfill_mask, seedpoint, 255)
141 | self.mask = self.mask[1+pad:-1-pad, 1+pad:-1-pad]
142 | self.mask = 1 - self.mask
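# cv2.floodFill returns the updated floodfill mask (with a 1 px border on each side) as
# its third value; cropping off that border plus the padding and inverting yields a
# hole-free foreground mask with values {0, 1}.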
143 |
144 | assert np.sum(self.mask) > 0, "floodfilled mask is empty"
145 |
146 | return
147 |
148 | def combine_masks(self):
149 | """
150 | Method to combine the tissue mask and the Otsu mask for fat filtering.
151 | """
152 |
153 | # Combine
154 | self.final_mask = self.otsu_mask * self.mask
155 |
156 | # Postprocess similar to tissue segmentation mask. Get largest cc and floodfill.
157 | num_labels, labeled_im, stats, _ = cv2.connectedComponentsWithStats(
158 | self.final_mask, connectivity=8
159 | )
160 | assert num_labels > 1, "mask is empty"
161 | largest_cc_label = np.argmax(stats[1:, -1]) + 1
162 | self.final_mask = ((labeled_im == largest_cc_label) * 255).astype("uint8")
163 |
164 | # Flood fill
165 | offset = 5
166 | self.final_mask = np.pad(
167 | self.final_mask,
168 | [[offset, offset], [offset, offset]],
169 | mode="constant",
170 | constant_values=0
171 | )
172 |
173 | seedpoint = (0, 0)
174 | floodfill_mask = np.zeros((self.final_mask.shape[0] + 2, self.final_mask.shape[1] + 2)).astype("uint8")
175 | _, _, self.final_mask, _ = cv2.floodFill(self.final_mask, floodfill_mask, seedpoint, 255)
176 | self.final_mask = self.final_mask[1 + offset:-1 - offset, 1 + offset:-1 - offset]
177 | self.final_mask = 1 - self.final_mask
178 |
179 | # Crop to nonzero pixels for efficient saving
180 | r, c = np.nonzero(self.final_mask)
181 | self.final_mask = self.final_mask[np.min(r) : np.max(r), np.min(c) : np.max(c)]
182 | self.image = self.image[np.min(r) : np.max(r), np.min(c) : np.max(c)]
183 | self.image = self.image.astype("uint8")
184 | self.final_mask = (self.final_mask * 255).astype("uint8")
185 |
186 | return
187 |
188 | def normalize_stains(self):
189 | """
190 | Function to normalize the stain of the image.
191 | """
192 |
193 | # Only normalize stains for the other images
194 | if self.count > 1:
195 |
196 | # Load reference image
197 | ref_image = cv2.imread(str(self.save_dir.joinpath("preprocessed_images", f"fragment1.png")), cv2.IMREAD_COLOR)
198 | ref_image = cv2.cvtColor(ref_image, cv2.COLOR_BGR2RGB)
199 |
200 | # Initialize stain normalizer
201 | T = transforms.Compose([
202 | transforms.ToTensor(),
203 | transforms.Lambda(lambda x: x*255)
204 | ])
205 | stain_normalizer = torchstain.normalizers.ReinhardNormalizer(backend="torch")
206 | stain_normalizer.fit(T(ref_image))
207 |
208 | # Apply stain normalization
209 | self.image = stain_normalizer.normalize(T(self.image))
210 | self.image = self.image.numpy().astype("uint8")
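# Reinhard normalization (via torchstain) matches the colour statistics of this fragment
# to the previously saved fragment1.png, so all fragments share a comparable stain
# appearance before stitching.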
211 |
212 | return
213 |
214 | def save(self):
215 | """
216 | Function to save the downsampled image and mask
217 | """
218 |
219 | # Save image
220 | image_savedir = self.save_dir.joinpath("preprocessed_images")
221 | if not image_savedir.is_dir():
222 | image_savedir.mkdir()
223 |
224 | image_savefile = image_savedir.joinpath(f"fragment{self.count}.png")
225 | cv2.imwrite(str(image_savefile), cv2.cvtColor(self.image, cv2.COLOR_RGB2BGR))
226 |
227 | # Save mask
228 | mask_savedir = self.save_dir.joinpath("preprocessed_masks")
229 | if not mask_savedir.is_dir():
230 | mask_savedir.mkdir()
231 |
232 | mask_savefile = mask_savedir.joinpath(f"fragment{self.count}.png")
233 | cv2.imwrite(str(mask_savefile), self.final_mask)
234 |
235 | return
236 |
237 |
238 | def prepare_data(parameters):
239 | """
240 | Downsample both images and masks to determine fragment configuration.
241 | """
242 |
243 | parameters["log"].log(parameters["my_level"], "Preprocessing raw images...")
244 |
245 | # Get all image files
246 | image_files = sorted(
247 | [i for i in parameters["data_dir"].joinpath("raw_images").iterdir() if not i.is_dir()]
248 | )
249 |
250 | # Get mask files if these are provided
251 | masks_provided = parameters["data_dir"].joinpath("raw_masks").is_dir()
252 | if masks_provided:
253 | mask_files = sorted([i for i in parameters["data_dir"].joinpath("raw_masks").iterdir()])
254 | assert len(image_files) == len(mask_files), "found unequal number of image/mask files!"
255 | else:
256 | mask_files = [None] * len(image_files)
257 |
258 | # Process and save image with corresponding mask (if available)
259 | for c, vars in enumerate(zip(image_files, mask_files), 1):
260 | image, mask = vars
261 | parameters["log"].log(parameters["my_level"], f" - {image.name.split('.')[0]}")
262 | data_processor = Processor(
263 | image_file=image,
264 | mask_file=mask,
265 | save_dir=parameters["save_dir"],
266 | level=parameters["image_level"],
267 | count=c,
268 | )
269 | data_processor.load()
270 | data_processor.get_otsu_mask()
271 | data_processor.get_tissueseg_mask()
272 | data_processor.combine_masks()
273 | data_processor.normalize_stains()
274 | data_processor.save()
275 |
276 | parameters["log"].log(parameters["my_level"], " > finished!\n")
277 | parameters["image_level"] = data_processor.new_level
278 |
279 | return
280 |
--------------------------------------------------------------------------------
/src/pythostitcher_utils/adjust_final_rotation.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 | import copy
4 |
5 | from .transformations import warp_image
6 |
7 |
8 | def adjust_final_rotation(image):
9 | """
10 | Custom function to compensate the slight rotation that might occur during the
11 | genetic algorithm.
12 |
13 | Input:
14 | - Image of all stitched quadrants
15 |
16 | Output:
17 | - Rotated image of all stitched quadrants
18 | """
19 |
20 | # Obtain mask
21 | mask = (image.astype("uint8")[:, :, 0] > 0) * 255
22 |
23 | # Get largest contour
24 | cnt, _ = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
25 | cnt = np.squeeze(max(cnt, key=cv2.contourArea))
26 |
27 | # Compute bounding box around contour
28 | bbox = cv2.minAreaRect(cnt)
29 |
30 | # Adjust angle
31 | angle = copy.deepcopy(bbox[2])
32 | if angle > 45:
33 | angle = 90 - angle
34 |
35 | # Get center of contour
36 | moment = cv2.moments(cnt)
37 | center_x = int(moment["m10"] / moment["m00"])
38 | center_y = int(moment["m01"] / moment["m00"])
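# The contour centroid (m10/m00, m01/m00) serves as the rotation center so the tissue
# stays roughly in place while the residual rotation is compensated.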
39 |
40 | # Get rotated image
41 | final_image = warp_image(
42 | src=image, center=(center_x, center_y), translation=(0, 0), rotation=-angle
43 | )
44 |
45 | return final_image
46 |
--------------------------------------------------------------------------------
/src/pythostitcher_utils/fuse_images_highres.py:
--------------------------------------------------------------------------------
1 | import copy
2 | import numpy as np
3 | import cv2
4 | import itertools
5 | import matplotlib.pyplot as plt
6 |
7 |
8 | def is_valid_contour(cnt):
9 | """
10 | Function to check whether a given contour is valid. We define invalid contours as
11 | contours which are very small and which look more like an artefact than an
12 | actual contour that helps in gradient blending. We try to filter these invalid
13 | contours based on 3 criteria:
14 | - small total contour length
15 | - small domain of rows/cols
16 | - small total contour area
17 |
18 | If any of these criteria is met, the contour is deemed invalid and the contour
19 | will not be used for the gradient blending. Most cutoff values were chosen
20 | empirically.
21 | """
22 |
23 | # Prepare contour
24 | cnt = np.squeeze(cnt)
25 |
26 | # Criterion: contour length
27 | if len(cnt) < 10:
28 | return False
29 |
30 | # Criterion: domain
31 | xcoords = cnt[:, 0]
32 | ycoords = cnt[:, 1]
33 |
34 | if (len(np.unique(xcoords)) < 5) or (len(np.unique(ycoords)) < 5):
35 | return False
36 |
37 | # Criterion: contour area
38 | area = cv2.contourArea(cnt)
39 | if area < 100:
40 | return False
41 |
42 | return True
43 |
44 |
45 | def get_gradients(bbox, overlap, direction, pad):
46 | """
47 | Custom function to obtain the gradient based on the direction. This function
48 | will also apply the mask to the gradient.
49 |
50 | Input:
51 | - Bounding box of the overlap
52 | - Mask of fragment 1
53 | - Overlapping area
54 | - Direction of overlap
55 |
56 | Output:
57 | - Masked gradient of overlap
58 | - Inverse masked gradient of overlap
59 | """
60 |
61 | # Get some bbox values
62 | bbox_center = bbox[0]
63 | angle = bbox[2]
64 |
65 | # Preallocate gradient field
66 | """
67 | gradient_2d = np.zeros_like(q1_mask)
68 | gradient_2d = np.pad(gradient_2d, [[pad, pad], [pad, pad]]).astype("float")
69 | """
70 | gradient_2d = np.zeros_like(overlap)
71 | overlap = overlap[pad:-pad, pad:-pad]
72 |
73 | # OpenCV provides angles in range [0, 90]. We rescale these values to range [-45, 45]
74 | # for our use case. One extra factor to take into account is that we have to swap
75 | # width and height for angles in original openCV range [45, 90].
76 | if bbox[2] < 45:
77 | angle = -angle
78 | width = int(bbox[1][0])
79 | height = int(bbox[1][1])
80 | elif bbox[2] >= 45:
81 | angle = 90 - angle
82 | width = int(bbox[1][1])
83 | height = int(bbox[1][0])
84 |
85 | # Get slicing locations
86 | xmin = int(bbox_center[0] - 0.5 * width)
87 | xmax = xmin + width
88 | ymin = int(bbox_center[1] - 0.5 * height)
89 | ymax = ymin + height
90 |
91 | # Create 2d gradient
92 | if direction == "horizontal":
93 | gradient_1d = np.linspace(1, 0, width)
94 | gradient_2d_fill = np.tile(gradient_1d, (height, 1))
95 | elif direction == "vertical":
96 | gradient_1d = np.linspace(1, 0, height)
97 | gradient_2d_fill = np.tile(gradient_1d, (width, 1))
98 | gradient_2d_fill = np.transpose(gradient_2d_fill)
99 |
100 | # Insert gradient in image and rotate it along its primary axis
101 | gradient_2d[ymin:ymax, xmin:xmax] = gradient_2d_fill*255
102 | rot_mat = cv2.getRotationMatrix2D(center=bbox_center, angle=angle, scale=1)
103 | gradient_2d = cv2.warpAffine(gradient_2d, rot_mat, dsize=gradient_2d.shape[::-1])
104 | gradient_2d = gradient_2d[pad:-pad, pad:-pad]
105 |
106 | # Apply overlap mask to gradient
107 | masked_gradient = (gradient_2d * overlap)/255
108 |
109 | # Get reverse gradient
110 | masked_gradient_rev = (1 - masked_gradient) * (masked_gradient > 0)
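# The reverse gradient is the complement of the gradient restricted to where it is
# nonzero, so within the overlap the two weights sum to (approximately) 1 and the two
# fragments can be alpha blended against each other.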
111 |
112 | return masked_gradient, masked_gradient_rev
113 |
114 |
115 | def fuse_images_highres(images, masks):
116 | """
117 | Custom function to merge overlapping fragments into a visually appealing combined
118 | image using alpha blending. This is accomplished by the following steps:
119 | 1. Compute areas of overlap between different fragments
120 | 2. Compute bounding box around the overlap
121 | 3. Compute alpha gradient field over this bounding box
122 | 4. Mask the gradient field with the area of overlap
123 | 5. Apply the masked gradient field to original image intensity
124 | 6. Sum all resulting (non)overlapping images to create the final image
125 |
126 | Inputs
127 | - Dicts with the image and tissue mask of each fragment
128 |
129 | Output
130 | - Blended image, gradient field, overlapping fragment names and contour validity flag
131 | """
132 |
133 | # Get plausible overlapping fragments
134 | names = list(images.keys())
135 | combinations = itertools.combinations(names, 2)
136 |
137 | # Possible combination pairs
138 | hor_combinations = [["ul", "ur"], ["ul", "lr"], ["ll", "ur"], ["ll", "lr"], ["left", "right"]]
139 | ver_combinations = [["ul", "ll"], ["ul", "lr"], ["ur", "ll"], ["ur", "lr"], ["top", "bottom"]]
140 |
141 | # Create some lists for iterating
142 | total_mask = np.sum(list(masks.values()), axis=0).astype("uint8")
143 | all_contours = []
144 | is_overlap_list = []
145 | overlapping_fragments = []
146 |
147 | # Create some dicts for saving results
148 | gradients = dict()
149 | overlaps = dict()
150 | nonoverlap = dict()
151 |
152 | # Some values for postprocessing
153 | patch_size_mean = 25
154 |
155 | # Loop over possible combinations of overlapping fragments
156 | for combi in combinations:
157 |
158 | # Ensure right direction in gradient
159 | all_combinations = hor_combinations + ver_combinations
160 | if list(combi) in all_combinations:
161 | q1_name, q2_name = combi
162 | elif list(combi[::-1]) in all_combinations:
163 | q2_name, q1_name = combi
164 |
165 | # Check if overlap is between horizontally or vertically aligned fragments
166 | is_horizontal = [q1_name, q2_name] in hor_combinations
167 | is_vertical = [q1_name, q2_name] in ver_combinations
168 |
169 | # Get fragments and masks
170 | q1_image = images[q1_name]
171 | q2_image = images[q2_name]
172 |
173 | q1_mask = masks[q1_name]
174 | q2_mask = masks[q2_name]
175 |
176 | # Check if there is any overlap
177 | overlap = np.squeeze(((q1_mask + q2_mask) == 2) * 1).astype("uint8")
178 | is_overlap = np.sum(overlap) > 0
179 | is_overlap_list.append(is_overlap)
180 | overlaps[(q1_name, q2_name)] = overlap
181 |
182 | # In case of overlap, apply alpha blending
183 | if is_overlap:
184 |
185 | # Save index of overlapping fragments
186 | overlapping_fragments.append([q1_name, q2_name])
187 |
188 | # Compute non overlapping part of fragments
189 | only_q1 = q1_image * (total_mask == 1)[:, :, np.newaxis]
190 | only_q2 = q2_image * (total_mask == 1)[:, :, np.newaxis]
191 | nonoverlap[q1_name] = only_q1
192 | nonoverlap[q2_name] = only_q2
193 |
194 | # When (nearly) entire image is overlap, we blend by using the average of
195 | # both images. We implement a small tolerance value (1%) to still apply 50/50
196 | # blending in case of a few stray voxels.
197 | eps = int((np.shape(overlap)[0] * np.shape(overlap)[1]) / 100)
198 | approx_max_overlap = np.shape(overlap)[0] * np.shape(overlap)[1] - eps
199 | if np.sum(overlap) > approx_max_overlap:
200 | gradients[(q1_name, q2_name)] = np.full(overlap.shape, 0.5)
201 | gradients[(q2_name, q1_name)] = np.full(overlap.shape, 0.5)
202 | continue
203 |
204 | # Pad overlap image to obtain rotated bounding boxes even in cases when
205 | # overlap reaches images border.
206 | pad = int(np.min(overlap.shape) / 2)
207 | overlap_pad = np.pad(overlap, [[pad, pad], [pad, pad]])
208 | overlap_pad = (overlap_pad * 255).astype("uint8")
209 |
210 | # Get contour of overlap
211 | cnt, _ = cv2.findContours(overlap_pad, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE,)
212 |
213 | # There are nearly always multiple contours, some existing of only a few
214 | # points which we don't want to include in gradient blending. Hence, we
215 | # filter actual contours rather than line-like artefacts, check function
216 | # for criteria.
217 | actual_cnts = [np.squeeze(c) for c in cnt if is_valid_contour(c)]
218 |
219 | # In case of multiple valid contours, create gradient for each and sum
220 | if len(actual_cnts) > 1:
221 | all_grads = []
222 | all_grads_rev = []
223 |
224 | for c in actual_cnts:
225 | all_contours.append(c)
226 | bbox = cv2.minAreaRect(c)
227 |
228 | # Get the gradient and its reverse
229 | if is_horizontal:
230 | grad, grad_rev = get_gradients(
231 | bbox=bbox,
232 | overlap=overlap_pad,
233 | direction="horizontal",
234 | pad=pad,
235 | )
236 |
237 | elif is_vertical:
238 | grad, grad_rev = get_gradients(
239 | bbox=bbox,
240 | overlap=overlap_pad,
241 | direction="vertical",
242 | pad=pad,
243 | )
244 |
245 | all_grads.append(grad)
246 | all_grads_rev.append(grad_rev)
247 |
248 | all_grad = np.sum(all_grads, axis=0)
249 | all_grad_rev = np.sum(all_grads_rev, axis=0)
250 |
251 | # Save the gradients
252 | gradients[(q1_name, q2_name)] = all_grad_rev
253 | gradients[(q2_name, q1_name)] = all_grad
254 |
255 | # In case of only 1 valid contour
256 | elif len(actual_cnts) == 1:
257 | c = np.squeeze(actual_cnts)
258 | all_contours.append(c)
259 | bbox = cv2.minAreaRect(c)
260 |
261 | # Get the gradient and its reverse
262 | if is_horizontal:
263 | all_grad, all_grad_rev = get_gradients(
264 | bbox=bbox,
265 | overlap=overlap_pad,
266 | direction="horizontal",
267 | pad=pad,
268 | )
269 |
270 | elif is_vertical:
271 | all_grad, all_grad_rev = get_gradients(
272 | bbox=bbox,
273 | overlap=overlap_pad,
274 | direction="vertical",
275 | pad=pad,
276 | )
277 |
278 | # Save the gradients
279 | gradients[(q1_name, q2_name)] = all_grad_rev
280 | gradients[(q2_name, q1_name)] = all_grad
281 |
282 | # Rare case when there is 1 contour but this contour is not valid and
283 | # basically an artefact. In this case we treat this as nonoverlap.
284 | else:
285 | is_overlap_list[-1] = False
286 |
287 | # else:
288 | # final_image_edit, final_grad, overlapping_fragments = None, None, None
289 | # valid_cnt = False
290 | # return final_image_edit, final_grad, overlapping_fragments, valid_cnt
291 |
292 | # Sum all non overlapping parts
293 | all_nonoverlap = np.sum(list(nonoverlap.values()), axis=0).astype("uint8")
294 |
295 | # Sum all overlapping parts relative to their gradient
296 | if any(is_overlap_list):
297 | grad_fragments = [images[str(j[0])] for j in gradients.keys()]
298 | all_overlap = np.sum(
299 | [
300 | (g[:, :, np.newaxis] * gq).astype("uint8")
301 | for g, gq in zip(gradients.values(), grad_fragments)
302 | ],
303 | axis=0,
304 | )
305 | else:
306 | all_overlap = np.zeros_like(all_nonoverlap)
307 |
308 | # Combine both parts and get copy for postprocessing
309 | final_image = all_nonoverlap + all_overlap
310 | final_image_edit = copy.deepcopy(final_image)
311 |
312 | # Clipping may be necessary for areas where more than 2 fragments overlap
313 | final_image_edit = np.clip(final_image_edit, 0, 255).astype("uint8")
314 |
315 | # Set 0 values to nan for plotting in blend summary.
316 | if len(list(gradients.values())) > 0:
317 | final_grad = list(gradients.values())[0]
318 | final_grad = final_grad.astype("float32")
319 | final_grad[final_grad == 0] = np.nan
320 | valid_cnt = True
321 | overlapping_fragments = overlapping_fragments[0]
322 | else:
323 | final_grad = np.full(final_image_edit.shape, np.nan)
324 | valid_cnt = False
325 |
326 | return final_image_edit, final_grad, overlapping_fragments, valid_cnt
327 |
--------------------------------------------------------------------------------
/src/pythostitcher_utils/fuse_images_lowres.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2
3 | import copy
4 | import itertools
5 |
6 |
7 | def fuse_images_lowres(images, parameters):
8 | """
9 | Custom function to merge overlapping fragments into a visually appealing combined
10 | image using alpha blending. This is accomplished by the following steps:
11 | 1. Find areas of overlap
12 | 2. Compute bounding box around overlap to estimate its direction
13 | 3. Apply gradient field to region of overlap
14 | 4. Use alpha blending to blend the region of overlap
15 | """
16 |
17 | # Simple threshold to get tissue masks
18 | masks = [(np.mean(im, axis=-1) > 0) * 1 for im in images]
19 |
20 | # Get plausible overlapping fragments
21 | fragment_names = parameters["fragment_names"]
22 | combinations = itertools.combinations(fragment_names, 2)
23 | # combinations = ["AB", "AC", "BD", "CD"]
24 |
25 | # Create some lists for iterating
26 | image_list = copy.deepcopy(images)
27 | mask_list = copy.deepcopy(masks)
28 | total_mask = np.sum(mask_list, axis=0)
29 | all_contours = []
30 |
31 | # Create some dicts and lists
32 | images = dict()
33 | masks = dict()
34 | overlaps = dict()
35 | nonoverlap = dict()
36 | gradients = []
37 | gradient_directions = []
38 | bounding_boxes = []
39 |
40 | # Make dict such that images are callable by letter later on
41 | for name, im, mask in zip(fragment_names, image_list, mask_list):
42 | images[name] = im
43 | masks[name] = mask
44 |
45 | # Postprocessing value
46 | patch_size_mean = 15
47 |
48 | # Loop over all combinations where overlap might occur (2 quadrants only)
49 | for combi in combinations:
50 |
51 | # Get quadrant names
52 | q1_name = combi[0]
53 | q2_name = combi[1]
54 |
55 | # Check if overlap is between horizontally or vertically aligned quadrants
56 | hor_combo = [["ul", "ur"], ["ll", "lr"], ["left", "right"]]
57 | is_horizontal = any([sorted([q1_name, q2_name]) == i for i in hor_combo])
58 | ver_combo = [["ll", "ul"], ["lr", "ur"], ["bottom", "top"]]
59 | is_vertical = any([sorted([q1_name, q2_name]) == i for i in ver_combo])
60 |
61 | # Get quadrant images and masks
62 | q1_image = images[q1_name]
63 | q2_image = images[q2_name]
64 | q1_mask = masks[q1_name]
65 | q2_mask = masks[q2_name]
66 |
67 | # Compute non overlapping part of quadrants
68 | only_q1 = q1_image * (total_mask == 1)[:, :, np.newaxis]
69 | only_q2 = q2_image * (total_mask == 1)[:, :, np.newaxis]
70 | nonoverlap[q1_name] = only_q1
71 | nonoverlap[q2_name] = only_q2
72 |
73 | # Compute overlapping part of quadrants
74 | overlap = ((q1_mask + q2_mask) == 2) * 1
75 | overlaps[combi] = overlap
76 |
77 | # Compute bbox around overlap
78 | contours, _ = cv2.findContours(
79 | (overlap * 255).astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE
80 | )
81 |
82 | # Check if overlap exists.
83 | is_overlap = len(contours) > 0
84 | if is_overlap and any([is_horizontal, is_vertical]):
85 |
86 | # Loop over all contours
87 | for cnt in contours:
88 |
89 | # Can only compute bounding box around contour when the contour is longer
90 | # than one single 2D point
91 | if len(cnt) > 2:
92 | all_contours.append(cnt)
93 | bbox = cv2.minAreaRect(cnt)
94 | bounding_boxes.append(bbox)
95 |
96 | # Extract bbox params
97 | bbox_center = bbox[0]
98 | angle = copy.deepcopy(bbox[2])
99 |
100 | # OpenCV defines angles in the cv2.minAreaRect function between
101 | # [0, 90] but instead of a rotation of 0-90 degrees we can also
102 | # rescale it to [0, 45] and swap width/height.
103 | if angle > 45:
104 | angle = 90 - angle
105 |
106 | # Prepopulate gradient field
107 | gradient_2d = np.zeros_like(q1_mask).astype("float")
108 |
109 | # If quadrants overlap horizontally
110 | if is_horizontal:
111 |
112 | # See comment in line 105. With angles closer to 90 than to 0
113 | # we swap width/height.
114 |
115 | if bbox[2] < 45:
116 | width = int(bbox[1][0])
117 | height = int(bbox[1][1])
118 | else:
119 | width = int(bbox[1][1])
120 | height = int(bbox[1][0])
121 |
122 | # Get slicing locations
123 | xmin = int(bbox_center[0] - 0.5 * width)
124 | xmax = xmin + width
125 | ymin = int(bbox_center[1] - 0.5 * height)
126 | ymax = ymin + height
127 |
128 | # Create 2d gradient
129 | gradient_1d = np.linspace(1, 0, width)
130 | gradient_2d_fill = np.tile(gradient_1d, (height, 1))
131 |
132 | # Rotate the gradient along its primary axis
133 | gradient_2d[ymin:ymax, xmin:xmax] = gradient_2d_fill
134 | rot_mat = cv2.getRotationMatrix2D(center=bbox_center, angle=-angle, scale=1)
135 | gradient_2d_warp = cv2.warpAffine(
136 | gradient_2d, rot_mat, dsize=gradient_2d.shape[::-1]
137 | )
138 | masked_gradient = gradient_2d_warp * overlap
139 |
140 | # Compute the reverse gradient for scaling the other quadrant
141 | gradient_2d_rev = np.zeros_like(q1_mask).astype("float")
142 | gradient_2d_rev[ymin:ymax, xmin:xmax] = np.fliplr(gradient_2d_fill)
143 | rot_mat_rev = cv2.getRotationMatrix2D(
144 | center=bbox_center, angle=-angle, scale=1
145 | )
146 | gradient_2d_warp_rev = cv2.warpAffine(
147 | gradient_2d_rev, rot_mat_rev, dsize=gradient_2d.shape[::-1]
148 | )
149 | masked_gradient_rev = gradient_2d_warp_rev * overlap
150 |
151 | # If quadrants overlap vertically
152 | elif is_vertical:
153 |
154 | # See comment in line 105. With angles closer to 90 than to 0
155 | # we swap width/height.
156 | if bbox[2] < 45:
157 | width = int(bbox[1][0])
158 | height = int(bbox[1][1])
159 | else:
160 | width = int(bbox[1][1])
161 | height = int(bbox[1][0])
162 |
163 | # Get slicing locations
164 | xmin = int(bbox_center[0] - 0.5 * width)
165 | xmax = xmin + width
166 | ymin = int(bbox_center[1] - 0.5 * height)
167 | ymax = ymin + height
168 |
169 | # Create 2d gradient
170 | gradient_1d = np.linspace(1, 0, height)
171 | gradient_2d_fill = np.tile(gradient_1d, (width, 1))
172 | gradient_2d_fill = np.transpose(gradient_2d_fill)
173 |
174 | # Rotate the gradient along its primary axis
175 | gradient_2d[ymin:ymax, xmin:xmax] = gradient_2d_fill
176 | rot_mat = cv2.getRotationMatrix2D(center=bbox_center, angle=-angle, scale=1)
177 | gradient_2d_warp = cv2.warpAffine(
178 | gradient_2d, rot_mat, dsize=gradient_2d.shape[::-1]
179 | )
180 | masked_gradient = gradient_2d_warp * overlap
181 |
182 | # Compute the reverse gradient for scaling the other quadrant
183 | gradient_2d_rev = np.zeros_like(q1_mask).astype("float")
184 | gradient_2d_rev[ymin:ymax, xmin:xmax] = np.flipud(gradient_2d_fill)
185 | rot_mat_rev = cv2.getRotationMatrix2D(
186 | center=bbox_center, angle=-angle, scale=1
187 | )
188 | gradient_2d_warp_rev = cv2.warpAffine(
189 | gradient_2d_rev, rot_mat_rev, dsize=gradient_2d_rev.shape[::-1],
190 | )
191 | masked_gradient_rev = gradient_2d_warp_rev * overlap
192 |
193 | # Save gradient and its direction for later use
194 | gradients.append(masked_gradient)
195 | gradients.append(masked_gradient_rev)
196 |
197 | gradient_directions.append(combi)
198 | gradient_directions.append(combi[::-1])
199 |
200 | # Sum all non overlapping parts
201 | all_nonoverlap = np.sum(list(nonoverlap.values()), axis=0).astype("uint8")
202 |
203 | # Sum all overlapping parts relative to their gradient
204 | if is_overlap:
205 |
206 | gradient_quadrants = [images[str(j[0])] for j in gradient_directions]
207 | all_overlap = np.sum(
208 | [
209 | (g[:, :, np.newaxis] * gq).astype("uint8")
210 | for g, gq in zip(gradients, gradient_quadrants)
211 | ],
212 | axis=0,
213 | )
214 |
215 | else:
216 | all_overlap = np.zeros_like(all_nonoverlap)
217 |
218 | # Combine both parts
219 | final_image = all_nonoverlap + all_overlap
220 |
221 | # Postprocess the final image to reduce stitching artefacts.
222 | final_image_edit = copy.deepcopy(final_image)
223 |
224 | # Loop over all contours
225 | if is_overlap:
226 |
227 | # Indices for getting patches
228 | p1, p2 = int(np.floor(patch_size_mean / 2)), int(np.ceil(patch_size_mean / 2))
229 |
230 | for cnt in all_contours:
231 |
232 | # Loop over all points in each contour
233 | for pt in cnt:
234 | # Replace each pixel value with the average value in a NxN neighbourhood
235 | # to reduce stitching artefacts
236 | x, y = np.squeeze(pt)[1], np.squeeze(pt)[0]
237 | patch = final_image[x - p1 : x + p2, y - p1 : y + p2, :]
238 | fill_val = np.mean(np.mean(patch, axis=0), axis=0)
239 | final_image_edit[x, y, :] = fill_val
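# Averaging a patch_size_mean x patch_size_mean neighbourhood over every contour pixel
# softens the visible seam along the overlap boundary.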
240 |
241 | # Clipping may be necessary for areas where more than 2 quadrants overlap
242 | final_image_edit = np.clip(final_image_edit, 0, 255).astype("uint8")
243 |
244 | return final_image_edit
245 |
--------------------------------------------------------------------------------
/src/pythostitcher_utils/get_resname.py:
--------------------------------------------------------------------------------
1 | def get_resname(res):
2 | """
3 | Custom function to convert the resolution fraction to a string. This is required
4 | for creating directories for each resolution.
5 |
6 | Input:
7 | - Resolution
8 |
9 | Output:
10 | - Resolution name
11 | """
12 |
13 | assert res <= 1, "resolution fraction must be equal to or smaller than the original image"
14 |
15 | resname = "res" + str(int(res * 1000)).zfill(4)
16 |
17 | return resname
18 |
--------------------------------------------------------------------------------
/src/pythostitcher_utils/gradient_blending.py:
--------------------------------------------------------------------------------
1 | import multiresolutionimageinterface as mir
2 | import numpy as np
3 | import cv2
4 | import pyvips
5 | import matplotlib.pyplot as plt
6 | import time
7 |
8 | from scipy.spatial.distance import cdist
9 |
10 | from .fuse_images_highres import fuse_images_highres, is_valid_contour
11 |
12 |
13 | def perform_blending(result_image, result_mask, full_res_fragments, log, parameters):
14 | """
15 | Function to blend areas of overlap using alpha blending.
16 |
17 | Inputs
18 | - Full resolution image
19 | - Full resolution mask
20 | - All fragments
21 | - Logging instance
22 | - Dictionary with parameters
23 |
24 | Output
25 | - Full resolution blended image
26 | """
27 |
28 | # Load .tif of the mask
29 | opener = mir.MultiResolutionImageReader()
30 | tif_mask = opener.open(parameters["tif_mask_path"])
31 |
32 | # Get output level closest to a 4k image
33 | best_mask_output_dims = 4000
34 | all_mask_dims = [tif_mask.getLevelDimensions(i) for i in range(tif_mask.getNumberOfLevels())]
35 | mask_ds_level = np.argmin([(i[0] - best_mask_output_dims) ** 2 for i in all_mask_dims])
36 |
37 | mask_ds = tif_mask.getUCharPatch(
38 | startX=0,
39 | startY=0,
40 | width=int(all_mask_dims[mask_ds_level][0]),
41 | height=int(all_mask_dims[mask_ds_level][1]),
42 | level=int(mask_ds_level),
43 | )
44 |
45 | # Get contour of overlapping areas and upsample to full resolution coords
46 | mask_cnts_ds, _ = cv2.findContours(
47 | (mask_ds == 2).astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE
48 | )
49 | mask_cnts = [np.squeeze(i * (2 ** mask_ds_level)) for i in mask_cnts_ds]
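# Scaling by 2**mask_ds_level assumes every pyramid level halves the resolution, which
# maps the downsampled contour coordinates back to full-resolution pixel coordinates.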
50 | mask_cnts = [i for i in mask_cnts if is_valid_contour(i)]
51 |
52 | # Param for saving blending result
53 | n_valid = 0
54 |
55 | # Find blending points per contour
56 | for c, mask_cnt in enumerate(mask_cnts):
57 |
58 | log.log(45, f" - area {c + 1}/{len(mask_cnts)}")
59 |
60 | # Get contour orientation and some starting variables
61 | cnt_dir = "hor" if np.std(mask_cnt[:, 0]) > np.std(mask_cnt[:, 1]) else "ver"
62 | long_end = 500
63 | max_image_width = all_mask_dims[0][0]
64 | max_image_height = all_mask_dims[0][1]
65 |
66 | # Get the length of the tile that needs to be stitched. Long end refers to the
67 | # direction of the contour while short end refers to the direction of the stitch.
68 | if cnt_dir == "hor":
69 |
70 | # Get limits of major axis which is horizontal
71 | long_end_start = np.min(mask_cnt[:, 0])
72 | long_end_end = np.max(mask_cnt[:, 0])
73 |
74 | # Get X coordinates spaced 500 pixels apart
75 | n_points = int(np.ceil((long_end_end - long_end_start) / long_end)) + 1
76 | cnt_points_x = list(np.linspace(long_end_start, long_end_end, n_points))
77 | cnt_points_x = list(map(int, cnt_points_x))
78 | cnt_points_x = np.array(
79 | [np.max([0, long_end_start - 50])]
80 | + cnt_points_x
81 | + [np.min([long_end_end + 50, max_image_width])]
82 | )
83 |
84 | # Draw a line along the long axis and sample y coordinates
85 | short_end_start = mask_cnt[np.argmin(mask_cnt[:, 0]), 1]
86 | short_end_end = mask_cnt[np.argmax(mask_cnt[:, 0]), 1]
87 | short_end_len = np.max([
88 | int((np.max(mask_cnt[:, 1]) - np.min(mask_cnt[:, 1])) * 2), 1000
89 | ])
90 | cnt_points_y = np.linspace(short_end_start, short_end_end, len(cnt_points_x))
91 | cnt_points_y = cnt_points_y.astype("int")
92 |
93 | else:
94 |
95 | # Get limits of major axis which is vertical
96 | long_end_start = np.min(mask_cnt[:, 1])
97 | long_end_end = np.max(mask_cnt[:, 1])
98 |
99 | # Get Y coordinates spaced 500 pixels apart
100 | n_points = int(np.ceil((long_end_end - long_end_start) / long_end)) + 1
101 | cnt_points_y = list(np.linspace(long_end_start, long_end_end, n_points))
102 | cnt_points_y = list(map(int, cnt_points_y))
103 | cnt_points_y = np.array(
104 | [np.max([0, long_end_start - 50])]
105 | + cnt_points_y
106 | + [np.min([long_end_end + 50, max_image_height])]
107 | )
108 |
109 | # Draw a line along long axis and sample x coordinates
110 | short_end_start = mask_cnt[np.argmin(mask_cnt[:, 1]), 0]
111 | short_end_end = mask_cnt[np.argmax(mask_cnt[:, 1]), 0]
112 | short_end_len = np.max([
113 | int((np.max(mask_cnt[:, 0]) - np.min(mask_cnt[:, 0])) * 2), 1000
114 | ])
115 | cnt_points_x = np.linspace(short_end_start, short_end_end, len(cnt_points_y))
116 | cnt_points_x = cnt_points_x.astype("int")
117 |
118 | seed_points = np.vstack([cnt_points_x, cnt_points_y]).T
119 |
120 | # Blend per seed point
121 | for seed in seed_points:
122 |
123 |                 # Get tile size, taking care not to cross the image borders
124 | if cnt_dir == "hor":
125 | xstart = np.max([0, seed[0]])
126 | ystart = np.max([0, seed[1] - int(0.5 * (short_end_len))])
127 | width = np.min([long_end, max_image_width - seed[0] - 1])
128 | height = np.min([short_end_len, max_image_height - seed[1] - 1])
129 |
130 | elif cnt_dir == "ver":
131 | xstart = np.max([0, seed[0] - int(0.5 * (short_end_len))])
132 | ystart = np.max([0, seed[1]])
133 | width = np.min([short_end_len, max_image_width - seed[0] - 1])
134 | height = np.min([long_end, max_image_height - seed[1] - 1])
135 |
136 | ### SANITY CHECK FOR TILE SELECTION
137 | """
138 | scale_factor = 2**mask_ds_level
139 | xvals = [
140 | xstart/scale_factor,
141 | (xstart+width)/scale_factor,
142 | (xstart+width)/scale_factor,
143 | xstart / scale_factor,
144 | xstart / scale_factor
145 | ]
146 | yvals = [
147 | ystart/scale_factor,
148 | ystart / scale_factor,
149 | (ystart + height) / scale_factor,
150 | (ystart + height) / scale_factor,
151 | ystart / scale_factor,
152 | ]
153 | plt.figure()
154 | plt.imshow(mask_ds)
155 | plt.plot(xvals, yvals, c="r")
156 | plt.show()
157 | """
158 |
159 |                 # Only perform blending in case of overlap
160 | tile_mask = result_mask.crop(xstart, ystart, width, height)
161 |
162 | if tile_mask.max() > 1:
163 |
164 | # Extract the corresponding image and mask for all fragments
165 | images = dict()
166 | masks = dict()
167 | for f in full_res_fragments:
168 | image_patch = f.final_image.crop(xstart, ystart, width, height)
169 | image = np.ndarray(
170 | buffer=image_patch.write_to_memory(),
171 | dtype=np.uint8,
172 | shape=[height, width, image_patch.bands],
173 | )
174 |
175 | mask_patch = f.outputres_mask.crop(xstart, ystart, width, height)
176 | mask = np.ndarray(
177 | buffer=mask_patch.write_to_memory(),
178 | dtype=np.uint8,
179 | shape=[height, width],
180 | )
181 |
182 | images[f.orientation] = image
183 | masks[f.orientation] = mask
184 |
185 | # Perform the actual blending
186 | blend, grad, overlap_fragments, valid = fuse_images_highres(images, masks)
187 |
188 | if valid:
189 |
190 | # Get overlap contours for plotting
191 | overlap = (~np.isnan(grad) * 255).astype("uint8")
192 | overlap_cnts, _ = cv2.findContours(
193 | overlap, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE,
194 | )
195 |
196 | # Show and save blended result if desired
197 | plt.figure(figsize=(12, 10))
198 | plt.suptitle(f"Result at x {xstart} and y {ystart}", fontsize=24)
199 | plt.subplot(231)
200 | plt.title(f"Mask fragment '{overlap_fragments[0]}'", fontsize=20)
201 | plt.imshow(masks[overlap_fragments[0]], cmap="gray")
202 | plt.axis("off")
203 | plt.clim([0, 1])
204 | plt.subplot(232)
205 | plt.title(f"Mask fragment '{overlap_fragments[1]}'", fontsize=20)
206 | plt.imshow(masks[overlap_fragments[1]], cmap="gray")
207 | plt.axis("off")
208 | plt.clim([0, 1])
209 | plt.subplot(233)
210 | plt.title("Mask overlap + gradient", fontsize=20)
211 | plt.imshow(
212 | (masks[overlap_fragments[0]] + masks[overlap_fragments[1]]) == 2,
213 | cmap="gray",
214 | )
215 | plt.imshow(grad, cmap="jet", alpha=0.5)
216 | plt.axis("off")
217 | plt.colorbar(fraction=0.046, pad=0.04)
218 | plt.subplot(234)
219 | plt.title(f"Image fragment '{overlap_fragments[0]}'", fontsize=20)
220 | plt.imshow(images[overlap_fragments[0]])
221 | for cnt in overlap_cnts:
222 | cnt = np.squeeze(cnt)
223 | if len(cnt.shape) > 1:
224 | plt.plot(cnt[:, 0], cnt[:, 1], c="r", linewidth=3)
225 | plt.axis("off")
226 | plt.subplot(235)
227 | plt.title(f"Image fragment '{overlap_fragments[1]}'", fontsize=20)
228 | plt.imshow(images[overlap_fragments[1]])
229 | for cnt in overlap_cnts:
230 | cnt = np.squeeze(cnt)
231 | if len(cnt.shape) > 1:
232 | plt.plot(cnt[:, 0], cnt[:, 1], c="r", linewidth=3)
233 | plt.axis("off")
234 | plt.subplot(236)
235 | plt.title("Blend image", fontsize=20)
236 | plt.imshow(blend)
237 | for cnt in overlap_cnts:
238 | cnt = np.squeeze(cnt)
239 | if len(cnt.shape) > 1:
240 | plt.plot(cnt[:, 0], cnt[:, 1], c="r", linewidth=3)
241 | plt.axis("off")
242 | plt.tight_layout()
243 | plt.savefig(
244 | f"{parameters['blend_dir']}/contour{str(c).zfill(3)}_tile{str(n_valid).zfill(4)}.png"
245 | )
246 | plt.close()
247 |
248 | ### PAPER FIGURE ###
249 | """
250 | plt.figure(figsize=(12, 6))
251 | plt.title("A", fontsize=40)
252 | plt.imshow(images[overlap_fragments[0]])
253 | plt.axis("off")
254 | plt.show()
255 |
256 | plt.figure(figsize=(12, 6))
257 | plt.title("B", fontsize=40)
258 | plt.imshow(images[overlap_fragments[1]])
259 | plt.axis("off")
260 | plt.show()
261 |
262 | plt.figure(figsize=(12, 6))
263 | plt.title("C", fontsize=40)
264 | plt.imshow(
265 | (masks[overlap_fragments[0]] + masks[overlap_fragments[1]]) == 2,
266 | cmap="gray",
267 | )
268 | plt.imshow(grad, cmap="jet", alpha=0.5)
269 | plt.axis("off")
270 | plt.colorbar(fraction=0.046, pad=0.04)
271 | plt.show()
272 |
273 |
274 | overlap = (masks[overlap_fragments[0]] + masks[overlap_fragments[1]]) == 2
275 | simple_add = (~overlap[:, :, np.newaxis] * images[overlap_fragments[0]]) + \
276 | (~overlap[:, :, np.newaxis] * images[overlap_fragments[1]]) + \
277 | (overlap[:, :, np.newaxis] * images[overlap_fragments[0]] * 0.5) + \
278 | (overlap[:, :, np.newaxis] * images[overlap_fragments[1]] * 0.5)
279 | simple_add = simple_add.astype("uint8")
280 | plt.figure(figsize=(12, 6))
281 | plt.title("D", fontsize=40)
282 | plt.imshow(simple_add[:, 850:1400, :])
283 | cnt = np.squeeze(overlap_cnts[0])
284 | # plt.plot(cnt[:, 0]-850, cnt[:, 1], c="r", linewidth=2)
285 | plt.axis("off")
286 | plt.show()
287 |
288 | plt.figure(figsize=(12, 6))
289 | plt.title("E", fontsize=40)
290 | plt.imshow(blend[:, 850:1400, :])
291 | cnt = np.squeeze(overlap_cnts[0])
292 | # plt.plot(cnt[:, 0]-850, cnt[:, 1], c="r", linewidth=1)
293 | plt.axis("off")
294 | plt.show()
295 | """
296 | ### \\\ PAPER FIGURE ###
297 |
298 | n_valid += 1
299 |
300 | # Fetch overlapping part from blended image and insert in image
301 | row, col = np.nonzero(overlap)
302 | w_start, w_end = np.min(col), np.max(col)
303 | h_start, h_end = np.min(row), np.max(row)
304 | blend_crop = blend[h_start:h_end, w_start:w_end]
305 |
306 | h, w = blend_crop.shape[:2]
307 | bands = 3
308 | dformat = "uchar"
309 | blend_image = pyvips.Image.new_from_memory(blend_crop.ravel(), w, h, bands, dformat)
310 |
311 | result_image = result_image.insert(blend_image, xstart+w_start, ystart+h_start)
312 |
313 | else:
314 | continue
315 |
316 | # Get the correct orientation of the prostate
317 | result_image = correct_orientation(
318 | mask = mask_ds,
319 | result_image = result_image,
320 | parameters = parameters,
321 | debug_visualization = False
322 | )
323 |
324 | return result_image
325 |
326 |
327 | def correct_orientation(mask, result_image, parameters, debug_visualization):
328 | """
329 | Function to automatically get the correct orientation of the prostate. We operate
330 | under the assumption that the dorsal side of the prostate is always slightly less
331 | curved than the ventral side. This means that a bounding box should fit tighter
332 |     against the dorsal side. Based on the location of the dorsal side, we can then apply
333 | a rotation such that the dorsal side is always aligned with the x-axis in the image.
334 | """
335 |
336 | # Compute bounding box around the whole mount
337 | cnt, _ = cv2.findContours(
338 | np.squeeze(mask).astype("uint8"),
339 | cv2.RETR_CCOMP,
340 | cv2.CHAIN_APPROX_NONE
341 | )
342 | cnt = np.squeeze(max(cnt, key=cv2.contourArea))
343 | bbox = cv2.minAreaRect(cnt)
344 | bbox_points = cv2.boxPoints(bbox)
345 |
346 | # Compute min distance from bbox corners to contour.
347 | box2cnt_dist = cdist(bbox_points, cnt).min(axis=1)
348 | box2cnt_idx = np.argsort(box2cnt_dist)[:2]
349 | valid_pairs = [[0, 1], [1, 2], [2, 3], [0, 3]]
350 |
351 | # Corners must be adjacent
352 | if sorted(box2cnt_idx) in valid_pairs:
353 |
354 | # Variables to determine orientation of prostate
355 | x_coords = [bbox_points[box2cnt_idx[0], 0], bbox_points[box2cnt_idx[1], 0]]
356 | y_coords = [bbox_points[box2cnt_idx[0], 1], bbox_points[box2cnt_idx[1], 1]]
357 | x_center = int(mask.shape[1] / 2)
358 | y_center = int(mask.shape[0] / 2)
359 |
360 | # Upper
361 | if all([np.std(x_coords) > np.std(y_coords), np.mean(y_coords) < y_center]):
362 | extra_rot = 180
363 | # Lower
364 | elif all([np.std(x_coords) > np.std(y_coords), np.mean(y_coords) > y_center]):
365 | extra_rot = 0
366 | # Left
367 | elif all([np.std(x_coords) < np.std(y_coords), np.mean(x_coords) < x_center]):
368 | extra_rot = 90
369 | # Right
370 | elif all([np.std(x_coords) < np.std(y_coords), np.mean(x_coords) > x_center]):
371 | extra_rot = 270
372 | else:
373 | extra_rot = 0
374 |
375 | # Also incorporate minor rotation from bbox. Adjust angle due to opencv convention
376 | angle = bbox[2]
377 | if angle > 45:
378 | angle = 90 - angle
379 |
380 | angle = extra_rot + angle
381 |
382 | # Also change some affine tform variables when we need to flip hor/ver axes
383 | if extra_rot in [90, 270]:
384 | new_width = result_image.height
385 | new_height = result_image.width
386 | dx = int((new_width - new_height) / 2)
387 | dy = int((new_height - new_width) / 2)
388 | else:
389 | new_width = result_image.width
390 | new_height = result_image.height
391 | dx, dy = 0, 0
392 |
393 | # Rotate image
394 | rotmat = cv2.getRotationMatrix2D(
395 | center=(int(result_image.width / 2), int(result_image.height / 2)), angle=angle, scale=1
396 | )
397 | result_image = result_image.affine(
398 | (rotmat[0, 0], rotmat[0, 1], rotmat[1, 0], rotmat[1, 1]),
399 | interpolate=pyvips.Interpolate.new("nearest"),
400 | oarea=[0, 0, new_width, new_height],
401 | odx=rotmat[0, 2] + dx,
402 | ody=rotmat[1, 2] + dy
403 | )
404 |
405 | if debug_visualization:
406 |
407 | # Temp write to disk for later loading in debug visualization
408 | result_image.write_to_file(
409 | str(
410 | parameters["sol_save_dir"].joinpath(
411 | "highres", f"stitched_image_{parameters['output_res']}_micron.tif"
412 | )
413 | ),
414 | tile=True,
415 | compression="jpeg",
416 | bigtiff=True,
417 | pyramid=True,
418 | Q=80,
419 | )
420 |
421 | opener = mir.MultiResolutionImageReader()
422 | result_image_tif = opener.open(str(
423 | parameters["sol_save_dir"].joinpath(
424 | "highres", f"stitched_image_{parameters['output_res']}_micron.tif"
425 | )))
426 |
427 | # Get lowres version
428 | level = 2
429 | downsample = result_image_tif.getLevelDownsample(level)
430 | result_image_tif_lowres = result_image_tif.getUCharPatch(
431 | 0,
432 | 0,
433 | *result_image_tif.getLevelDimensions(level),
434 | level
435 | )
436 |
437 | plt.figure()
438 | plt.imshow(result_image_tif_lowres)
439 |
440 | # Also apply to landmarks
441 | for i in range(parameters["n_fragments"]):
442 | coords = np.load(
443 | str(parameters["sol_save_dir"].joinpath(
444 | "highres", "eval", f"fragment{i + 1}_coordinates.npy")),
445 | allow_pickle=True
446 | ).item()
447 |
448 | line_a = coords["a"]
449 | ones_a = np.ones((len(line_a), 1))
450 | line_a = np.hstack([line_a, ones_a]) @ rotmat.T
451 | line_a[:, 0] += dx
452 | line_a[:, 1] += dy
453 |
454 | line_b = coords["b"]
455 | ones_b = np.ones((len(line_b), 1))
456 | line_b = np.hstack([line_b, ones_b]) @ rotmat.T
457 | line_b[:, 0] += dx
458 | line_b[:, 1] += dy
459 |
460 | rot_coords = {"a": line_a, "b": line_b}
461 | np.save(
462 | str(parameters["sol_save_dir"].joinpath(
463 | "highres", "eval", f"fragment{i + 1}_coordinates.npy")),
464 | rot_coords
465 | )
466 |
467 | line_a = line_a / downsample
468 | line_b = line_b / downsample
469 | plt.scatter(line_a[:, 0], line_a[:, 1], c="r")
470 | plt.scatter(line_b[:, 0], line_b[:, 1], c="r")
471 | plt.show()
472 |
473 | else:
474 | # Just apply rotation to landmarks
475 | for i in range(parameters["n_fragments"]):
476 | coords = np.load(
477 | str(parameters["sol_save_dir"].joinpath(
478 | "highres", "eval", f"fragment{i + 1}_coordinates.npy")),
479 | allow_pickle=True
480 | ).item()
481 |
482 | line_a = coords["a"]
483 | ones_a = np.ones((len(line_a), 1))
484 | line_a = np.hstack([line_a, ones_a]) @ rotmat.T
485 | line_a[:, 0] += dx
486 | line_a[:, 1] += dy
487 |
488 | line_b = coords["b"]
489 | ones_b = np.ones((len(line_b), 1))
490 | line_b = np.hstack([line_b, ones_b]) @ rotmat.T
491 | line_b[:, 0] += dx
492 | line_b[:, 1] += dy
493 |
494 | rot_coords = {"a": line_a, "b": line_b}
495 | np.save(
496 | str(parameters["sol_save_dir"].joinpath(
497 | "highres", "eval", f"fragment{i + 1}_coordinates.npy")),
498 | rot_coords
499 | )
500 |
501 | return result_image
502 |
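A note on the tiling logic above: seed points are placed every long_end (500) pixels along the dominant axis of each overlap contour, with a 50-pixel margin at either end, and the other coordinate is interpolated between the contour extremes. Below is a minimal, self-contained sketch of that sampling rule; the clamping against the image borders is omitted for brevity and the toy contour values are made up.

    import numpy as np

    def seed_points_along_contour(contour, spacing=500, margin=50):
        # Dominant axis = the one along which the contour coordinates spread the most
        horizontal = np.std(contour[:, 0]) > np.std(contour[:, 1])
        long_axis, short_axis = (0, 1) if horizontal else (1, 0)

        start, end = contour[:, long_axis].min(), contour[:, long_axis].max()
        n_points = int(np.ceil((end - start) / spacing)) + 1

        # Evenly spaced positions along the long axis, padded with a small margin
        long_coords = np.concatenate([
            [start - margin],
            np.linspace(start, end, n_points),
            [end + margin],
        ]).astype(int)

        # Matching short-axis positions, interpolated between the short-axis values
        # found at the two extremes of the long axis
        short_start = contour[np.argmin(contour[:, long_axis]), short_axis]
        short_end = contour[np.argmax(contour[:, long_axis]), short_axis]
        short_coords = np.linspace(short_start, short_end, len(long_coords)).astype(int)

        seeds = np.zeros((len(long_coords), 2), dtype=int)
        seeds[:, long_axis] = long_coords
        seeds[:, short_axis] = short_coords
        return seeds

    # Roughly horizontal overlap contour spanning x = 100..1400 at y ~ 200
    toy_contour = np.array([[100, 195], [600, 205], [1100, 198], [1400, 202]])
    print(seed_points_along_contour(toy_contour))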
--------------------------------------------------------------------------------
/src/pythostitcher_utils/landmark_evaluation.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib.pyplot as plt
3 | import multiresolutionimageinterface as mir
4 | import pandas as pd
5 |
6 | from scipy.spatial.distance import cdist
7 |
8 |
9 | class LandmarkEvaluator:
10 |
11 | def __init__(self, parameters):
12 |
13 | self.n_fragments = parameters["n_fragments"]
14 | self.image_dir = parameters["sol_save_dir"].joinpath("highres")
15 | self.coord_dir = parameters["sol_save_dir"].joinpath("highres", "eval")
16 | self.output_res = parameters["output_res"]
17 |
18 | return
19 |
20 | def get_pairs(self):
21 | """
22 | Method to get all unique line pairs. We compute these based on the location
23 | of the center points.
24 | """
25 |
26 | # Get all lines
27 | self.all_lines = []
28 | for i in range(self.n_fragments):
29 | coords = np.load(
30 | str(self.coord_dir.joinpath(f"fragment{i + 1}_coordinates.npy")),
31 | allow_pickle=True
32 | ).item()
33 |
34 | self.all_lines.append(coords["a"])
35 |
36 | if self.n_fragments == 4:
37 | self.all_lines.append(coords["b"])
38 |
39 | # Match lines based on center. This is more robust against cases with varying number
40 | # of fragments
41 | self.center_points = np.array([np.mean(i, axis=0) for i in self.all_lines])
42 |
43 | # Get all unique line combinations
44 | self.all_pairs = []
45 | for line1_idx, cp in enumerate(self.center_points):
46 |
47 | # Get closest line based on center point
48 | distances = np.squeeze(cdist(cp[np.newaxis, :], self.center_points))
49 | line2_idx = np.argsort(distances)[1]
50 |
51 | pair = sorted([line1_idx, line2_idx])
52 | if not pair in self.all_pairs:
53 | self.all_pairs.append(pair)
54 |
55 | # Sanity check
56 | assert len(np.unique(self.all_pairs)) == len(self.all_lines), \
57 | f"could not find {int(len(self.all_lines) / 2)} unique line pairs"
58 |
59 | return
60 |
61 | def get_distances(self):
62 | """
63 | Method to compute the average distance between stitch lines.
64 | """
65 |
66 | self.all_line_distances = dict()
67 |
68 | for count, (line1_idx, line2_idx) in enumerate(self.all_pairs, 1):
69 |
70 | # Get lines and sort them so we match the right coordinates of both lines
71 | line1 = self.all_lines[line1_idx]
72 |             is_hor = np.std(line1[:, 0]) > np.std(line1[:, 1])
73 | line1 = sorted(line1, key=lambda x: x[0]) if is_hor else sorted(line1,
74 | key=lambda x: x[1])
75 | line1 = np.array(line1)
76 |
77 | line2 = self.all_lines[line2_idx]
78 |             is_hor = np.std(line2[:, 0]) > np.std(line2[:, 1])
79 | line2 = sorted(line2, key=lambda x: x[0]) if is_hor else sorted(line2,
80 | key=lambda x: x[1])
81 | line2 = np.array(line2)
82 |
83 | # Compute distances
84 |             distances = [float(cdist(line1[i][np.newaxis, :], line2[i][np.newaxis, :])) \
85 | for i in range(len(line1))]
86 |
87 | self.all_line_distances[f"stitch_line_{count}"] = distances
88 |
89 | return
90 |
91 | def scale_distances(self):
92 | """
93 | Method to scale the distances computed previously with the spacing.
94 | """
95 |
96 | # Load stitched image
97 | self.opener = mir.MultiResolutionImageReader()
98 | self.image_path = str(
99 | self.image_dir.joinpath(f"stitched_image_{self.output_res}_micron.tif")
100 | )
101 | self.image = self.opener.open(str(self.image_path))
102 |
103 | # Get scale factor from pixel spacing
104 | self.spacing = self.image.getSpacing()[0]
105 |
106 |         # Scale distances by the pixel spacing so we get them in micron
107 | for key in self.all_line_distances.keys():
108 | self.all_line_distances[key] = [i*self.spacing for i in self.all_line_distances[key]]
109 |
110 | return
111 |
112 | def save_results(self):
113 | """
114 | Method to save the results from the residual registration error computation
115 | """
116 |
117 |         # Save all point-wise distances for each stitch line
118 | self.df = pd.DataFrame()
119 | fragment_names = []
120 | fragment_values = []
121 | for key, value in self.all_line_distances.items():
122 | names = [key] * len(value)
123 | fragment_names.extend(names)
124 | fragment_values.extend(value)
125 |
126 | self.df["stitch_line"] = fragment_names
127 | self.df["dist_in_micron"] = fragment_values
128 |
129 | self.df.to_csv(str(self.coord_dir.joinpath("residual_error.csv")))
130 |
131 | return
132 |
133 | def sanity_check(self):
134 | """
135 | Method to perform a sanity check and plot all the lines on the image.
136 | """
137 |
138 |         # Get image level whose width is closest to 2000 pixels
139 | best_image_output_dims = 2000
140 | all_image_dims = [
141 | self.image.getLevelDimensions(i) for i in range(self.image.getNumberOfLevels())
142 | ]
143 | sanity_level = np.argmin([(i[0] - best_image_output_dims) ** 2 for i in all_image_dims])
144 | sanity_downsampling = self.image.getLevelDownsample(int(sanity_level))
145 |
146 | image_ds = self.image.getUCharPatch(
147 | startX=0,
148 | startY=0,
149 | width=int(all_image_dims[sanity_level][0]),
150 | height=int(all_image_dims[sanity_level][1]),
151 | level=int(sanity_level),
152 | )
153 |
154 | # Scale distance to pixels for easier interpretation
155 | sanity_all_dist = self.df["dist_in_micron"].tolist()
156 | sanity_all_dist = [i / (self.spacing * sanity_downsampling) for i in sanity_all_dist]
157 | sanity_avg_dist = np.mean(sanity_all_dist)
158 |
159 | # Plot image and all lines
160 | plt.figure()
161 | plt.title(f"avg dist: {sanity_avg_dist:.2f} pixels")
162 | plt.imshow(image_ds)
163 |
164 | for idx1, idx2 in self.all_pairs:
165 | line1 = self.all_lines[idx1] / sanity_downsampling
166 | line2 = self.all_lines[idx2] / sanity_downsampling
167 | plt.scatter(line1[:, 0], line1[:, 1], c="cornflowerblue")
168 | plt.scatter(line2[:, 0], line2[:, 1], c="darkblue")
169 |
170 | plt.savefig(str(self.coord_dir.joinpath("residual_error_figure.png")))
171 | plt.close()
172 |
173 | return
174 |
175 |
176 | def evaluate_landmarks(parameters):
177 | """
178 | This function computes the residual registration error for each of the stitch lines.
179 | """
180 |
181 |     evaluator = LandmarkEvaluator(parameters)
182 |     evaluator.get_pairs()
183 |     evaluator.get_distances()
184 |     evaluator.scale_distances()
185 |     evaluator.save_results()
186 |     evaluator.sanity_check()
187 |
188 | return
189 |
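LandmarkEvaluator.get_pairs matches each stitch line to its nearest neighbour based on the line centroids and keeps only the unique sorted index pairs. A minimal sketch of that pairing rule on toy data, for illustration only:

    import numpy as np
    from scipy.spatial.distance import cdist

    def pair_lines_by_center(lines):
        # Pair each line with its nearest neighbour based on the line centroids
        centers = np.array([np.mean(line, axis=0) for line in lines])
        pairs = []
        for idx, center in enumerate(centers):
            dists = np.squeeze(cdist(center[np.newaxis, :], centers))
            nearest = int(np.argsort(dists)[1])  # index 0 is the line itself
            pair = sorted([idx, nearest])
            if pair not in pairs:
                pairs.append(pair)
        return pairs

    # Two nearly coinciding stitch lines on the left, two on the right
    lines = [
        np.array([[0, 0], [0, 100]]), np.array([[5, 0], [5, 100]]),
        np.array([[500, 0], [500, 100]]), np.array([[505, 0], [505, 100]]),
    ]
    print(pair_lines_by_center(lines))  # [[0, 1], [2, 3]]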
--------------------------------------------------------------------------------
/src/pythostitcher_utils/line_utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | def apply_im_tform_to_coords(coords, fragment, rot_k):
5 | """
6 | Convenience function to apply a 90 degree image rotation to coordinates. You could
7 |     of course do this with a coordinate transform, but that is overly complex due to
8 | changing centers of rotation and image shifts. This function just converts the coords
9 | to a binary image, rotates the image and extracts the coords.
10 |
11 | NOTE: this function will also scale the coordinates to the same range as the
12 | final output image.
13 | """
14 |
15 | # Downscale for computational efficiency
16 | downscale = np.ceil(fragment.width/2000).astype("int")
17 |
18 | # Downscale coords for efficiency
19 | coords_ds = (coords / downscale).astype("int")
20 |
21 |     # Get downsampled image dims for the coordinate raster
22 | coords_image_dims = (int(fragment.width / downscale),
23 | int(fragment.height / downscale))
24 |
25 | coords_ds_x = coords_ds[:, 0]
26 | coords_ds_y = coords_ds[:, 1]
27 |
28 | # Convert coords to image. Use larger image to prevent out of bounds indexing.
29 | offset = int(0.3 * (coords_image_dims[0]))
30 | coords_image = np.zeros((coords_image_dims[0] + 2 * offset, coords_image_dims[1] + 2 * offset))
31 | coords_image[coords_ds_x + offset, coords_ds_y + offset] = 1
32 |
33 | # Sanity checks
34 | assert len(coords_ds_x) == len(coords_ds_y), \
35 | "mismatch in number of x/y coordinates, check input coordinates"
36 | assert len(coords_ds_x) == np.sum(coords_image).astype("int"), \
37 | "received duplicate coordinates, check input coordinates"
38 |
39 | # Rot image and extract coords
40 | coords_image = np.rot90(coords_image, rot_k, (0, 1))
41 | r, c = np.nonzero(coords_image)
42 | coords_image_rot = np.vstack([r - offset, c - offset]).T
43 | coords_image_rot = (coords_image_rot * downscale).astype("int")
44 |
45 | # Sort coords by x or y values depending on line direction
46 | if np.std(coords_image_rot[:, 0]) > np.std(coords_image_rot[:, 1]):
47 | coords_image_rot_sort = sorted(coords_image_rot, key=lambda x: x[0])
48 | coords_image_rot_sort = np.array(coords_image_rot_sort)
49 | else:
50 | coords_image_rot_sort = sorted(coords_image_rot, key=lambda x: x[1])
51 | coords_image_rot_sort = np.array(coords_image_rot_sort)
52 |
53 | return coords_image_rot_sort
54 |
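A minimal usage sketch of apply_im_tform_to_coords; the function only reads the width and height attributes of the fragment, so a simple stub object suffices here. The import path assumes src/ is on the Python path and the coordinate values are made up.

    import numpy as np
    from types import SimpleNamespace

    from pythostitcher_utils.line_utils import apply_im_tform_to_coords

    # Only fragment.width and fragment.height are used by the function
    fragment = SimpleNamespace(width=4000, height=3000)

    # A roughly horizontal line of landmark coordinates in (x, y) order
    coords = np.array([[200, 1500], [1200, 1520], [2200, 1540], [3200, 1560]])

    # Rotate the coordinates by a single 90 degree step (rot_k=1), mirroring np.rot90
    rotated = apply_im_tform_to_coords(coords, fragment, rot_k=1)
    print(rotated)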
--------------------------------------------------------------------------------
/src/pythostitcher_utils/map_tform_low_res.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 |
4 | from .get_resname import get_resname
5 |
6 |
7 | def map_tform_low_res(parameters):
8 | """
9 | Custom function to upsample the previously acquired tform matrix. This upsampling
10 |     can be performed linearly, since the images at the different resolutions were
11 |     preprocessed such that their sizes scale exactly with the change in resolution.
12 |
13 | Input:
14 | - Dict with parameters
15 |
16 | Output:
17 | - Upsampled transformation matrix
18 | """
19 |
20 | # Calculate ratio between current resolution and previous resolution
21 | ratio = (
22 | parameters["resolutions"][parameters["iteration"]]
23 | / parameters["resolutions"][parameters["iteration"] - 1]
24 | )
25 |
26 | # Set some filepaths for loading and saving
27 | prev_resname = get_resname(parameters["resolutions"][parameters["iteration"] - 1])
28 | prev_filepath_final_tform = (
29 | f"{parameters['sol_save_dir']}/" f"tform/{prev_resname}_tform_final.npy"
30 | )
31 |
32 | current_resname = get_resname(parameters["resolutions"][parameters["iteration"]])
33 | current_filepath_initial_tform = (
34 | f"{parameters['sol_save_dir']}/" f"tform/{current_resname}_tform_initial.npy"
35 | )
36 | # Load genetic algorithm tform
37 | if os.path.isfile(prev_filepath_final_tform):
38 | initial_tform = np.load(prev_filepath_final_tform, allow_pickle=True).item()
39 | else:
40 | raise ValueError(f"No transformation found in {prev_filepath_final_tform}")
41 |
42 | new_initial_tform = dict()
43 |
44 | # Apply conversion ratio for all fragments. Each transformation matrix is organised
45 | # as follows:
46 | # [translation_x (int), translation_y (int), angle (float),
47 | # center to rotate around (tuple), output_shape (tuple)]
48 | for fragment in parameters["fragment_names"]:
49 |
50 | new_center = [int(np.round(ratio * t)) for t in initial_tform[fragment][3]]
51 | new_center = tuple(new_center)
52 | new_outshape = [int(np.round(ratio * t)) for t in initial_tform[fragment][4]]
53 | new_outshape = tuple(new_outshape)
54 |
55 | new_initial_tform[fragment] = [
56 | int(np.round(initial_tform[fragment][0] * ratio)),
57 | int(np.round(initial_tform[fragment][1] * ratio)),
58 | np.round(initial_tform[fragment][2], 1),
59 | new_center,
60 | new_outshape,
61 | ]
62 |
63 | # Although we technically don't need to save this initial tform for this resolution, this
64 | # can come in handy for comparing the results of the genetic algorithm for a given
65 | # resolution.
66 | np.save(current_filepath_initial_tform, new_initial_tform)
67 |
68 | return new_initial_tform
69 |
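A small worked example of the linear upsampling described above, using a hypothetical tform entry and a hypothetical pair of resolutions (0.025 to 0.05); all pixel quantities are scaled by the ratio, only the angle stays unchanged.

    import numpy as np

    # Hypothetical tform for one fragment at resolution 0.025:
    # [translation_x, translation_y, angle, rotation center, output shape]
    tform_prev = [12, -8, 3.5, (150, 210), (480, 640)]

    # Moving from resolution 0.025 to 0.05 doubles every pixel quantity except the angle
    ratio = 0.05 / 0.025
    tform_new = [
        int(np.round(tform_prev[0] * ratio)),
        int(np.round(tform_prev[1] * ratio)),
        np.round(tform_prev[2], 1),
        tuple(int(np.round(ratio * t)) for t in tform_prev[3]),
        tuple(int(np.round(ratio * t)) for t in tform_prev[4]),
    ]
    print(tform_new)  # [24, -16, 3.5, (300, 420), (960, 1280)]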
--------------------------------------------------------------------------------
/src/pythostitcher_utils/optimize_stitch.py:
--------------------------------------------------------------------------------
1 | import time
2 | import pickle
3 | import cv2
4 | import pandas as pd
5 |
6 | from .get_resname import get_resname
7 | from .genetic_algorithm import genetic_algorithm
8 | from .map_tform_low_res import map_tform_low_res
9 | from .plot_tools import *
10 | from .adjust_final_rotation import adjust_final_rotation
11 | from .transformations import warp_image
12 | from .fuse_images_lowres import fuse_images_lowres
13 |
14 |
15 | def optimize_stitch(parameters):
16 | """
17 | Function to optimize the stitching between fragments. This will consist of the
18 | following steps:
19 | 1. Compute the smallest bounding box around the fragment
20 | 2. Rotate the fragment as a first step towards alignment
21 |     3. Globally align the fragments such that they share the same coordinate system
22 |     4. Identify corner points in the fragments and extract the relevant edges
23 | 5. Compute a Theil-Sen line through the edges as a robust approximation of the edge
24 | 6. Use a genetic algorithm to "stitch" the fragments together
25 |
26 | Input:
27 | - Dictionary with parameters
28 |     - Logging object (provided via the parameter dict)
29 |
30 | Output:
31 | - Final stitched image
32 | """
33 |
34 | parameters["log"].log(
35 | parameters["my_level"],
36 | f"Optimizing stitch at resolution {parameters['resolutions'][parameters['iteration']]}",
37 | )
38 |
39 | current_res_name = get_resname(parameters["resolutions"][parameters["iteration"]])
40 | dirpath_tform = f"{parameters['sol_save_dir']}/tform"
41 |
42 | dir_fragments = (
43 | f"{parameters['sol_save_dir']}/images/{parameters['slice_idx']}/" f"{current_res_name}"
44 | )
45 |
46 | # Check if optimized tform already exists
47 | parameters["filepath_tform"] = f"{dirpath_tform}/{current_res_name}_tform_final.npy"
48 | file_exists = os.path.isfile(parameters["filepath_tform"])
49 |
50 | # Start optimizing stitch
51 | if not file_exists:
52 |
53 | start_time = time.time()
54 | fragment_info = dict()
55 |
56 | for fragment in parameters["fragment_names"]:
57 |
58 | with open(f"{dir_fragments}/fragment_{fragment}", "rb") as f:
59 | fragment_info_file = pickle.load(f)
60 | fragment_info[str(fragment)] = fragment_info_file
61 | f.close()
62 |
63 | fragments = list(fragment_info.values())
64 |
65 | # Load images
66 | parameters["log"].log(parameters["my_level"], " - loading images...")
67 | for f in fragments:
68 | f.load_images()
69 |
70 | # Get center of the fragment
71 | for f in fragments:
72 | f.get_image_center()
73 |
74 | # Perform initialization at the lowest resolution
75 | if parameters["iteration"] == 0:
76 |
77 | # Get bounding box based on the tissue mask
78 | for f in fragments:
79 | f.get_bbox_corners(image=f.mask)
80 |
81 | # Get the initial transform consisting of rotation and cropping
82 | for f in fragments:
83 | f.get_initial_transform()
84 |
85 | # Plot the rotation result as a visual check
86 | plot_rotation_result(fragments=fragments, parameters=parameters)
87 |
88 | # Irrespective of number of fragments, we always need to compute an initial
89 | # pairwise transformation. In case of 2 fragments, this is also the final
90 | # initialization. In case of 4 fragments, we also need to compute an additional
91 | # pairwise transformation of the two formed pairs
92 | for f in fragments:
93 | f.get_tformed_images_pair(fragments=fragments)
94 |
95 | if parameters["n_fragments"] == 4:
96 | for f in fragments:
97 | f.get_tformed_images_total(fragments=fragments)
98 |
99 | # Get final tform params for plotting later on
100 | initial_tform = dict()
101 | for f in fragments:
102 | total_x = f.crop_trans_x + f.pad_trans_x + f.trans_x
103 | total_y = f.crop_trans_y + f.pad_trans_y + f.trans_y
104 | initial_tform[f.final_orientation] = [
105 | total_x,
106 | total_y,
107 | f.angle,
108 | f.image_center_pre,
109 | f.output_shape,
110 | ]
111 |
112 | np.save(f"{dirpath_tform}/{current_res_name}_tform_initial.npy", initial_tform)
113 |
114 | # If initial transformation already exists, load and upsample it.
115 | elif parameters["iteration"] > 0:
116 | initial_tform = map_tform_low_res(parameters)
117 |
118 | # Apply transformation to the original images
119 | for f in fragments:
120 | f.get_tformed_images(tform=initial_tform[f.final_orientation])
121 |
122 | # Required for cost function
123 | parameters["image_centers"] = [f.image_center_peri for f in fragments]
124 |
125 | # Plot transformation result
126 | plot_transformation_result(fragments=fragments, parameters=parameters)
127 |
128 | # Get edges from fragments
129 | parameters["log"].log(parameters["my_level"], f" - extracting edges from images...")
130 | for f in fragments:
131 | f.get_edges()
132 |
133 | # Compute Theil Sen lines through edges
134 | parameters["log"].log(
135 | parameters["my_level"], " - computing Theil-Sen estimation of edge..."
136 | )
137 | for f in fragments:
138 | f.fit_theilsen_lines()
139 |
140 | # Plot all acquired Theil-Sen lines
141 | plot_theilsen_result(fragments=fragments, parameters=parameters)
142 |
143 | # Optimization with genetic algorithm
144 | parameters["log"].log(
145 | parameters["my_level"], " - computing reconstruction with genetic algorithm..."
146 | )
147 | parameters["output_shape"] = fragments[0].tform_image.shape
148 |
149 | ga_dict = genetic_algorithm(
150 | fragments=fragments,
151 | parameters=parameters,
152 | initial_tform=initial_tform,
153 | )
154 | np.save(parameters["filepath_tform"], ga_dict)
155 |
156 | # Get final transformed image per fragment
157 | all_images = []
158 | for f in fragments:
159 | final_tform = ga_dict[f.final_orientation]
160 | f.final_image = warp_image(
161 | src=f.colour_image_original,
162 | center=final_tform[3],
163 | rotation=final_tform[2],
164 | translation=final_tform[:2],
165 | output_shape=final_tform[4],
166 | )
167 | all_images.append(f.final_image)
168 |
169 | # Get final fused image and display it
170 | final_image = fuse_images_lowres(images=all_images, parameters=parameters)
171 | plot_ga_result(final_image=final_image, parameters=parameters)
172 |
173 | # Provide verbose on computation time
174 | end_time = time.time()
175 | current_res = parameters["resolutions"][parameters["iteration"]]
176 | sec = np.round(end_time - start_time, 1)
177 | parameters["log"].log(
178 | parameters["my_level"],
179 | f" > time to optimize " f"resolution {current_res}: {sec} seconds\n",
180 | )
181 |
182 | # At final resolution provide some extra visualizations
183 | if parameters["iteration"] == 3:
184 |
185 | # Save the final result
186 | r, c = np.nonzero((final_image[:, :, 0] > 3) * 1)
187 | cv2.imwrite(
188 | f"{parameters['sol_save_dir']}/images/GA_endresult.png",
189 | cv2.cvtColor(
190 | final_image[np.min(r) : np.max(r), np.min(c) : np.max(c), :], cv2.COLOR_RGB2BGR
191 | ),
192 | )
193 |
194 | # Make a gif of the tform result
195 | make_tform_gif(parameters)
196 |
197 | # Plot the fitness trajectory over the multiple resolutions
198 | plot_ga_multires(parameters)
199 |
200 | else:
201 | parameters["log"].log(parameters["my_level"], " - already optimized this resolution!\n")
202 |
203 | return
204 |
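optimize_stitch is meant to be called once per entry in the resolutions list, with the iteration index advanced between calls: iteration 0 builds the initial alignment and later iterations upsample the previous tform via map_tform_low_res. A minimal, hypothetical driver sketch (the real orchestration lives in src/main.py, which also fills in the many other parameter keys used above; import path assumes src/ is on the Python path):

    from pythostitcher_utils.optimize_stitch import optimize_stitch

    def run_all_resolutions(parameters):
        # parameters["resolutions"] could look like [0.025, 0.05, 0.15, 0.25] (assumption)
        for iteration, _resolution in enumerate(parameters["resolutions"]):
            parameters["iteration"] = iteration
            optimize_stitch(parameters)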
--------------------------------------------------------------------------------
/src/pythostitcher_utils/plot_tools.py:
--------------------------------------------------------------------------------
1 | import glob
2 | import os
3 | import warnings
4 | import numpy as np
5 | import imageio
6 | import matplotlib.pyplot as plt
7 | import pandas as pd
8 |
9 | from .fuse_images_lowres import fuse_images_lowres
10 | from .get_resname import get_resname
11 |
12 |
13 | def plot_rotation_result(fragments, parameters):
14 | """
15 | Custom function to plot the result of the automatic rotation of all fragments.
16 |
17 | Input:
18 | - All fragments
19 |
20 | Output:
21 | - Figure displaying the rotation
22 | """
23 |
24 | # Get pad values for every image
25 | fragment_shapes = [f.mask.shape for f in fragments]
26 | max_shape = np.max(fragment_shapes, axis=0)
27 | pad_mask = [((max_shape - f.mask.shape) / 2).astype(int) for f in fragments]
28 | pad_rot_mask = [((max_shape - f.rot_mask.shape) / 2).astype(int) for f in fragments]
29 |
30 | # Apply padding
31 | padded_mask = [
32 | np.pad(f.mask, [[p[0], p[0]], [p[1], p[1]]]) for p, f in zip(pad_mask, fragments)
33 | ]
34 | padded_rot_mask = [
35 | np.pad(f.rot_mask, [[p[0], p[0]], [p[1], p[1]]]) for p, f in zip(pad_rot_mask, fragments)
36 | ]
37 |
38 | # Get x/y values of bounding box around fragment
39 | corners_x = [
40 | [c[0] + p[1] for c in f.bbox_corners] + [f.bbox_corners[0][0] + p[1]]
41 | for p, f in zip(pad_mask, fragments)
42 | ]
43 | corners_y = [
44 | [c[1] + p[0] for c in f.bbox_corners] + [f.bbox_corners[0][1] + p[0]]
45 | for p, f in zip(pad_mask, fragments)
46 | ]
47 |
48 | # Plot rotation result
49 | plt.figure(figsize=(6, len(fragments) * 3))
50 | plt.suptitle("Fragments before and after \nautomatic rotation", fontsize=20)
51 |
52 | for c, (pad, p_mask, p_rmask, c_x, c_y, f) in enumerate(
53 | zip(pad_mask, padded_mask, padded_rot_mask, corners_x, corners_y, fragments), 1
54 | ):
55 | plt.subplot(parameters["n_fragments"], 2, (c * 2) - 1)
56 | plt.axis("off")
57 | plt.title(f.final_orientation, fontsize=16)
58 | plt.imshow(p_mask, cmap="gray")
59 | plt.scatter(
60 | f.mask_corner_a[0] + pad[1], f.mask_corner_a[1] + pad[0], facecolor="r", s=100,
61 | )
62 | plt.plot(c_x, c_y, linewidth=4, c="r")
63 | plt.subplot(parameters["n_fragments"], 2, c * 2)
64 | plt.axis("off")
65 | plt.title(f.final_orientation, fontsize=16)
66 | plt.imshow(p_rmask, cmap="gray")
67 | plt.savefig(f"{parameters['sol_save_dir']}/images/debug/rotation_result.png")
68 | plt.close()
69 |
70 | return
71 |
72 |
73 | def plot_transformation_result(fragments, parameters):
74 | """
75 | Custom function to plot the result of the initial transformation to globally
76 | align the fragments.
77 |
78 | Input:
79 | - All fragments
80 | - Dict with parameters
81 |
82 | Output:
83 | - Figure displaying the aligned fragments
84 | """
85 |
86 | # Merge all individual fragments images into one final image
87 | images = [f.colour_image for f in fragments]
88 | result = fuse_images_lowres(images=images, parameters=parameters)
89 |
90 | current_res = parameters["resolutions"][parameters["iteration"]]
91 |
92 | # Plot figure
93 | plt.figure()
94 | plt.title(f"Initial alignment at resolution {current_res}")
95 | plt.imshow(result, cmap="gray")
96 | if parameters["iteration"] == 0:
97 | plt.savefig(f"{parameters['sol_save_dir']}/images/" f"ga_progression/initial_alignment.png")
98 | plt.close()
99 |
100 | return
101 |
102 |
103 | def plot_theilsen_result(fragments, parameters):
104 | """
105 | Custom function to plot the result of the Theil-Sen line approximation of the
106 | fragments' edges.
107 |
108 | Input:
109 | - All fragments
110 | - Dict with parameters
111 |
112 | Output:
113 | - Figure displaying the Theil-Sen lines for each fragment
114 | """
115 |
116 | current_res_name = get_resname(parameters["resolutions"][parameters["iteration"]])
117 |
118 | # Combine all images
119 | images = [f.colour_image for f in fragments]
120 | combi_image = fuse_images_lowres(images=images, parameters=parameters)
121 |
122 | # Set some plotting parameters
123 | ratio = parameters["resolution_scaling"][parameters["iteration"]]
124 | ms = np.sqrt(2500 * np.sqrt(ratio))
125 |
126 | # Plot theilsen lines with marked endpoints
127 | plt.figure()
128 | plt.title(
129 | f"Alignment at resolution {parameters['resolutions'][parameters['iteration']]}"
130 | f"\n before genetic algorithm"
131 | )
132 | plt.imshow(combi_image, cmap="gray")
133 | for f in fragments:
134 | if hasattr(f, "v_edge_theilsen_endpoints"):
135 | plt.plot(
136 | f.v_edge_theilsen_endpoints[:, 0],
137 | f.v_edge_theilsen_endpoints[:, 1],
138 | linewidth=2,
139 | color="g",
140 | )
141 | plt.scatter(
142 | f.v_edge_theilsen_coords[:, 0],
143 | f.v_edge_theilsen_coords[:, 1],
144 | marker="*",
145 | s=ms,
146 | color="g",
147 | label="_nolegend_",
148 | )
149 | if hasattr(f, "h_edge_theilsen_endpoints"):
150 | plt.plot(
151 | f.h_edge_theilsen_endpoints[:, 0],
152 | f.h_edge_theilsen_endpoints[:, 1],
153 | linewidth=2,
154 | color="b",
155 | )
156 | plt.scatter(
157 | f.h_edge_theilsen_coords[:, 0],
158 | f.h_edge_theilsen_coords[:, 1],
159 | marker="+",
160 | s=ms,
161 | color="b",
162 | label="_nolegend_",
163 | )
164 | plt.savefig(
165 | f"{parameters['sol_save_dir']}/images/debug/theilsen_estimate_{current_res_name}.png"
166 | )
167 | plt.close()
168 |
169 | return
170 |
171 |
172 | def plot_rotated_bbox(fragments, parameters):
173 | """
174 | Custom function to plot the bounding box points after the box has been rotated.
175 | This basically offers a sanity check to verify that the corner points have been
176 | rotated correctly.
177 |
178 | Input:
179 | - All fragments
180 | - Parameter dict
181 |
182 | Output:
183 | - Figure displaying the rotated bounding box points
184 | """
185 |
186 | current_res_name = get_resname(parameters["resolutions"][parameters["iteration"]])
187 |
188 | # X and y coordinates of the bounding box
189 | plt.figure()
190 | plt.title("Rotated bunding box points")
191 |
192 | for c, f in enumerate(fragments):
193 | scat_x = [
194 | f.bbox_corner_a[0],
195 | f.bbox_corner_b[0],
196 | f.bbox_corner_c[0],
197 | f.bbox_corner_d[0],
198 | ]
199 | scat_y = [
200 | f.bbox_corner_a[1],
201 | f.bbox_corner_b[1],
202 | f.bbox_corner_c[1],
203 | f.bbox_corner_d[1],
204 | ]
205 | plt.subplot(parameters["n_fragments"], 2, c + 1)
206 | plt.imshow(f.tform_image, cmap="gray")
207 | plt.scatter(scat_x, scat_y, s=25, c="r")
208 | plt.savefig(f"{parameters['sol_save_dir']}/images/debug/rotation_bbox_{current_res_name}.png")
209 | plt.close()
210 |
211 | return
212 |
213 |
214 | def plot_tformed_edges(fragments, parameters):
215 | """
216 | Custom function to plot the transformed edges before inputting them into the
217 | genetic algorithm. This mainly serves as a sanity check while debugging.
218 |
219 | Input:
220 | - All fragments
221 |
222 | Output:
223 | - Figure displaying the transformed edges
224 | """
225 |
226 | current_res_name = get_resname(parameters["resolutions"][parameters["iteration"]])
227 |
228 | plt.figure()
229 | plt.title("Transformed edges")
230 | for f in fragments:
231 | if hasattr(f, "h_edge_tform"):
232 | plt.plot(f.h_edge[:, 0], f.h_edge[:, 1], c="b")
233 | if hasattr(f, "v_edge_tform"):
234 | plt.plot(f.v_edge[:, 0], f.v_edge[:, 1], c="g")
235 | plt.legend(["Hor", "Ver"])
236 | plt.savefig(
237 | f"{parameters['sol_save_dir']}/images/debug/tformed_edges_inputGA_{current_res_name}.png"
238 | )
239 | plt.close()
240 |
241 | return
242 |
243 |
244 | def plot_tformed_theilsen_lines(fragments, parameters):
245 | """
246 | Custom function to plot the transformed Theilsen lines before inputting them
247 | into the genetic algorithm. This function is analogous to the plot_tformed_edges
248 | function and serves as a sanity check during debugging.
249 |
250 | Input:
251 | - All fragments
252 |
253 | Output:
254 | - Figure displaying the transformed Theil-Sen lines
255 | """
256 |
257 | current_res_name = get_resname(parameters["resolutions"][parameters["iteration"]])
258 |
259 | plt.figure()
260 | plt.title("Transformed Theil-Sen lines")
261 | for f in fragments:
262 | if hasattr(f, "h_edge_theilsen"):
263 | plt.plot(f.h_edge_theilsen_tform[:, 0], f.h_edge_theilsen_tform[:, 1], c="b")
264 | if hasattr(f, "v_edge_theilsen"):
265 | plt.plot(f.v_edge_theilsen_tform[:, 0], f.v_edge_theilsen_tform[:, 1], c="g")
266 | plt.legend(["Hor", "Ver"])
267 | plt.savefig(
268 | f"{parameters['sol_save_dir']}/images/debug/theilsenlines_inputGA_{current_res_name}.png"
269 | )
270 | plt.close()
271 |
272 | return
273 |
274 |
275 | def plot_ga_tform(fragments, parameters):
276 | """
277 | Custom function to show the transformation of the Theil-Sen lines which was found
278 | by the genetic algorithm.
279 |
280 | Input:
281 | - All fragments
282 |
283 | Output:
284 | - Figure displaying the transformed Theil-Sen lines by only taking into account
285 | the optimal transformation found by the genetic algorithm.
286 | """
287 |
288 | current_res_name = get_resname(parameters["resolutions"][parameters["iteration"]])
289 |
290 | plt.figure()
291 | plt.subplot(121)
292 | plt.title("Theil-Sen lines before GA tform")
293 | for f in fragments:
294 | plt.plot(f.h_edge_theilsen_coords, linewidth=3, color="b")
295 | plt.plot(f.v_edge_theilsen_coords, linewidth=3, color="g")
296 |
297 | plt.subplot(122)
298 | plt.title("Theil-Sen lines after GA tform")
299 | for f in fragments:
300 | plt.plot(f.h_edge_theilsen_tform, linewidth=3, color="b")
301 | plt.plot(f.v_edge_theilsen_tform, linewidth=3, color="g")
302 | plt.savefig(
303 | f"{parameters['sol_save_dir']}/images/debug/theilsenlines_outputGA_{current_res_name}.png"
304 | )
305 | plt.close()
306 |
307 | return
308 |
309 |
310 | def plot_ga_result(final_image, parameters):
311 | """
312 | Plotting function to plot the transformation of the fragments which was found
313 | by the genetic algorithm.
314 |
315 | Input:
316 | - All fragments
317 | - Dict with parameters
318 | - Final transformation from genetic algorithm
319 |
320 | Output:
321 | - Figure displaying the end result obtained by the genetic algorithm
322 | """
323 |
324 | current_res_name = get_resname(parameters["resolutions"][parameters["iteration"]])
325 |
326 | # Save result
327 | plt.figure()
328 | plt.title(
329 | f"Alignment at resolution {current_res_name}\n after genetic algorithm "
330 | f"(fitness={np.round(parameters['GA_fitness'][-1], 2)})"
331 | )
332 | plt.imshow(final_image)
333 | plt.savefig(
334 | f"{parameters['sol_save_dir']}/images/ga_progression/" f"ga_result_{current_res_name}.png"
335 | )
336 | plt.close()
337 |
338 | return
339 |
340 |
341 | def make_tform_gif(parameters):
342 | """
343 | Custom function to make a gif of all the tformed results after the genetic algorithm
344 | as a visual check of the result. This function requires that intermediate results of
345 | the genetic algorithm from the different resolutions are saved in a directory called
346 |     images. These intermediate images are then combined into the GIF.
347 |
348 | Input:
349 | - Dict with parameters
350 |
351 | Output:
352 | - GIF file of the transformation
353 | """
354 |
355 | # Make gif of the transformation
356 | imsavedir = f"{parameters['sol_save_dir']}/images/ga_progression/"
357 | gifsavedir = f"{parameters['sol_save_dir']}/images/tform_progression.gif"
358 |
359 | all_images = glob.glob(imsavedir + "/*")
360 | all_images = sorted(all_images, key=lambda t: os.stat(t).st_mtime)
361 |
362 | images = []
363 | for name in all_images:
364 | image = imageio.imread(name)
365 | images.append(image)
366 |
367 | imageio.mimsave(gifsavedir, images, duration=0.75)
368 |
369 | return
370 |
371 |
372 | def plot_sampled_patches(total_im, patch_indices_x, patch_indices_y, ts_lines):
373 | """
374 | Custom function to visualize the patches which are extracted in the histogram cost
375 | function. This function serves as a visual check that the patches are extracted
376 | correctly.
377 |
378 | Input:
379 | - Final recombined image
380 | - X/Y indices of the extracted patches
381 | - Theil-Sen lines
382 |
383 | Output:
384 | - Figure displaying the sampled patches
385 | """
386 |
387 | # Plotting parameters
388 | ts_line_colours = ["b", "g"] * 4
389 |
390 | plt.figure()
391 | plt.title("Sampled patches on TheilSen lines")
392 | plt.imshow(total_im, cmap="gray")
393 | for x, y in zip(patch_indices_x.values(), patch_indices_y.values()):
394 | plt.plot(x, y, linewidth=0.5, c="r")
395 | for ts, c in zip(ts_lines, ts_line_colours):
396 | plt.plot(ts[:, 0], ts[:, 1], linewidth=2, c=c)
397 | plt.close()
398 |
399 | return
400 |
401 |
402 | def plot_overlap_cost(im, relative_overlap):
403 | """
404 | Custom function to plot the overlap between the fragments. Currently the overlap
405 |     between the fragments is visualized as a rather gray area; this could of course be
406 | visualized more explicitly.
407 |
408 | Input:
409 | - Final stitched image
410 |     - Relative overlap (fraction)
411 |
412 | Output:
413 | - Figure displaying the overlap between the fragments
414 | """
415 |
416 | plt.figure()
417 | plt.title(f"Visualization of overlapping fragments {np.round(relative_overlap*100, 1)}%")
418 | plt.imshow(im, cmap="gray")
419 | plt.close()
420 |
421 | return
422 |
423 |
424 | def plot_ga_multires(parameters):
425 | """
426 | Custom function to plot how the fitness improves at multiple resolutions.
427 |
428 | NOTE: The fitness depends on the cost function being used and may not scale correctly
429 | with the resolutions. This may result in a decreasing fitness for higher resolutions
430 | while the visual fitness increases. Example: absolute distance between the endpoints
431 |     of the edges increases for higher resolutions, leading to a lower fitness when this is
432 | the only factor in the cost function.
433 |
434 | Input:
435 | - Dict with parameters
436 |
437 | Output:
438 | - Figure displaying the evolution of the fitness over different resolutions.
439 | """
440 |
441 | # Set some plotting parameters
442 | fitness = parameters["GA_fitness"]
443 | xticks_loc = list(np.arange(0, 5))
444 | xticks_label = ["Initial"] + parameters["resolutions"]
445 |
446 | # Save csv of cost (inverse of fitness, lower cost is better) per res
447 | df_savepath = parameters["sol_save_dir"].joinpath("tform", "cost_per_res.csv")
448 | df = pd.DataFrame()
449 | df["resolution"] = xticks_label
450 | df["cost"] = [np.round(1/i, 3) for i in fitness]
451 | df.to_csv(df_savepath, index=False)
452 |
453 | # Only plot when the GA fitness has been tracked properly (e.g. when the cost function
454 | # has been scaled properly throughout the different resolutions).
455 | if len(fitness) == len(xticks_label):
456 | plt.figure()
457 | plt.title("Fitness progression at multiple resolutions")
458 | plt.plot(xticks_loc, fitness)
459 | plt.xlabel("Resolution")
460 | plt.xticks(xticks_loc, xticks_label)
461 | plt.ylabel("Fitness")
462 | plt.savefig(f"{parameters['sol_save_dir']}/images/GA_fitness_result.png")
463 | plt.close()
464 | else:
465 | warnings.warn(
466 | "Could not plot fitness progression for multiple resolutions, "
467 | "try running the genetic algorithm from scratch by deleting "
468 | "the results directory of this patient"
469 | )
470 |
471 | return
472 |
--------------------------------------------------------------------------------
/src/pythostitcher_utils/preprocess.py:
--------------------------------------------------------------------------------
1 | def preprocess(fragments, parameters):
2 | """
3 | Function to load and preprocess all the tissue fragment images. The preprocessing
4 | mainly consists of resizing and saving the image at multiple resolutions.
5 |
6 | Input:
7 | - List of all fragments
8 | - Dict with parameters
9 |     - Logging object (provided via the parameter dict)
10 |
11 | Output:
12 | - Fragment class with all loaded images
13 | """
14 |
15 | # Create new directories to save results where necessary
16 | new_dirs = [
17 | parameters["sol_save_dir"],
18 | parameters["sol_save_dir"].joinpath("highres", "blend_summary"),
19 | parameters["sol_save_dir"].joinpath("fragments"),
20 | parameters["sol_save_dir"].joinpath("tform"),
21 | parameters["sol_save_dir"].joinpath("images", "debug"),
22 | parameters["sol_save_dir"].joinpath("images", "ga_progression"),
23 | parameters["sol_save_dir"].joinpath("images", "ga_result_per_iteration"),
24 | parameters["sol_save_dir"].joinpath(
25 | "images", parameters["slice_idx"], parameters["res_name"]
26 | ),
27 | ]
28 | for d in new_dirs:
29 | if not d.is_dir():
30 | d.mkdir(parents=True)
31 |
32 | for f in fragments:
33 |
34 | # Read fragment transformations
35 | if parameters["n_fragments"] == 4:
36 | f.read_transforms()
37 |
38 | # Read all original images
39 | f.read_image()
40 |
41 | # Normalize the stain
42 | f.normalize_stain()
43 |
44 | # Preprocess (resize+pad) gray images
45 | f.downsample_image()
46 |
47 | # Segment tissue. This basically loads in the stored segmentations
48 | f.segment_tissue()
49 |
50 | # Apply mask to both gray and colour image
51 | f.apply_masks()
52 |
53 | # Save the fragment class for later use
54 | f.save_fragment()
55 |
56 | # Save rot90 steps for later in high resolution reconstruction
57 | if not parameters["n_fragments"] == 2:
58 | parameters["rot_steps"] = dict()
59 | for f in fragments:
60 | parameters["rot_steps"][f.original_name] = f.rot_k
61 |
62 | return
63 |
--------------------------------------------------------------------------------
/src/pythostitcher_utils/stain_normalization.py:
--------------------------------------------------------------------------------
1 | import torchstain
2 | import numpy as np
3 | import pyvips
4 | from typing import Tuple
5 | from torchvision import transforms
6 |
7 |
8 | class Reinhard_normalizer(object):
9 | """
10 | A stain normalization object for PyVips. Fits a reference PyVips image,
11 | transforms a PyVips Image. Can also be initialized with precalculated
12 | means and stds (in LAB colorspace).
13 |
14 | Adapted from https://gist.github.com/munick/badb6582686762bb10265f8a66c26d48
15 | """
16 |
17 | def __init__(self, target_means=None, target_stds=None):
18 | self.target_means = target_means
19 | self.target_stds = target_stds
20 |
21 | return
22 |
23 | def fit(self, target: pyvips.Image):
24 | """
25 |         Fit the normalizer to a reference PyVips image.
26 | """
27 |
28 | # Get the means and stds of the target image
29 | means, stds = self.get_mean_std(target)
30 | self.target_means = means
31 | self.target_stds = stds
32 |
33 | return
34 |
35 | def transform(self, image):
36 | """
37 | Method to apply the transformation to a PyVips image.
38 | """
39 |
40 | # Split the image into LAB channels
41 | L, A, B = self.lab_split(image)
42 | means, stds = self.get_mean_std(image)
43 |
44 | # Apply normalization to each channel
45 | norm1 = ((L - means[0]) * (self.target_stds[0] / stds[0])) + self.target_means[0]
46 | norm2 = ((A - means[1]) * (self.target_stds[1] / stds[1])) + self.target_means[1]
47 | norm3 = ((B - means[2]) * (self.target_stds[2] / stds[2])) + self.target_means[2]
48 |
49 | return self.merge_to_rgb(norm1, norm2, norm3)
50 |
51 | def lab_split(self, img: pyvips.Image) -> Tuple[pyvips.Image, pyvips.Image, pyvips.Image]:
52 | """
53 | Method to convert a PyVips image to LAB colorspace.
54 | """
55 |
56 | img_lab = img.colourspace("VIPS_INTERPRETATION_LAB")
57 | L, A, B = img_lab.bandsplit()[:3]
58 |
59 | return L, A, B
60 |
61 | def get_mean_std(self, image: pyvips.Image) -> Tuple:
62 | """
63 | Method to calculate the mean and standard deviation of a PyVips image.
64 | """
65 |
66 | L, A, B = self.lab_split(image)
67 | m1, sd1 = L.avg(), L.deviate()
68 | m2, sd2 = A.avg(), A.deviate()
69 | m3, sd3 = B.avg(), B.deviate()
70 | means = m1, m2, m3
71 | stds = sd1, sd2, sd3
72 | self.image_stats = means, stds
73 |
74 | return means, stds
75 |
76 | def merge_to_rgb(self, L: pyvips.Image, A: pyvips.Image, B: pyvips.Image) -> pyvips.Image:
77 | """
78 | Method to merge the L, A, B bands to an RGB image.
79 | """
80 |
81 | img_lab = L.bandjoin([A,B])
82 | img_rgb = img_lab.colourspace('VIPS_INTERPRETATION_sRGB')
83 |
84 | return img_rgb
85 |
86 |
87 | def apply_stain_norm(images):
88 | """
89 | Function to apply stain normalization to a set of regular images.
90 | """
91 |
92 | # Always use first image as reference
93 | ref_image = images[0]
94 |
95 | # Initiate normalizer
96 | T = transforms.Compose([
97 | transforms.ToTensor(),
98 | transforms.Lambda(lambda x: x*255)
99 | ])
100 | stain_normalizer = torchstain.normalizers.ReinhardNormalizer(backend="torch")
101 | stain_normalizer.fit(T(ref_image))
102 |
103 | normalized_images = []
104 |
105 | # Apply stain normalization
106 | for image in images:
107 | norm_im = stain_normalizer.normalize(T(image))
108 | norm_im = norm_im.numpy().astype("uint8")
109 | normalized_images.append(norm_im)
110 |
111 | return normalized_images
112 |
113 |
114 | def apply_fullres_stain_norm(images):
115 | """
116 | Function to apply stain normalization on full resolution pyvips images.
117 | """
118 |
119 | # Always use first image as reference
120 | ref_image = images[0]
121 |
122 | normalizer = Reinhard_normalizer()
123 | normalizer.fit(ref_image)
124 |
125 | normalized_images = []
126 |
127 | # Apply stain normalization
128 | for image in images:
129 | norm_im = normalizer.transform(image)
130 | normalized_images.append(norm_im.cast("uchar", shift=False))
131 |
132 | return normalized_images
133 |
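The per-channel rule applied in Reinhard_normalizer.transform is norm = (x - mean_src) * (std_ref / std_src) + mean_ref, computed independently for the L, A and B bands. A tiny NumPy sketch of that arithmetic on a toy channel, purely to illustrate what the band-wise pyvips expressions above compute:

    import numpy as np

    def reinhard_channel(channel, ref_mean, ref_std):
        # Shift and rescale one channel to match the reference statistics
        mean, std = channel.mean(), channel.std()
        return (channel - mean) * (ref_std / std) + ref_mean

    rng = np.random.default_rng(0)
    src = rng.normal(loc=60.0, scale=5.0, size=(4, 4))   # toy L channel
    out = reinhard_channel(src, ref_mean=70.0, ref_std=10.0)
    print(round(out.mean(), 2), round(out.std(), 2))      # 70.0 10.0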
--------------------------------------------------------------------------------
/src/pythostitcher_utils/transformations.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2
3 |
4 |
5 | def warp_2d_points(src, center, rotation, translation):
6 | """
7 | Custom function to warp a set of 2D coordinates using an affine transform.
8 |
9 | Input:
10 | - Nx2 matrix with points to warp
11 | - Center to rotate around
12 | - Angle of rotation in degrees
13 | - Translation in pixels
14 |
15 | Output:
16 | - Nx2 matrix with warped points
17 | """
18 |
19 | # Catch use case where only 1 coordinate pair is provided as input
20 | if len(np.array(src).shape) == 1:
21 | src = np.array(src)
22 | src = np.transpose(src[:, np.newaxis])
23 |
24 | assert (
25 | len(np.array(src).shape) == 2 and np.array(src).shape[-1] == 2
26 | ), "Input must be 2 dimensionsal and be ordered as Nx2 matrix"
27 | assert len(translation) == 2, "Translation must consist of X/Y component"
28 |
29 | # Ensure variables are in correct format
30 | center = tuple([int(i) for i in np.squeeze(center)])
31 | src = src.astype("float32")
32 |
33 | # Create rotation matrix
34 | rot_mat = cv2.getRotationMatrix2D(center=center, angle=rotation, scale=1)
35 | rot_mat[0, 2] += translation[0]
36 | rot_mat[1, 2] += translation[1]
37 |
38 |     # Add a column of ones (homogeneous coordinates) for the affine matrix multiplication
39 | add_ones = np.ones((src.shape[0], 1))
40 | src = np.hstack([src, add_ones])
41 |
42 | # Transform points
43 | tform_src = rot_mat.dot(src.T).T
44 | tform_src = np.round(tform_src, 1)
45 |
46 | return tform_src
47 |
48 |
49 | def warp_image(src, center, rotation, translation, output_shape=None):
50 | """
51 | Custom function to warp a 2D image using an affine transformation.
52 |
53 | Input:
54 | - Image to warp
55 | - Center to rotate around
56 | - Angle of rotation in degrees
57 | - Translation in pixels
58 | - Output shape of warped image
59 |
60 | Output:
61 | - Warped image
62 | """
63 |
64 | # Get output shape if it is specified. Switch XY for opencv convention
65 | if output_shape:
66 | if len(output_shape) == 2:
67 | output_shape = tuple(output_shape[::-1])
68 | elif len(output_shape) == 3:
69 | output_shape = tuple(output_shape[:2][::-1])
70 |
71 | # Else keep same output size as input image
72 | else:
73 | if len(src.shape) == 2:
74 | output_shape = src.shape
75 | output_shape = tuple(output_shape[::-1])
76 | elif len(src.shape) == 3:
77 | output_shape = src.shape[:2]
78 | output_shape = tuple(output_shape[::-1])
79 |
80 | # Ensure that shape only holds integers
81 | output_shape = [int(i) for i in output_shape]
82 |
83 | # Convert to uint8 for opencv
84 | if src.dtype == "float":
85 | src = ((src / np.max(src)) * 255).astype("uint8")
86 |
87 | # Ensure center is in correct format
88 | center = tuple([int(i) for i in np.squeeze(center)])
89 |
90 | # Create rotation matrix
91 | rot_mat = cv2.getRotationMatrix2D(center=center, angle=rotation, scale=1)
92 | rot_mat[0, 2] += translation[0]
93 | rot_mat[1, 2] += translation[1]
94 |
95 | # Warp image
96 | tform_src = cv2.warpAffine(src=src, M=rot_mat, dsize=output_shape)
97 |
98 | return tform_src
99 |
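warp_2d_points and warp_image build the same rotation matrix, so coordinates warped with the former stay aligned with pixels warped with the latter. A small usage sketch with toy data; the import path assumes src/ is on the Python path.

    import numpy as np

    from pythostitcher_utils.transformations import warp_2d_points, warp_image

    # Toy image with a single bright pixel and the matching (x, y) coordinate
    image = np.zeros((200, 200), dtype="uint8")
    image[50, 120] = 255                      # row 50, column 120 -> point (x=120, y=50)
    points = np.array([[120, 50]])

    center = (100, 100)
    rotation = 90                             # degrees, OpenCV convention
    translation = (10, -5)

    warped_points = warp_2d_points(points, center, rotation, translation)
    warped_image = warp_image(image, center, rotation, translation)

    # The warped coordinate should land on (or right next to) the warped bright pixel
    print(warped_points)                                      # [[60. 75.]]
    print(np.column_stack(np.nonzero(warped_image)[::-1]))    # [[60 75]]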
--------------------------------------------------------------------------------