├── .github └── workflows │ └── book.yml ├── .gitignore ├── LICENSE-CC-BY ├── ObjectClassifier.cl ├── README.md ├── build.sh ├── clean.sh ├── data ├── Haase_MRT_tfl3d1.tif ├── IXMtest_A02_s9.tif ├── IXMtest_A02_s9_image_data_source.txt └── blobs.tif ├── docs ├── 00_course_preparation │ ├── Readme.md │ └── img.png ├── 10_Clesperanto │ ├── 10_select_devices.ipynb │ ├── 20_gpu_arrays_and_memory_managment.ipynb │ ├── 30_apply_operations_on_data.ipynb │ ├── 40_nuclei_segmentation.ipynb │ ├── 50_measurement_and_quantifications.ipynb │ ├── 60_custom_kernel_execution.ipynb │ ├── 70_benchmarking.ipynb │ ├── clesperanto-polbias-course.pdf │ ├── maximum_z_projection.cl │ └── readme.md ├── 23_clesperanto_assistant │ ├── Intro_Napari_Assistant.pdf │ ├── images │ │ ├── export_notebooks.mp4 │ │ ├── export_notebooks01.jpg │ │ ├── export_notebooks02.jpg │ │ ├── export_notebooks03.jpg │ │ ├── export_notebooks04.jpg │ │ ├── export_notebooks05.jpg │ │ ├── export_notebooks06.jpg │ │ ├── export_notebooks07.jpg │ │ ├── export_notebooks08.jpg │ │ ├── export_notebooks09.jpg │ │ ├── filter_clesperanto.png │ │ ├── hela_cells_screenshot.png │ │ ├── install_package01.png │ │ ├── napari-assistant.mp4 │ │ ├── napari-assistant01.jpg │ │ ├── napari-assistant02.jpg │ │ ├── napari-assistant03.jpg │ │ ├── napari-assistant04.jpg │ │ ├── napari-assistant05.jpg │ │ ├── napari-assistant06.jpg │ │ ├── napari-assistant07.jpg │ │ ├── napari-assistant08.jpg │ │ ├── napari-assistant09.jpg │ │ ├── napari-assistant10.jpg │ │ ├── napari-assistant11.jpg │ │ ├── napari-assistant12.jpg │ │ ├── napari-assistant13.jpg │ │ ├── napari-assistant14.jpg │ │ ├── napari-assistant15.jpg │ │ ├── napari-assistant16.jpg │ │ ├── napari-assistant17.jpg │ │ ├── napari-assistant18.jpg │ │ └── napari-assistant1_.jpg │ ├── intro.md │ ├── napari-assistant.md │ └── notebook_export.md ├── 25_cupy │ ├── 10_basics.ipynb │ ├── 20_dropin_replacement.ipynb │ ├── 30_filtering.ipynb │ ├── 40_custom_kernels.ipynb │ ├── 50_napari-cupy-image-processing.ipynb │ ├── 60_benchmark_affine_transforms.ipynb │ ├── colab_runtime.png │ ├── cupy.pdf │ ├── napari-assistant-cupy.png │ ├── napari-assistant-generated-cupy-notebook.ipynb │ └── readme.md ├── 30_Deconvolution │ ├── 0_intro_to_decon.md │ ├── 10_decon_systems_design.ipynb │ ├── 1_test_libs.ipynb │ ├── 2_cupy_forward.ipynb │ ├── 3_Nuclei_Deconvolution_Compare_to_Truth.ipynb │ ├── 4_Nuclei_Deconvolution_Segmentation.ipynb │ ├── 5_edges.ipynb │ ├── 6_decon_bead_edge_handling.ipynb │ ├── 7_decon_regularization.ipynb │ ├── 8_extract_psf.ipynb │ ├── 9_Dask_Deconvolution.ipynb │ ├── cluster_access.md │ ├── decon_helper.py │ ├── images │ │ ├── 5_open_terminal.png │ │ ├── 6_terminal_output.png │ │ ├── 8_select_kernel.png │ │ ├── PSF_xy.jpg │ │ ├── PSF_xz.jpg │ │ ├── rl_iterations │ │ │ ├── rl10xy.jpg │ │ │ ├── rl10xz.jpg │ │ │ ├── rl20xy.jpg │ │ │ ├── rl20xz.jpg │ │ │ ├── rl30xy.jpg │ │ │ ├── rl30xz.jpg │ │ │ ├── xy.jpg │ │ │ └── xz.jpg │ │ ├── rl_rltv │ │ │ ├── sphere_deconvolved_rla50_xy.jpg │ │ │ ├── sphere_deconvolved_rla50_xz.jpg │ │ │ ├── sphere_deconvolved_rltv0.002_a50_xy.jpg │ │ │ └── sphere_deconvolved_rltv0.002_a50_xz.jpg │ │ ├── sphere_convolved_noise_xy.jpg │ │ ├── sphere_convolved_noise_xz.jpg │ │ ├── sphere_convolved_xy.jpg │ │ ├── sphere_convolved_xz.jpg │ │ ├── sphere_xy.jpg │ │ └── sphere_xz.jpg │ ├── labels_decon1.png │ ├── labels_decon2.png │ ├── labels_im.png │ ├── test.png │ └── test_libs.py ├── 40_HPC_Intro │ ├── Intro_to_HPC_POLBIAS_Handout.pdf │ ├── data │ │ ├── myscript.py │ │ ├── myscript.sbatch │ │ └── 
requirements.txt │ └── readme.md ├── 50_Clesperanto_on_HPC │ ├── exercises.md │ ├── images │ │ ├── 5_open_terminal.png │ │ ├── 6_terminal_output.png │ │ ├── taurus_login.png │ │ ├── taurus_login_1.png │ │ ├── taurus_login_2.png │ │ ├── taurus_login_3.png │ │ ├── taurus_login_4.png │ │ ├── taurus_login_6.png │ │ └── taurus_login_7.png │ ├── login_taurus.md │ ├── modified_generated_notebook.ipynb │ ├── napari_assistant_generated_notebook.ipynb │ └── readme.md ├── 60_Pytorch │ ├── 00_versions.ipynb │ ├── 01_data_exploration.ipynb │ ├── 02_dataset.ipynb │ ├── 03_data_batching_and_setup_model.ipynb │ ├── 04_model_training.ipynb │ ├── 05_model_training_with_device.ipynb │ ├── 06_model_training_with_logging.ipynb │ ├── 07_model_training_with_checkpoints.ipynb │ ├── 08_pytorch_lightning.ipynb │ ├── data.py │ ├── enable_env_in_jupyter.sh │ ├── readme.md │ ├── requirements.txt │ └── unet.png ├── 70_AI_Segmentation_Denoising │ ├── 01_2D_unet_training.ipynb │ ├── 02_Noise2Void.ipynb │ ├── 03_Noise2Void_3D.ipynb │ ├── CNNs_N2V.pdf │ ├── HPC_settings.PNG │ ├── Readme.md │ └── models │ │ └── n2v_3D │ │ └── logs │ │ └── images │ │ └── .nfs0000000005caaf5600000072 ├── 80_image_analysis_with_dask │ ├── 1_dask_basics.ipynb │ ├── 202308_dask_workshop_slides.pdf │ ├── 2_dask_image.ipynb │ ├── 3_lazy_image_processing.ipynb │ ├── README.md │ ├── dask-array.svg │ ├── dask-overview.svg │ ├── environment.yml │ ├── mydask.html │ └── mydask.png ├── _config.yml ├── _toc.yml ├── biapol_logo.png ├── how_to_download.png ├── intro.md ├── jupyterlab.png ├── jupyterlab2.png └── timetable.png └── requirements.txt /.github/workflows/book.yml: -------------------------------------------------------------------------------- 1 | name: deploy-book 2 | 3 | # Only run this when the master branch changes 4 | on: 5 | push: 6 | branches: 7 | - master 8 | - main 9 | 10 | # This job installs dependencies, build the book, and pushes it to `gh-pages` 11 | jobs: 12 | deploy-book: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@v2 16 | 17 | # Install dependencies 18 | - name: Set up Python 3.7 19 | uses: actions/setup-python@v1 20 | with: 21 | python-version: 3.7 22 | 23 | - name: Install dependencies 24 | run: | 25 | pip install -r requirements.txt 26 | 27 | 28 | # Build the book 29 | - name: Build the book 30 | run: | 31 | jupyter-book build docs/ 32 | 33 | # Push the book's HTML to github-pages 34 | - name: GitHub Pages action 35 | uses: peaceiris/actions-gh-pages@v3.5.9 36 | with: 37 | github_token: ${{ secrets.GITHUB_TOKEN }} 38 | publish_dir: ./docs/_build/html -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Mac OS 2 | .DS_Store 3 | 4 | # Byte-compiled / optimized / DLL files 5 | __pycache__/ 6 | *.py[cod] 7 | *$py.class 8 | 9 | # C extensions 10 | *.so 11 | 12 | # Data that should not be downloaded pytorch segmentation/denoising 13 | docs/70_AI_Segmentation_Denoising/data/ 14 | docs/70_AI_Segmentation_Denoising/data.zip 15 | **/lightning_outputs/ 16 | docs/70_AI_Segmentation_Denoising/pn2v 17 | 18 | # Distribution / packaging 19 | .Python 20 | build/ 21 | develop-eggs/ 22 | dist/ 23 | downloads/ 24 | eggs/ 25 | .eggs/ 26 | lib/ 27 | lib64/ 28 | parts/ 29 | sdist/ 30 | var/ 31 | wheels/ 32 | share/python-wheels/ 33 | *.egg-info/ 34 | .installed.cfg 35 | *.egg 36 | MANIFEST 37 | 38 | # PyInstaller 39 | # Usually these files are written by a python script from a template 40 | # before 
PyInstaller builds the exe, so as to inject date/other infos into it. 41 | *.manifest 42 | *.spec 43 | 44 | # Installer logs 45 | pip-log.txt 46 | pip-delete-this-directory.txt 47 | 48 | # Unit test / coverage reports 49 | htmlcov/ 50 | .tox/ 51 | .nox/ 52 | .coverage 53 | .coverage.* 54 | .cache 55 | nosetests.xml 56 | coverage.xml 57 | *.cover 58 | *.py,cover 59 | .hypothesis/ 60 | .pytest_cache/ 61 | cover/ 62 | 63 | # Translations 64 | *.mo 65 | *.pot 66 | 67 | # Django stuff: 68 | *.log 69 | local_settings.py 70 | db.sqlite3 71 | db.sqlite3-journal 72 | 73 | # Flask stuff: 74 | instance/ 75 | .webassets-cache 76 | 77 | # Scrapy stuff: 78 | .scrapy 79 | 80 | # Sphinx documentation 81 | docs/_build/ 82 | 83 | # PyBuilder 84 | .pybuilder/ 85 | target/ 86 | 87 | # Jupyter Notebook 88 | .ipynb_checkpoints 89 | 90 | # IPython 91 | profile_default/ 92 | ipython_config.py 93 | 94 | # pyenv 95 | # For a library or package, you might want to ignore these files since the code is 96 | # intended to run in multiple environments; otherwise, check them in: 97 | # .python-version 98 | 99 | # pipenv 100 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 101 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 102 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 103 | # install all needed dependencies. 104 | #Pipfile.lock 105 | 106 | # poetry 107 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 108 | # This is especially recommended for binary packages to ensure reproducibility, and is more 109 | # commonly ignored for libraries. 110 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 111 | #poetry.lock 112 | 113 | # pdm 114 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 115 | #pdm.lock 116 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 117 | # in version control. 118 | # https://pdm.fming.dev/#use-with-ide 119 | .pdm.toml 120 | 121 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 122 | __pypackages__/ 123 | 124 | # Celery stuff 125 | celerybeat-schedule 126 | celerybeat.pid 127 | 128 | # SageMath parsed files 129 | *.sage.py 130 | 131 | # Environments 132 | .env 133 | .venv 134 | env/ 135 | venv/ 136 | ENV/ 137 | env.bak/ 138 | venv.bak/ 139 | 140 | # Spyder project settings 141 | .spyderproject 142 | .spyproject 143 | 144 | # Rope project settings 145 | .ropeproject 146 | 147 | # mkdocs documentation 148 | /site 149 | 150 | # mypy 151 | .mypy_cache/ 152 | .dmypy.json 153 | dmypy.json 154 | 155 | # Pyre type checker 156 | .pyre/ 157 | 158 | # pytype static type analyzer 159 | .pytype/ 160 | 161 | # Cython debug symbols 162 | cython_debug/ 163 | 164 | # PyCharm 165 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 166 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 167 | # and can be added to the global gitignore or merged into this file. For a more nuclear 168 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
169 | .idea/ 170 | -------------------------------------------------------------------------------- /ObjectClassifier.cl: -------------------------------------------------------------------------------- 1 | /* 2 | OpenCL RandomForestClassifier 3 | classifier_class_name = ObjectClassifier 4 | feature_specification = average_distance_of_n_nearest_neighbors=1 average_distance_of_n_nearest_neighbors=6 average_distance_of_n_nearest_neighbors=10 5 | num_ground_truth_dimensions = 1 6 | num_classes = 2 7 | num_features = 3 8 | max_depth = 2 9 | num_trees = 100 10 | feature_importances = 0.24965823396297196,0.45603854129028126,0.29430322474674675 11 | apoc_version = 0.10.0 12 | */ 13 | __kernel void predict (IMAGE_in0_TYPE in0, IMAGE_in1_TYPE in1, IMAGE_in2_TYPE in2, IMAGE_out_TYPE out) { 14 | sampler_t sampler = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP_TO_EDGE | CLK_FILTER_NEAREST; 15 | const int x = get_global_id(0); 16 | const int y = get_global_id(1); 17 | const int z = get_global_id(2); 18 | float i0 = READ_IMAGE(in0, sampler, POS_in0_INSTANCE(x,y,z,0)).x; 19 | float i1 = READ_IMAGE(in1, sampler, POS_in1_INSTANCE(x,y,z,0)).x; 20 | float i2 = READ_IMAGE(in2, sampler, POS_in2_INSTANCE(x,y,z,0)).x; 21 | float s0=0; 22 | float s1=0; 23 | if(i2<16.29216957092285){ 24 | s0+=64.0; 25 | } else { 26 | s1+=13.0; 27 | } 28 | if(i0<8.404618263244629){ 29 | if(i2<17.123249053955078){ 30 | s0+=65.0; 31 | } else { 32 | s1+=1.0; 33 | } 34 | } else { 35 | s1+=11.0; 36 | } 37 | if(i0<8.44096565246582){ 38 | if(i2<16.286584854125977){ 39 | s0+=63.0; 40 | } else { 41 | s1+=2.0; 42 | } 43 | } else { 44 | s1+=12.0; 45 | } 46 | if(i1<12.685188293457031){ 47 | s0+=63.0; 48 | } else { 49 | s1+=14.0; 50 | } 51 | if(i0<8.404618263244629){ 52 | if(i2<17.09811782836914){ 53 | s0+=61.0; 54 | } else { 55 | s1+=3.0; 56 | } 57 | } else { 58 | s1+=13.0; 59 | } 60 | if(i1<13.170796394348145){ 61 | s0+=56.0; 62 | } else { 63 | s1+=21.0; 64 | } 65 | if(i2<16.78363609313965){ 66 | s0+=65.0; 67 | } else { 68 | s1+=12.0; 69 | } 70 | if(i1<12.685188293457031){ 71 | s0+=64.0; 72 | } else { 73 | s1+=13.0; 74 | } 75 | if(i1<13.899157524108887){ 76 | s0+=64.0; 77 | } else { 78 | s1+=13.0; 79 | } 80 | if(i0<11.423654556274414){ 81 | if(i0<5.212059020996094){ 82 | s0+=25.0; 83 | s1+=5.0; 84 | } else { 85 | s0+=42.0; 86 | } 87 | } else { 88 | s1+=5.0; 89 | } 90 | if(i1<13.082313537597656){ 91 | s0+=61.0; 92 | } else { 93 | s1+=16.0; 94 | } 95 | if(i1<12.685188293457031){ 96 | s0+=60.0; 97 | } else { 98 | s1+=17.0; 99 | } 100 | if(i2<16.664493560791016){ 101 | s0+=62.0; 102 | } else { 103 | s1+=15.0; 104 | } 105 | if(i2<17.15595817565918){ 106 | s0+=64.0; 107 | } else { 108 | s1+=13.0; 109 | } 110 | if(i0<11.779717445373535){ 111 | if(i2<17.09811782836914){ 112 | s0+=68.0; 113 | } else { 114 | s1+=2.0; 115 | } 116 | } else { 117 | s1+=7.0; 118 | } 119 | if(i2<17.15595817565918){ 120 | s0+=60.0; 121 | } else { 122 | s1+=17.0; 123 | } 124 | if(i1<13.134271621704102){ 125 | s0+=65.0; 126 | } else { 127 | s1+=12.0; 128 | } 129 | if(i0<8.44096565246582){ 130 | if(i2<16.664493560791016){ 131 | s0+=58.0; 132 | } else { 133 | s1+=2.0; 134 | } 135 | } else { 136 | s1+=17.0; 137 | } 138 | if(i1<12.685188293457031){ 139 | s0+=63.0; 140 | } else { 141 | s1+=14.0; 142 | } 143 | if(i1<12.685188293457031){ 144 | s0+=66.0; 145 | } else { 146 | s1+=11.0; 147 | } 148 | if(i2<16.78363609313965){ 149 | s0+=62.0; 150 | } else { 151 | s1+=15.0; 152 | } 153 | if(i0<9.130266189575195){ 154 | if(i1<12.685188293457031){ 155 | s0+=62.0; 156 | } else { 157 | 
s1+=4.0; 158 | } 159 | } else { 160 | s1+=11.0; 161 | } 162 | if(i0<9.130266189575195){ 163 | if(i0<5.1273956298828125){ 164 | s0+=31.0; 165 | s1+=2.0; 166 | } else { 167 | s0+=35.0; 168 | } 169 | } else { 170 | s1+=9.0; 171 | } 172 | if(i0<9.043896675109863){ 173 | if(i2<16.29216957092285){ 174 | s0+=59.0; 175 | } else { 176 | s1+=7.0; 177 | } 178 | } else { 179 | s1+=11.0; 180 | } 181 | if(i1<12.681735038757324){ 182 | s0+=67.0; 183 | } else { 184 | s1+=10.0; 185 | } 186 | if(i0<8.44096565246582){ 187 | if(i0<5.162413597106934){ 188 | s0+=27.0; 189 | s1+=3.0; 190 | } else { 191 | s0+=33.0; 192 | } 193 | } else { 194 | s1+=14.0; 195 | } 196 | if(i0<8.404618263244629){ 197 | if(i1<12.685188293457031){ 198 | s0+=67.0; 199 | } else { 200 | s1+=2.0; 201 | } 202 | } else { 203 | s1+=8.0; 204 | } 205 | if(i2<16.29216957092285){ 206 | s0+=59.0; 207 | } else { 208 | s1+=18.0; 209 | } 210 | if(i2<16.664493560791016){ 211 | s0+=61.0; 212 | } else { 213 | s1+=16.0; 214 | } 215 | if(i0<9.130266189575195){ 216 | if(i1<12.685188293457031){ 217 | s0+=61.0; 218 | } else { 219 | s1+=2.0; 220 | } 221 | } else { 222 | s1+=14.0; 223 | } 224 | if(i1<12.685188293457031){ 225 | s0+=59.0; 226 | } else { 227 | s1+=18.0; 228 | } 229 | if(i2<17.09811782836914){ 230 | s0+=61.0; 231 | } else { 232 | s1+=16.0; 233 | } 234 | if(i1<12.685188293457031){ 235 | s0+=68.0; 236 | } else { 237 | s1+=9.0; 238 | } 239 | if(i0<9.09391975402832){ 240 | if(i2<16.664493560791016){ 241 | s0+=61.0; 242 | } else { 243 | s1+=5.0; 244 | } 245 | } else { 246 | s1+=11.0; 247 | } 248 | if(i1<12.681735038757324){ 249 | s0+=67.0; 250 | } else { 251 | s1+=10.0; 252 | } 253 | if(i2<16.664493560791016){ 254 | s0+=58.0; 255 | } else { 256 | s1+=19.0; 257 | } 258 | if(i0<8.404618263244629){ 259 | if(i2<17.141380310058594){ 260 | s0+=65.0; 261 | } else { 262 | s1+=2.0; 263 | } 264 | } else { 265 | s1+=10.0; 266 | } 267 | if(i1<12.685188293457031){ 268 | s0+=60.0; 269 | } else { 270 | s1+=17.0; 271 | } 272 | if(i1<12.685188293457031){ 273 | s0+=60.0; 274 | } else { 275 | s1+=17.0; 276 | } 277 | if(i2<16.664493560791016){ 278 | s0+=65.0; 279 | } else { 280 | s1+=12.0; 281 | } 282 | if(i1<12.685188293457031){ 283 | s0+=58.0; 284 | } else { 285 | s1+=19.0; 286 | } 287 | if(i2<16.29216957092285){ 288 | s0+=67.0; 289 | } else { 290 | s1+=10.0; 291 | } 292 | if(i1<12.681735038757324){ 293 | s0+=68.0; 294 | } else { 295 | s1+=9.0; 296 | } 297 | if(i0<8.44096565246582){ 298 | if(i1<14.05517864227295){ 299 | s0+=65.0; 300 | } else { 301 | s1+=2.0; 302 | } 303 | } else { 304 | s1+=10.0; 305 | } 306 | if(i2<16.664493560791016){ 307 | s0+=62.0; 308 | } else { 309 | s1+=15.0; 310 | } 311 | if(i1<12.681735038757324){ 312 | s0+=66.0; 313 | } else { 314 | s1+=11.0; 315 | } 316 | if(i0<8.404618263244629){ 317 | if(i0<4.987161636352539){ 318 | s0+=33.0; 319 | } else { 320 | s0+=33.0; 321 | s1+=2.0; 322 | } 323 | } else { 324 | s1+=9.0; 325 | } 326 | if(i1<13.170796394348145){ 327 | s0+=57.0; 328 | } else { 329 | s1+=20.0; 330 | } 331 | if(i0<8.404618263244629){ 332 | if(i2<17.09811782836914){ 333 | s0+=66.0; 334 | } else { 335 | s1+=1.0; 336 | } 337 | } else { 338 | s1+=10.0; 339 | } 340 | if(i1<12.681735038757324){ 341 | s0+=64.0; 342 | } else { 343 | s1+=13.0; 344 | } 345 | if(i2<17.2148380279541){ 346 | s0+=67.0; 347 | } else { 348 | s1+=10.0; 349 | } 350 | if(i1<13.532312393188477){ 351 | s0+=69.0; 352 | } else { 353 | s1+=8.0; 354 | } 355 | if(i1<12.681735038757324){ 356 | s0+=60.0; 357 | } else { 358 | s1+=17.0; 359 | } 360 | if(i1<13.167343139648438){ 361 | 
s0+=72.0; 362 | } else { 363 | s1+=5.0; 364 | } 365 | if(i1<12.685188293457031){ 366 | s0+=60.0; 367 | } else { 368 | s1+=17.0; 369 | } 370 | if(i0<9.130266189575195){ 371 | if(i1<12.685188293457031){ 372 | s0+=59.0; 373 | } else { 374 | s1+=5.0; 375 | } 376 | } else { 377 | s1+=13.0; 378 | } 379 | if(i1<12.648662567138672){ 380 | s0+=71.0; 381 | } else { 382 | s1+=6.0; 383 | } 384 | if(i2<16.78363609313965){ 385 | s0+=70.0; 386 | } else { 387 | s1+=7.0; 388 | } 389 | if(i2<17.141380310058594){ 390 | s0+=60.0; 391 | } else { 392 | s1+=17.0; 393 | } 394 | if(i1<12.685188293457031){ 395 | s0+=57.0; 396 | } else { 397 | s1+=20.0; 398 | } 399 | if(i0<9.130266189575195){ 400 | s0+=64.0; 401 | } else { 402 | s1+=13.0; 403 | } 404 | if(i1<13.170796394348145){ 405 | s0+=64.0; 406 | } else { 407 | s1+=13.0; 408 | } 409 | if(i0<9.09391975402832){ 410 | if(i2<16.274038314819336){ 411 | s0+=65.0; 412 | } else { 413 | s1+=1.0; 414 | } 415 | } else { 416 | s1+=11.0; 417 | } 418 | if(i2<17.15595817565918){ 419 | s0+=63.0; 420 | } else { 421 | s1+=14.0; 422 | } 423 | if(i0<8.44096565246582){ 424 | if(i1<12.685188293457031){ 425 | s0+=67.0; 426 | } else { 427 | s1+=3.0; 428 | } 429 | } else { 430 | s1+=7.0; 431 | } 432 | if(i2<16.75387954711914){ 433 | s0+=71.0; 434 | } else { 435 | s1+=6.0; 436 | } 437 | if(i1<12.685188293457031){ 438 | s0+=64.0; 439 | } else { 440 | s1+=13.0; 441 | } 442 | if(i1<12.685188293457031){ 443 | s0+=68.0; 444 | } else { 445 | s1+=9.0; 446 | } 447 | if(i2<17.470439910888672){ 448 | s0+=64.0; 449 | } else { 450 | s1+=13.0; 451 | } 452 | if(i0<9.09391975402832){ 453 | if(i2<17.09811782836914){ 454 | s0+=63.0; 455 | } else { 456 | s1+=3.0; 457 | } 458 | } else { 459 | s1+=11.0; 460 | } 461 | if(i1<13.170796394348145){ 462 | s0+=60.0; 463 | } else { 464 | s1+=17.0; 465 | } 466 | if(i1<13.532312393188477){ 467 | s0+=60.0; 468 | } else { 469 | s1+=17.0; 470 | } 471 | if(i2<16.664493560791016){ 472 | s0+=58.0; 473 | } else { 474 | s1+=19.0; 475 | } 476 | if(i0<8.404618263244629){ 477 | if(i1<12.685188293457031){ 478 | s0+=55.0; 479 | } else { 480 | s1+=7.0; 481 | } 482 | } else { 483 | s1+=15.0; 484 | } 485 | if(i1<12.685188293457031){ 486 | s0+=58.0; 487 | } else { 488 | s1+=19.0; 489 | } 490 | if(i0<11.387308120727539){ 491 | if(i2<16.664493560791016){ 492 | s0+=65.0; 493 | } else { 494 | s1+=1.0; 495 | } 496 | } else { 497 | s1+=11.0; 498 | } 499 | if(i1<13.167343139648438){ 500 | s0+=66.0; 501 | } else { 502 | s1+=11.0; 503 | } 504 | if(i1<12.685188293457031){ 505 | s0+=64.0; 506 | } else { 507 | s1+=13.0; 508 | } 509 | if(i2<17.470439910888672){ 510 | s0+=64.0; 511 | } else { 512 | s1+=13.0; 513 | } 514 | if(i1<12.681735038757324){ 515 | s0+=66.0; 516 | } else { 517 | s1+=11.0; 518 | } 519 | if(i2<16.664493560791016){ 520 | s0+=61.0; 521 | } else { 522 | s1+=16.0; 523 | } 524 | if(i2<16.664493560791016){ 525 | s0+=66.0; 526 | } else { 527 | s1+=11.0; 528 | } 529 | if(i0<8.404618263244629){ 530 | s0+=60.0; 531 | } else { 532 | s1+=17.0; 533 | } 534 | if(i1<12.685188293457031){ 535 | s0+=68.0; 536 | } else { 537 | s1+=9.0; 538 | } 539 | if(i0<8.44096565246582){ 540 | if(i1<13.170796394348145){ 541 | s0+=58.0; 542 | } else { 543 | s1+=4.0; 544 | } 545 | } else { 546 | s1+=15.0; 547 | } 548 | if(i0<8.44096565246582){ 549 | if(i0<4.987161636352539){ 550 | s0+=32.0; 551 | } else { 552 | s0+=31.0; 553 | s1+=3.0; 554 | } 555 | } else { 556 | s1+=11.0; 557 | } 558 | if(i1<12.681735038757324){ 559 | s0+=60.0; 560 | } else { 561 | s1+=17.0; 562 | } 563 | if(i0<8.380342483520508){ 564 | 
if(i1<12.685188293457031){ 565 | s0+=63.0; 566 | } else { 567 | s1+=5.0; 568 | } 569 | } else { 570 | s1+=9.0; 571 | } 572 | if(i2<16.29216957092285){ 573 | s0+=66.0; 574 | } else { 575 | s1+=11.0; 576 | } 577 | if(i0<8.44096565246582){ 578 | if(i0<5.162413597106934){ 579 | s0+=25.0; 580 | s1+=4.0; 581 | } else { 582 | s0+=34.0; 583 | } 584 | } else { 585 | s1+=14.0; 586 | } 587 | if(i0<9.130266189575195){ 588 | s0+=62.0; 589 | } else { 590 | s1+=15.0; 591 | } 592 | if(i1<13.862632751464844){ 593 | s0+=66.0; 594 | } else { 595 | s1+=11.0; 596 | } 597 | if(i2<16.664493560791016){ 598 | s0+=63.0; 599 | } else { 600 | s1+=14.0; 601 | } 602 | if(i2<17.15595817565918){ 603 | s0+=65.0; 604 | } else { 605 | s1+=12.0; 606 | } 607 | if(i1<12.685188293457031){ 608 | s0+=65.0; 609 | } else { 610 | s1+=12.0; 611 | } 612 | if(i2<16.664493560791016){ 613 | s0+=61.0; 614 | } else { 615 | s1+=16.0; 616 | } 617 | if(i1<13.170796394348145){ 618 | s0+=57.0; 619 | } else { 620 | s1+=20.0; 621 | } 622 | if(i1<12.681735038757324){ 623 | s0+=62.0; 624 | } else { 625 | s1+=15.0; 626 | } 627 | if(i1<12.685188293457031){ 628 | s0+=59.0; 629 | } else { 630 | s1+=18.0; 631 | } 632 | if(i1<13.134271621704102){ 633 | s0+=60.0; 634 | } else { 635 | s1+=17.0; 636 | } 637 | float max_s=s0; 638 | int cls=1; 639 | if (max_s < s1) { 640 | max_s = s1; 641 | cls=2; 642 | } 643 | WRITE_IMAGE (out, POS_out_INSTANCE(x,y,z,0), cls); 644 | } 645 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # PoL Bio-Image Analysis Training School on GPU-Accelerated Image Analysis 2 | 3 | Here, we cover the *GPU-Accelerated Image Analysis* Track of the [PoL Bio-Image Analysis Symposium](https://biopol-training.eventmember.de/) 4 | 5 | * clesperanto 6 | * cupy 7 | * dask 8 | * Deconvolution 9 | * Pytorch 10 | * AI-based Denoising 11 | * AI-based Segmentation 12 | 13 | ## License 14 | 15 | [![CC BY 4.0][cc-by-shield]][cc-by] 16 | 17 | This work is licensed by Stephane Rigaud, Brian Northan, Till Korten, Neringa Jurenaite, Apurv Deepak Kulkarni, Peter Steinbach, Sebastian Starke, Johannes Soltwedel, Marvin Albert and Robert Haase under a 18 | [Creative Commons Attribution 4.0 International License][cc-by]. 19 | 20 | [cc-by]: http://creativecommons.org/licenses/by/4.0/ 21 | [cc-by-image]: https://i.creativecommons.org/l/by/4.0/88x31.png 22 | [cc-by-shield]: https://img.shields.io/badge/License-CC%20BY%204.0-lightgrey.svg 23 | 24 | This repository hosts notebooks, information and data for the *GPU-Accelerated Image Analysis* Track of the [PoL Bio-Image Analysis Symposium](https://biopol-training.eventmember.de/). 25 | 26 | https://biapol.github.io/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/ 27 | 28 | It is maintained using [Jupyter lab](https://jupyterlab.readthedocs.io/en/stable/) and build using [Jupyter book](https://jupyterbook.org/intro.html). 29 | 30 | ## Acknowledgements 31 | 32 | This course was held in Dresden, August 2023. 33 | We would like to thank all the people who shared teaching materials we are reusing here. 34 | We acknowledge support by the Deutsche Forschungsgemeinschaft under Germany’s Excellence Strategy—EXC2068–Cluster of Excellence Physics of Life of TU Dresden. 
35 | This project has been made possible in part by grant number 2021-237734 (GPU-accelerating Fiji and friends using distributed CLIJ, NEUBIAS-style, EOSS4) from the Chan Zuckerberg Initiative DAF, an advised fund of the Silicon Valley Community Foundation. 36 | 37 | -------------------------------------------------------------------------------- /build.sh: -------------------------------------------------------------------------------- 1 | jupyter-book build docs/ 2 | 3 | open docs/_build/html/index.html 4 | 5 | -------------------------------------------------------------------------------- /clean.sh: -------------------------------------------------------------------------------- 1 | rm -rf build docs/_build 2 | -------------------------------------------------------------------------------- /data/Haase_MRT_tfl3d1.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/data/Haase_MRT_tfl3d1.tif -------------------------------------------------------------------------------- /data/IXMtest_A02_s9.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/data/IXMtest_A02_s9.tif -------------------------------------------------------------------------------- /data/IXMtest_A02_s9_image_data_source.txt: -------------------------------------------------------------------------------- 1 | This is a crop of the IXMtest_A02_s9_*.tif image, which is part of BBBC022_v1_images_20585w1.zip, BBBC022_v1_images_20585w2.zip and BBBC022_v1_images_20585w3.zip 2 | 3 | We used image set BBBC022v1 [Gustafsdottir et al., PLOS ONE, 2013], available from the Broad Bioimage Benchmark Collection [Ljosa et al., Nature Methods, 2012]. 4 | 5 | CC0 6 | 7 | To the extent possible under law, Anne Carpenter has waived all copyright and related or neighboring rights to BBBC022v1. This work is published from: United States. 8 | 9 | https://bbbc.broadinstitute.org/BBBC022 -------------------------------------------------------------------------------- /data/blobs.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/data/blobs.tif -------------------------------------------------------------------------------- /docs/00_course_preparation/Readme.md: -------------------------------------------------------------------------------- 1 | # Course preparation 2 | Before attending the course, please install mambaforge on your computer as explained in this [blog post](https://biapol.github.io/blog/mara_lampert/getting_started_with_mambaforge_and_python/readme.html). 3 | 4 | If you already have some conda or anaconda installation on your computer, ***please install mambaforge*** anyway as explained in the blog post linked above. 5 | 6 | Furthermore, please install [devbio-napari](https://github.com/haesleinhuepf/devbio-napari#installation) into a fresh conda environment, e.g. 
using this command: 7 | 8 | ``` 9 | mamba create --name devbio-napari-env python=3.9 devbio-napari pyqt -c conda-forge 10 | ``` 11 | 12 | When you are done, you can test your setup by executing these commands from the command line: 13 | ``` 14 | conda activate devbio-napari-env 15 | 16 | napari 17 | ``` 18 | 19 | After Napari opens, click the menu `File > Open Samples > clEsperanto > blobs (from ImageJ)`. In the panel on the right, click on the `Label` button. If Napari then looks like this, you are ready to go: 20 | 21 | ![img.png](img.png) 22 | 23 | ## NVidia graphics cards 24 | 25 | If you have access to an NVidia graphics card, please also set up this conda environment: 26 | 27 | ``` 28 | mamba create --name cupy39 python=3.9 devbio-napari pyqt cupy cudatoolkit napari-cupy-image-processing -c conda-forge 29 | ``` 30 | 31 | ## Access to the ZIH HPC System 32 | 33 | Please follow the instructions outlined here to set up your access to the ZIH HPC System in advance of the training. 34 | 35 | ### VPN - for all users (Linux, Win, MacOS) 36 | The ZIH HPC system can be accessed only via VPN (or within the campus data net). 37 | 38 | Please configure the VPN access as follows: 39 | ● [Linux users](https://tu-dresden.de/zih/dienste/service-katalog/arbeitsumgebung/zugang_datennetz/vpn/openvpn/linux) 40 | ● [Windows users](https://tu-dresden.de/zih/dienste/service-katalog/arbeitsumgebung/zugang_datennetz/vpn/openvpn/windows) 41 | ● [MacOS](https://tu-dresden.de/zih/dienste/service-katalog/arbeitsumgebung/zugang_datennetz/vpn/openvpn/macos) 42 | 43 | Please use the credentials that have been provided to you, esp. the concrete username. 44 | 45 | ### ssh connection (Linux, MacOS) 46 | Linux and MacOS users: Please check whether you can access the ZIH HPC system. Make sure you have established a VPN connection! 47 | 1) Open a console and type in the following (change scadsXXX to your username) 48 | ``` 49 | ssh scadsXXX@taurus.hrsk.tu-dresden.de 50 | ``` 51 | 52 | ### ssh connection (Windows) 53 | Windows users need an ssh client for connecting to the ZIH HPC system. 54 | 0) We recommend using MobaXterm 55 | 1) Download the home edition (free) of [MobaXterm here](https://mobaxterm.mobatek.net/download.html) 56 | 2) Install the client to your machine. A short demo on MobaXterm can be found [here](https://mobaxterm.mobatek.net/demo.html) 57 | 3) After starting MobaXterm, an ssh connection to the ZIH HPC system can be established using the same credentials as for VPN (for the connection 58 | use remote host: taurus.hrsk.tu-dresden.de): 59 | 60 | Hint: make sure to have the VPN connection established! 61 | 62 | ## Troubleshooting: DLL load failed 63 | 64 | In case of error messages such as this one: 65 | ``` 66 | [...] _get_win_folder_with_pywin32 67 | from win32com.shell import shellcon, shell 68 | ImportError: DLL load failed while importing shell: The specified procedure could not be found. 69 | ``` 70 | 71 | Try this command, within the base environment: 72 | 73 | ``` 74 | conda activate base 75 | 76 | pip install --upgrade pywin32==228 77 | ``` 78 | 79 | [Source](https://github.com/conda/conda/issues/11503) 80 | 81 | ## Troubleshooting: Graphics cards drivers 82 | 83 | In case error messages contain "ImportError: DLL load failed while importing cl: The specified procedure could not be found" [see also](https://github.com/clEsperanto/pyclesperanto_prototype/issues/55) or "clGetPlatformIDs failed: PLATFORM_NOT_FOUND_KHR", please install recent drivers for your graphics card and/or OpenCL device.
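Before reinstalling drivers, it can help to check which OpenCL devices are visible from Python at all. A minimal check, assuming `pyclesperanto_prototype` is installed in the active environment:

```python
import pyclesperanto_prototype as cle

# lists all OpenCL-compatible devices; an empty list or an exception here
# usually points to a missing or outdated driver / OpenCL runtime
print(cle.available_device_names())
```
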
Select the right driver source depending on your hardware from this list: 84 | 85 | * [AMD drivers](https://www.amd.com/en/support) 86 | * [NVidia drivers](https://www.nvidia.com/download/index.aspx) 87 | * [Intel GPU drivers](https://www.intel.com/content/www/us/en/download/726609/intel-arc-graphics-windows-dch-driver.html) 88 | * [Intel CPU OpenCL drivers](https://www.intel.com/content/www/us/en/developer/articles/tool/opencl-drivers.html#latest_CPU_runtime) 89 | * [Microsoft Windows OpenCL support](https://www.microsoft.com/en-us/p/opencl-and-opengl-compatibility-pack/9nqpsl29bfff) 90 | 91 | Sometimes, Mac users need to install this: 92 | 93 | mamba install -c conda-forge ocl_icd_wrapper_apple 94 | 95 | Sometimes, Linux users need to install this: 96 | 97 | mamba install -c conda-forge ocl-icd-system 98 | 99 | In case the installation didn't work on the first attempt, you may have to call this command to reset the napari configuration: 100 | 101 | ``` 102 | napari --reset 103 | ``` 104 | 105 | ## Online support 106 | 107 | In case you run into any trouble while installing software, please create a thread on [https://image.sc](https://image.sc) and tag @haesleinhuepf 108 | -------------------------------------------------------------------------------- /docs/00_course_preparation/img.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/00_course_preparation/img.png -------------------------------------------------------------------------------- /docs/10_Clesperanto/10_select_devices.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "fa467b9c", 6 | "metadata": {}, 7 | "source": [ 8 | "# List and select devices\n", 9 | "\n", 10 | "pyclesperanto relies on sending computing instructions to compute devices. Here, devices are Computational Units (CUs) compatible with `OpenCL`. These can be GPUs or CPUs, although we would prefer to use GPUs for their speed.\n", 11 | "\n", 12 | "Before starting to use pyclesperanto, it is important to know which devices are available on your system and which one you want to use based on their performance." 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": 10, 18 | "id": "69c32066", 19 | "metadata": {}, 20 | "outputs": [], 21 | "source": [ 22 | "import pyclesperanto_prototype as cle" 23 | ] 24 | }, 25 | { 26 | "cell_type": "markdown", 27 | "id": "52f3036a-a773-46df-987f-7a68c2a9cac2", 28 | "metadata": {}, 29 | "source": [ 30 | "### Exercise 1: List devices and select a device\n", 31 | "\n", 32 | "Using the methods `available_device_names()` and `select_device()` from the `pyclesperanto_prototype` package, list the available devices on your machine and select one."
33 | ] 34 | }, 35 | { 36 | "cell_type": "code", 37 | "execution_count": 11, 38 | "id": "1922cc9a-556c-414e-ade1-be8c517c209b", 39 | "metadata": {}, 40 | "outputs": [ 41 | { 42 | "name": "stdout", 43 | "output_type": "stream", 44 | "text": [ 45 | "2 devices found:\n", 46 | "['pthread-Intel(R) Core(TM) i7-7820X CPU @ 3.60GHz', 'NVIDIA GeForce RTX 2080 SUPER']\n" 47 | ] 48 | } 49 | ], 50 | "source": [ 51 | "# TODO" 52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "execution_count": 12, 57 | "id": "dc4eaf76-f528-4256-a611-02ea3053bb73", 58 | "metadata": {}, 59 | "outputs": [ 60 | { 61 | "data": { 62 | "text/plain": [ 63 | "" 64 | ] 65 | }, 66 | "execution_count": 12, 67 | "metadata": {}, 68 | "output_type": "execute_result" 69 | } 70 | ], 71 | "source": [ 72 | "# TODO" 73 | ] 74 | }, 75 | { 76 | "cell_type": "markdown", 77 | "id": "a91eb56e", 78 | "metadata": {}, 79 | "source": [ 80 | "__Tips:__ Devices are defined by a `name` and a `dev_type` (gpu, cpu, all). You can use the `select_device` arguments to precisely select the device you want to use. This can be useful if you have multiple devices of the same `dev_type` but not the same name (hello MacBook Pro with M1 and M2 chips!)." 81 | ] 82 | }, 83 | { 84 | "cell_type": "markdown", 85 | "id": "274a322e", 86 | "metadata": {}, 87 | "source": [ 88 | "## Exercise 2: Which device to choose?\n", 89 | "\n", 90 | "For those of you lucky enough to have access to multiple devices, you might wonder which one to choose. Here are some hints:\n", 91 | "- Prefer GPU over CPU for speed\n", 92 | "- Prefer a GPU with more memory over a GPU with less memory (*`GLOBAL_MEM_SIZE`*, *`MAX_MEM_ALLOC_SIZE`*)\n", 93 | "- Prefer a GPU with more or faster compute units (*`MAX_COMPUTE_UNITS`*, *`MAX_CLOCK_FREQUENCY`*)\n", 94 | "\n", 95 | "This information can be retrieved using the `cl_info` method, which prints the __full__ information of all available devices.\n", 96 | "\n", 97 | "Print it and tell us which device, in your opinion, is best suited.\n" 98 | ] 99 | }, 100 | { 101 | "cell_type": "code", 102 | "execution_count": 13, 103 | "id": "0c0d5c20", 104 | "metadata": {}, 105 | "outputs": [], 106 | "source": [ 107 | "# TODO" 108 | ] 109 | }, 110 | { 111 | "cell_type": "markdown", 112 | "id": "e6416521", 113 | "metadata": {}, 114 | "source": [ 115 | "__Tips:__ you may want to save this into a text file\n", 116 | "```python\n", 117 | "with open(\"cl_info.txt\", \"w\") as f:\n", 118 | " f.write( ...
)\n", 119 | "```" 120 | ] 121 | } 122 | ], 123 | "metadata": { 124 | "kernelspec": { 125 | "display_name": "Python 3 (ipykernel)", 126 | "language": "python", 127 | "name": "python3" 128 | }, 129 | "language_info": { 130 | "codemirror_mode": { 131 | "name": "ipython", 132 | "version": 3 133 | }, 134 | "file_extension": ".py", 135 | "mimetype": "text/x-python", 136 | "name": "python", 137 | "nbconvert_exporter": "python", 138 | "pygments_lexer": "ipython3", 139 | "version": "3.9.17" 140 | } 141 | }, 142 | "nbformat": 4, 143 | "nbformat_minor": 5 144 | } 145 | -------------------------------------------------------------------------------- /docs/10_Clesperanto/clesperanto-polbias-course.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/10_Clesperanto/clesperanto-polbias-course.pdf -------------------------------------------------------------------------------- /docs/10_Clesperanto/maximum_z_projection.cl: -------------------------------------------------------------------------------- 1 | // the sampler defines the GPU behaviour when accessing pixels outside of the image 2 | __constant sampler_t sampler = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP_TO_EDGE | CLK_FILTER_NEAREST; 3 | 4 | // the kernel function itself, with the parameters 5 | __kernel void maximum_z_projection( 6 | IMAGE_src_TYPE src, // the input image, named 'src' 7 | IMAGE_dst_TYPE dst // the output image, named 'dst' 8 | ) 9 | { 10 | const int x = get_global_id(0); // the x coordinate of the current pixel, provided by the GPU thread 11 | const int y = get_global_id(1); // the y coordinate of the current pixel, provided by the GPU thread 12 | 13 | IMAGE_src_PIXEL_TYPE max = 0; 14 | for (int z = 0; z < GET_IMAGE_DEPTH(src); ++z) // loop over the whole z-range of the image 15 | { 16 | // read the pixel value at the current position (x,y,z) from the input image 'src' 17 | const IMAGE_src_PIXEL_TYPE value = READ_IMAGE(src, sampler, POS_src_INSTANCE(x,y,z,0)).x; 18 | if (value > max || z == 0) { // conditional statement to find the maximum value 19 | max = value; 20 | } 21 | } 22 | 23 | // write the maximum value at position (x,y) of the output image 'dst' 24 | WRITE_IMAGE(dst, POS_dst_INSTANCE(x,y,0,0), CONVERT_dst_PIXEL_TYPE(max)); 25 | } -------------------------------------------------------------------------------- /docs/10_Clesperanto/readme.md: -------------------------------------------------------------------------------- 1 | # Clesperanto 2 | 3 | This regroups a collection of introductory notebooks for the course on the pyclesperanto library for image processing. 4 | 5 | ## Installation instructions for pyclesperanto-prototype 6 | 7 | First create a virtual environment: 8 | 9 | ```bash 10 | mamba create -n myenv python=3.9 11 | mamba activate myenv 12 | ``` 13 | Note: 14 | - You can replace `myenv` with a name of your choice. 15 | - You can replace `python=3.9` with the version of python you want to use.
16 | 17 | Then install the package: 18 | ```bash 19 | mamba install -c conda-forge pyclesperanto-prototype 20 | ``` 21 | or using pip: 22 | ```bash 23 | pip install pyclesperanto-prototype 24 | ``` 25 | 26 | ### Troubleshooting 27 | 28 | #### MacOS 29 | ```bash 30 | mamba install -c conda-forge ocl_icd_wrapper_apple 31 | ``` 32 | #### Linux 33 | ```bash 34 | mamba install -c conda-forge ocl-icd-system 35 | ``` 36 | #### No compatible OpenCL device (no GPU found) 37 | ```bash 38 | mamba install oclgrind -c conda-forge 39 | ``` 44 | #### Other issues? 45 | Please have a look [here](https://github.com/clEsperanto/pyclesperanto_prototype/issues) and ask for help if needed. 46 | 47 | 48 | 49 | ## Installation instructions for pyclesperanto 50 | 51 | __WARNING__: this is still under development, but we are looking for testers and early users to provide feedback and help improve it. 52 | 53 | First create a virtual environment: 54 | 55 | ```bash 56 | mamba create -n myenv 57 | mamba activate myenv 58 | ``` 59 | 60 | Then install the package: 61 | ```bash 62 | pip install pyclesperanto 63 | ``` 64 | 65 | Note: no mamba recipe is available yet. 66 | 67 | ### Notebook update 68 | 69 | If you want to try the in-development version of the library with these notebooks, you will have to update the `import` line and remove the `_prototype` suffix: 70 | ```python 71 | import pyclesperanto as cle 72 | ``` -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/Intro_Napari_Assistant.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/23_clesperanto_assistant/Intro_Napari_Assistant.pdf -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/images/export_notebooks.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/23_clesperanto_assistant/images/export_notebooks.mp4 -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/images/export_notebooks01.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/23_clesperanto_assistant/images/export_notebooks01.jpg -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/images/export_notebooks02.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/23_clesperanto_assistant/images/export_notebooks02.jpg -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/images/export_notebooks03.jpg: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/23_clesperanto_assistant/images/export_notebooks03.jpg -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/images/export_notebooks04.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/23_clesperanto_assistant/images/export_notebooks04.jpg -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/images/export_notebooks05.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/23_clesperanto_assistant/images/export_notebooks05.jpg -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/images/export_notebooks06.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/23_clesperanto_assistant/images/export_notebooks06.jpg -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/images/export_notebooks07.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/23_clesperanto_assistant/images/export_notebooks07.jpg -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/images/export_notebooks08.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/23_clesperanto_assistant/images/export_notebooks08.jpg -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/images/export_notebooks09.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/23_clesperanto_assistant/images/export_notebooks09.jpg -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/images/filter_clesperanto.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/23_clesperanto_assistant/images/filter_clesperanto.png -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/images/hela_cells_screenshot.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/23_clesperanto_assistant/images/hela_cells_screenshot.png -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/images/install_package01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/23_clesperanto_assistant/images/install_package01.png -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/images/napari-assistant.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/23_clesperanto_assistant/images/napari-assistant.mp4 -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/images/napari-assistant01.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/23_clesperanto_assistant/images/napari-assistant01.jpg -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/images/napari-assistant02.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/23_clesperanto_assistant/images/napari-assistant02.jpg -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/images/napari-assistant03.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/23_clesperanto_assistant/images/napari-assistant03.jpg -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/images/napari-assistant04.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/23_clesperanto_assistant/images/napari-assistant04.jpg -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/images/napari-assistant05.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/23_clesperanto_assistant/images/napari-assistant05.jpg -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/images/napari-assistant06.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/23_clesperanto_assistant/images/napari-assistant06.jpg -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/images/napari-assistant07.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/23_clesperanto_assistant/images/napari-assistant07.jpg -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/images/napari-assistant08.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/23_clesperanto_assistant/images/napari-assistant08.jpg -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/images/napari-assistant09.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/23_clesperanto_assistant/images/napari-assistant09.jpg -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/images/napari-assistant10.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/23_clesperanto_assistant/images/napari-assistant10.jpg -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/images/napari-assistant11.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/23_clesperanto_assistant/images/napari-assistant11.jpg -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/images/napari-assistant12.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/23_clesperanto_assistant/images/napari-assistant12.jpg -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/images/napari-assistant13.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/23_clesperanto_assistant/images/napari-assistant13.jpg -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/images/napari-assistant14.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/23_clesperanto_assistant/images/napari-assistant14.jpg -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/images/napari-assistant15.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/23_clesperanto_assistant/images/napari-assistant15.jpg -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/images/napari-assistant16.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/23_clesperanto_assistant/images/napari-assistant16.jpg -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/images/napari-assistant17.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/23_clesperanto_assistant/images/napari-assistant17.jpg -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/images/napari-assistant18.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/23_clesperanto_assistant/images/napari-assistant18.jpg -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/images/napari-assistant1_.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/23_clesperanto_assistant/images/napari-assistant1_.jpg -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/intro.md: -------------------------------------------------------------------------------- 1 | # Using clesperanto from the Napari Assistant 2 | 3 | In this section we will use the [napari-assistant](https://github.com/haesleinhuepf/napari-assistant), a Napari plugin that allows you to construct GPU-accelerated image processing workflows interactively and generate Jupyter Notebooks from them. 4 | 5 | Download the slides: 6 | * [Introduction to the Napari Assistant](https://github.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/tree/main/docs/23_clesperanto_assistant/Intro_Napari_Assistant.pdf) 7 | -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/napari-assistant.md: -------------------------------------------------------------------------------- 1 | # The Napari Assistant 2 | 3 | The Napari Assistant is a plugin for napari that allows you setting up an image processing workflow. 4 | 5 | This tutorial is also available as video [napari-assistant.mp4](images/napari-assistant.mp4). 
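For orientation, the point-and-click steps below assemble roughly the following clesperanto pipeline. This is only a sketch, assuming `pyclesperanto_prototype` is installed and `nuclei` is an image you have loaded (e.g. the nuclei channel of the Cells 3D example):

```python
import pyclesperanto_prototype as cle

# the three Assistant steps used in this tutorial: remove noise, binarize, label
denoised = cle.gaussian_blur(nuclei, sigma_x=1, sigma_y=1, sigma_z=1)
binary = cle.threshold_otsu(denoised)
labels = cle.connected_components_labeling_box(binary)
```

The Assistant builds this kind of operation chain interactively and keeps the downstream steps up to date whenever a parameter changes.
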
6 | 7 | Start napari from the command line like this: 8 | 9 | ```bash 10 | conda activate my_first_env 11 | 12 | napari 13 | ``` 14 | 15 | ![](images/napari-assistant01.jpg) 16 | 17 | The napari window will open. Click on the menu `File > Open Samples > Cells (3D+2Ch)` to open an example image. 18 | 19 | ![](images/napari-assistant02.jpg) 20 | 21 | ![](images/napari-assistant03.jpg) 22 | 23 | You can explore this dataset by clicking on the `2D/3D` view button. 24 | 25 | ![](images/napari-assistant04.jpg) 26 | 27 | Start the Napari Assistant from the `Tools > Utilities > Assistant (na)` menu. 28 | 29 | ![](images/napari-assistant05.jpg) 30 | 31 | Enter "clesperanto" in the search field to only show functions from the clesperanto library. 32 | 33 | ![](images/filter_clesperanto.png) 34 | 35 | Within the `Assistant` panel, click on the `Remove noise` button. 36 | 37 | ![](images/napari-assistant06.jpg) 38 | 39 | Click on the `Eye` buttons in the layer list to hide the original image and show the result of the `Remove noise` step only. 40 | 41 | ![](images/napari-assistant07.jpg) 42 | 43 | Click on the `Binarize` button in the Assistant panel to add a new step to the workflow that generates a binary image from the current layer. 44 | 45 | ![](images/napari-assistant08.jpg) 46 | 47 | Toggle 2D/3D view and layer visibility to explore the result of the `Binarize` step. 48 | 49 | ![](images/napari-assistant09.jpg) 50 | 51 | After switching back to 2D view, click the `Label` button in the Assistant and choose the operation `Connected component labeling (clEsperanto)`. 52 | 53 | ![](images/napari-assistant11.jpg) 54 | 55 | Select the `Result of gaussian_blur` layer in the layer list and modify its `sigma` parameters. You will note that the subsequent steps (Threshold Otsu and Connected Component Labeling) are also updated. 56 | 57 | ![](images/napari-assistant12.jpg) 58 | 59 | Switch to grid view, show all layers using their `Eye` buttons and continue modifying the parameters. 60 | 61 | ![](images/napari-assistant13.jpg) 62 | 63 | ![](images/napari-assistant14.jpg) 64 | 65 | Close all layers except `nuclei` and `membrane`. 66 | 67 | ![](images/napari-assistant15.jpg) 68 | 69 | Turn off grid view and click again on the `Label` button in the Assistant. 70 | 71 | ![](images/napari-assistant16.jpg) 72 | 73 | This time, do not change the operation; instead, modify the `spot_sigma` parameter. 74 | 75 | ![](images/napari-assistant17.jpg) 76 | 77 | Toggle to 3D view again and inspect the result of this single step. 78 | 79 | ![](images/napari-assistant18.jpg) 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | -------------------------------------------------------------------------------- /docs/23_clesperanto_assistant/notebook_export.md: -------------------------------------------------------------------------------- 1 | # Generating Jupyter Notebooks from the Napari Assistant 2 | 3 | After setting up a workflow using the Napari Assistant, we can export Python code, e.g. as a Jupyter Notebook. 4 | 5 | This tutorial is also available as video [export_notebooks.mp4](images/export_notebooks.mp4). 6 | 7 | In the Assistant panel, click on the `Generate Code...` button and select the `Export Jupyter Notebook using Napari` menu entry. 8 | 9 | ![](images/export_notebooks01.jpg) 10 | 11 | Jupyter lab will open and ask you to select a Kernel. Keep the default option and click on `Select`. 12 | 13 | ![](images/export_notebooks02.jpg) 14 | 15 | When executing the notebook, errors may appear, e.g. when loading the data.
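The screenshots below walk through diagnosing such an error. The fix usually boils down to loading the image explicitly from disk instead of referencing the napari session; a minimal sketch (the file name is hypothetical and depends on where you saved the layer):

```python
from skimage.io import imread

# load the previously saved layer from disk instead of relying on the napari session
image0_n = imread("nuclei.tif")
```
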
16 | 17 | ![](images/export_notebooks03.jpg) 18 | 19 | Scroll down to the end of the error message to read what didn't work. 20 | 21 | ![](images/export_notebooks04.jpg) 22 | 23 | Scroll up again to the notebook cell that didn't work and modify the code so that it uses the `imread` function to load the image from disk. 24 | 25 | ![](images/export_notebooks05.jpg) 26 | 27 | In case you don't have the `nuclei` image saved on disk already, use the menu `File > Save current Layer(s)` to save the `nuclei` layer as a `.tif` file. 28 | 29 | ![](images/export_notebooks06.jpg) 30 | 31 | Afterwards, rerun the notebook and inspect the result. The Napari viewer that opened in the background will also be shown within the notebook. 32 | 33 | ![](images/export_notebooks07.jpg) 34 | 35 | If you also want to view the raw image data together with the segmentation result, add these lines to your code: 36 | 37 | ```python 38 | viewer.add_image(image0_n) 39 | napari.utils.nbscreenshot(viewer) 40 | ``` 41 | 42 | ![](images/export_notebooks08.jpg) 43 | 44 | Rerun the notebook, or modify the order of the layers manually in the Napari viewer. At the end, it should look like this. 45 | 46 | ![](images/export_notebooks09.jpg) 47 | 48 | Voila! You have now generated a Jupyter Notebook from a Napari Assistant workflow. This notebook documents your work reproducibly and can be shared with others. 49 | -------------------------------------------------------------------------------- /docs/25_cupy/colab_runtime.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/25_cupy/colab_runtime.png -------------------------------------------------------------------------------- /docs/25_cupy/cupy.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/25_cupy/cupy.pdf -------------------------------------------------------------------------------- /docs/25_cupy/napari-assistant-cupy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/25_cupy/napari-assistant-cupy.png -------------------------------------------------------------------------------- /docs/25_cupy/napari-assistant-generated-cupy-notebook.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "id": "6aeccf6b", 7 | "metadata": { 8 | "lines_to_next_cell": 2 9 | }, 10 | "outputs": [], 11 | "source": [ 12 | "from skimage.io import imread\n", 13 | "import napari_cupy_image_processing as ncupy # version 0.4.1" 14 | ] 15 | }, 16 | { 17 | "cell_type": "markdown", 18 | "id": "dbf02833", 19 | "metadata": {}, 20 | "source": [ 21 | "## Loading 'blobs'" 22 | ] 23 | }, 24 | { 25 | "cell_type": "code", 26 | "execution_count": null, 27 | "id": "46077aea", 28 | "metadata": { 29 | "lines_to_next_cell": 2 30 | }, 31 | "outputs": [], 32 | "source": [ 33 | "image0_b = imread(\n", 34 | " \"C:/Users/haase/mambaforge/envs/cupy39_1/lib/site-packages/napari_pyclesperanto_assistant/data/blobs.tif\")\n", 35 | "image0_b" 36 | ] 37 | }, 38 | { 39 | "cell_type": "markdown", 40
| "id": "0f5b9244", 41 | "metadata": {}, 42 | "source": [ 43 | "## gaussian filter" 44 | ] 45 | }, 46 | { 47 | "cell_type": "code", 48 | "execution_count": null, 49 | "id": "c908a03d", 50 | "metadata": {}, 51 | "outputs": [], 52 | "source": [ 53 | "image1_G = ncupy.gaussian_filter(image0_b, 1.0)\n", 54 | "image1_G" 55 | ] 56 | }, 57 | { 58 | "cell_type": "markdown", 59 | "id": "f481ef57", 60 | "metadata": {}, 61 | "source": [ 62 | "## threshold otsu" 63 | ] 64 | }, 65 | { 66 | "cell_type": "code", 67 | "execution_count": null, 68 | "id": "678ca612", 69 | "metadata": {}, 70 | "outputs": [], 71 | "source": [ 72 | "image2_T = ncupy.threshold_otsu(image1_G)\n", 73 | "image2_T" 74 | ] 75 | }, 76 | { 77 | "cell_type": "markdown", 78 | "id": "a238fcbf", 79 | "metadata": {}, 80 | "source": [ 81 | "## label" 82 | ] 83 | }, 84 | { 85 | "cell_type": "code", 86 | "execution_count": null, 87 | "id": "11b5c893", 88 | "metadata": {}, 89 | "outputs": [], 90 | "source": [ 91 | "image3_C = ncupy.label(image2_T)\n", 92 | "image3_C" 93 | ] 94 | } 95 | ], 96 | "metadata": { 97 | "jupytext": { 98 | "cell_metadata_filter": "-all", 99 | "main_language": "python", 100 | "notebook_metadata_filter": "-all" 101 | } 102 | }, 103 | "nbformat": 4, 104 | "nbformat_minor": 5 105 | } -------------------------------------------------------------------------------- /docs/25_cupy/readme.md: -------------------------------------------------------------------------------- 1 | # cupy 2 | In this session we will focus on image processing using [cupy](https://docs.cupy.dev/), a library that makes processing of images on [CUDA](https://developer.nvidia.com/cuda-toolkit)-compatible NVidia graphics cards available from Python. 3 | 4 | [Download slides](https://github.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/tree/main/docs/25_cupy/cupy.pdf) 5 | 6 | To get started, we need to install cupy, e.g. like this: 7 | ``` 8 | mamba create --name cupy39 python=3.9 devbio-napari pyqt cupy cudatoolkit napari-cupy-image-processing -c conda-forge 9 | ``` 10 | 11 | Afterwards, we can activate the environment and start jupyter lab: 12 | ```bash 13 | mamba activate cupy39 14 | ``` 15 | 16 | ```bash 17 | jupyter lab 18 | ``` 19 | 20 | In case your computer does not have an NVidia graphics card, you can follow the exercises on [Google colab](https://colab.research.google.com/?utm_source=scs-index), where cupy comes pre-installed.
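Whether you work locally or on Colab, a quick sanity check that cupy actually sees a GPU could look like this (a minimal sketch, not part of the course notebooks):

```python
import cupy as cp

print(cp.cuda.runtime.getDeviceCount())  # number of visible CUDA devices
x = cp.arange(10) ** 2                   # allocated and computed on the GPU
print(x.sum(), type(x))                  # a cupy.ndarray living in GPU memory
```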
21 | 22 | You can directly load the notebooks there by entering the notebook's file name in this URL: 23 | ``` 24 | https://colab.research.google.com/github/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/blob/main/docs/25_cupy/<notebook_name>.ipynb 25 | ``` 26 | 27 | For example [https://colab.research.google.com/github/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/blob/main/docs/25_cupy/10_basics.ipynb](https://colab.research.google.com/github/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/blob/main/docs/25_cupy/10_basics.ipynb) 28 | 29 | When working with Google colab, you may have to install packages in your kernel, such as [stackview](https://github.com/haesleinhuepf/stackview): 30 | 31 | ``` 32 | !pip install stackview ipycanvas==0.11 33 | ``` 34 | 35 | Make sure to select a GPU runtime from the menu `Runtime > Change runtime type` ([read more](https://biapol.github.io/blog/robert_haase/cupy_cucim/readme.html)). 36 | 37 | ![](colab_runtime.png) 38 | 39 | -------------------------------------------------------------------------------- /docs/30_Deconvolution/0_intro_to_decon.md: -------------------------------------------------------------------------------- 1 | # Intro to Deconvolution and Restoration 2 | 3 | ## What is Deconvolution? 4 | 5 | * A procedure used to reverse convolution 6 | * Convolution (blurring) -> Deconvolution (reverse blur) 7 | * Also need to consider noise 8 | 9 | Deconvolution is a type of image restoration which attempts to restore the true signal in the presence of blur and noise. 10 | 11 | The problem is ill-posed (information is lost, so the original cannot be restored completely). 12 | 13 | ## What is the Point Spread Function (PSF)? 14 | 15 | * Describes the response of an imaging system to a point-like object. 16 | * The PSF is the basic unit of image formation. 17 | * A lens does not focus light to a point but rather to a diffraction pattern in the lateral plane. 18 | * The 3D PSF of a widefield instrument is hourglass-shaped when viewed in the z plane. 19 | * The 3D PSF of a confocal instrument is football-shaped when viewed in the z plane. 20 | 21 | ![](images/PSF_xy.jpg) 22 | ![](images/PSF_xz.jpg) 23 | 24 | ### Variable Point Spread Function 25 | 26 | * The PSF varies both axially and laterally because of aberrations. 27 | * Examples in this session use a stationary PSF (approximation) 28 | * It is a good idea to image a field of beads to evaluate how much the PSF varies 29 | * If it varies too much, one can process in blocks and interpolate (approximation) 30 | * Deconvolution equations can be modified to consider a variable PSF (Preza, C. and Conchello, J-A., "Depth-variant maximum-likelihood restoration for three-dimensional fluorescence microscopy"); software available [here](https://www.memphis.edu/cirl/cosmos/index.php) 31 | 32 | 33 | ## Imaging Process 34 | 35 | * Image = Truth convolved with PSF + Noise 36 | 37 | Having a means to run a forward imaging model is important for 38 | 39 | 1. Creating simulations to test deconvolution 40 | 2. Creating simulations to train deep learning restoration systems.
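A minimal sketch of such a forward model, assuming only numpy and scipy are available (the PSF below is a toy anisotropic Gaussian, not a real microscope PSF):

```python
import numpy as np
from scipy.signal import fftconvolve

# toy ground truth: a single bright point in a 3D volume
truth = np.zeros((64, 128, 128), dtype=np.float32)
truth[32, 64, 64] = 1000.0

# toy anisotropic Gaussian PSF (a real PSF comes from optics theory or a bead image)
zz, yy, xx = np.meshgrid(np.arange(-8, 9), np.arange(-8, 9), np.arange(-8, 9), indexing="ij")
psf = np.exp(-(xx**2 + yy**2) / (2 * 2.0**2) - zz**2 / (2 * 4.0**2)).astype(np.float32)
psf /= psf.sum()

blurred = fftconvolve(truth, psf, mode="same")        # Truth convolved with PSF
noisy = np.random.poisson(np.clip(blurred, 0, None))  # + shot (Poisson) noise
```

The images below illustrate exactly this chain: the true sphere, the sphere convolved with the PSF, and the convolved sphere with noise added.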
41 | 42 | ![](images/sphere_xy.jpg) 43 | ![](images/sphere_convolved_xy.jpg) 44 | ![](images/sphere_convolved_noise_xy.jpg) 45 | 46 | ![](images/sphere_xz.jpg) 47 | ![](images/sphere_convolved_xz.jpg) 48 | ![](images/sphere_convolved_noise_xz.jpg) 49 | 50 | 51 | ## Deconvolution 52 | 53 | * Deconvolution restores high frequencies 54 | * The microscope OTF is band-limited, so some frequencies are lost completely (ill-posed) 55 | * Noise contains high frequencies, thus deconvolution can amplify noise. 56 | 57 | ## Deconvolution approaches 58 | 59 | 1. Solve in the frequency domain (Inverse Filter) 60 | 2. Solve in the frequency domain and use regularization to minimize noise (Wiener Filter) 61 | 3. Iterative approaches (Richardson Lucy) 62 | 4. Iterative approaches with regularization (Richardson Lucy with Total Variation Regularization) 63 | 64 | ## Richardson Lucy Iterations 65 | 66 | The images below show, left to right, the blurred sphere and the result of 10, 20 and 30 accelerated Richardson Lucy iterations. 67 | 68 | ![](images/rl_iterations/xy.jpg) 69 | ![](images/rl_iterations/rl10xy.jpg) 70 | ![](images/rl_iterations/rl20xy.jpg) 71 | ![](images/rl_iterations/rl30xy.jpg) 72 | 73 | ![](images/rl_iterations/xz.jpg) 74 | ![](images/rl_iterations/rl10xz.jpg) 75 | ![](images/rl_iterations/rl20xz.jpg) 76 | ![](images/rl_iterations/rl30xz.jpg) 77 | 78 | 79 | ## Richardson Lucy with Total Variation regularization (Noisy Image) 80 | 81 | Richardson Lucy deconvolution can amplify noise, so regularization is used to limit noise. One approach is the Richardson Lucy with Total Variation algorithm (Dey et al. 2006). The image on the left was deconvolved with 50 iterations of accelerated Richardson Lucy deconvolution, the image on the right with 50 iterations of accelerated Richardson Lucy with Total Variation regularization (regularization factor = 0.002). 82 | 83 | ![](images/rl_rltv/sphere_deconvolved_rla50_xy.jpg) 84 | ![](images/rl_rltv/sphere_deconvolved_rltv0.002_a50_xy.jpg) 85 | 86 | 87 | ![](images/rl_rltv/sphere_deconvolved_rla50_xz.jpg) 88 | ![](images/rl_rltv/sphere_deconvolved_rltv0.002_a50_xz.jpg) 89 | 90 | 91 | ## Edge Handling 92 | 93 | Non-circulant deconvolution (BIG Lab technical note 2014, M. Bertero, P. Boccacci 2005) 94 | 95 | ## How do we make deconvolution fast? 96 | 97 | * Algorithm acceleration 98 | * Take a bigger step at each iteration 99 | * Vector Acceleration (Biggs) 100 | * Hardware acceleration 101 | * Fast math libraries 102 | * Multi-threading 103 | * GPU 104 | 105 | -------------------------------------------------------------------------------- /docs/30_Deconvolution/9_Dask_Deconvolution.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "## Dask deconvolution\n", 8 | "\n", 9 | "With modern (many Gigabyte) image sizes it is often not possible to load the entire image into GPU memory. And even if the entire image does fit into GPU memory, keep in mind that multiple buffers are needed to perform FFT convolution and deconvolution (remember how many buffers were needed for example 2).\n", 10 | "\n", 11 | "In the case where we don't have enough GPU memory to perform the entire calculation, we can use Dask to load the image in chunks and perform the deconvolution on each chunk. This is a bit more complicated than the previous examples, but it is still possible to do it in just a few lines of code."
12 | ] 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": 1, 17 | "metadata": {}, 18 | "outputs": [], 19 | "source": [ 20 | "import dask.array as da\n", 21 | "from clij2fft.richardson_lucy import richardson_lucy_nc \n", 22 | "import numpy as np\n", 23 | "import stackview\n" 24 | ] 25 | }, 26 | { 27 | "cell_type": "code", 28 | "execution_count": 2, 29 | "metadata": {}, 30 | "outputs": [ 31 | { 32 | "name": "stdout", 33 | "output_type": "stream", 34 | "text": [ 35 | "(128, 256, 256) (128, 256, 256) (128, 256, 256)\n" 36 | ] 37 | } 38 | ], 39 | "source": [ 40 | "from skimage.io import imread\n", 41 | "\n", 42 | "\n", 43 | "from skimage.io import imread\n", 44 | "from decon_helper import image_path\n", 45 | "\n", 46 | "image_name='Bars-G10-P30-stack.tif'\n", 47 | "psf_name='PSF-Bars-stack.tif'\n", 48 | "truth_name='Bars-stack.tif'\n", 49 | "\n", 50 | "im=imread(image_path / image_name)\n", 51 | "psf=imread(image_path / psf_name)\n", 52 | "truth=imread(image_path / truth_name)\n", 53 | "im=im.astype('float32')\n", 54 | "psf=psf.astype('float32')\n", 55 | "psf=psf/psf.sum()\n", 56 | "print(im.shape, psf.shape, truth.shape)\n", 57 | "\n", 58 | "# define the PSF XY half size and the XY overlap, we want the PSF half size to be smaller than the overlap\n", 59 | "psfHalfSize = 16\n", 60 | "\n", 61 | "# crop PSF using PSFHalfSize\n", 62 | "psf=psf[:,int(psf.shape[1]/2)-psfHalfSize:int(psf.shape[1]/2)+psfHalfSize-1,int(psf.shape[2]/2)-psfHalfSize:int(psf.shape[2]/2)+psfHalfSize-1]\n" 63 | ] 64 | }, 65 | { 66 | "cell_type": "markdown", 67 | "metadata": {}, 68 | "source": [ 69 | "## Define number of chunks\n", 70 | "\n", 71 | "Define the number of chunks to divide the image into. \n", 72 | "\n", 73 | "(In this example the image is relatively small so likely the image and arrays needed for FFT based calculations would fit into the GPU without chunking, in a real life example we would pre-compute the largest chunk size we could process given memory constraints and base the chunk size on that)." 
74 | ] 75 | }, 76 | { 77 | "cell_type": "code", 78 | "execution_count": 3, 79 | "metadata": {}, 80 | "outputs": [ 81 | { 82 | "name": "stdout", 83 | "output_type": "stream", 84 | "text": [ 85 | "chunks 128 128 128\n" 86 | ] 87 | } 88 | ], 89 | "source": [ 90 | "num_x_chunks = 2\n", 91 | "num_y_chunks = 2\n", 92 | "num_z_chunks = 1\n", 93 | "\n", 94 | "z_chunk_size = im.shape[0]\n", 95 | "y_chunk_size = int(im.shape[1]/num_x_chunks)\n", 96 | "x_chunk_size = int(im.shape[2]/num_y_chunks)\n", 97 | "print('chunks', z_chunk_size, y_chunk_size, x_chunk_size)\n", 98 | "# create dask image\n" 99 | ] 100 | }, 101 | { 102 | "cell_type": "markdown", 103 | "metadata": {}, 104 | "source": [ 105 | "## Define the deconvolver" 106 | ] 107 | }, 108 | { 109 | "cell_type": "code", 110 | "execution_count": 4, 111 | "metadata": {}, 112 | "outputs": [], 113 | "source": [ 114 | "try:\n", 115 | " from clij2fft.richardson_lucy import richardson_lucy_nc \n", 116 | " def deconv_chunk(img):\n", 117 | " print(img.shape,psf.shape)\n", 118 | " result = richardson_lucy_nc(img, psf, iterations, reg)\n", 119 | " print('finished decon chunk')\n", 120 | " return result\n", 121 | " #return stack\n", 122 | "except ImportError:\n", 123 | " print('clij2fft non-circulant rl not imported')\n", 124 | " try:\n", 125 | " import RedLionfishDeconv as rl\n", 126 | " print('redlionfish rl imported')\n", 127 | " def deconv_chunk(img, psf, iterations):\n", 128 | " print(img.shape,psf.shape)\n", 129 | " result = rl.doRLDeconvolutionFromNpArrays(img, psf, niter=iterations, method='gpu', resAsUint8=False )\n", 130 | " print('finished decon chunk')\n", 131 | " return result\n", 132 | " except ImportError:\n", 133 | " print('redlionfish rl not imported')\n", 134 | "\n" 135 | ] 136 | }, 137 | { 138 | "cell_type": "markdown", 139 | "metadata": {}, 140 | "source": [ 141 | "## Deconvolve in chunks with overlap between chunks\n", 142 | "\n", 143 | "Here we call the dask deconvolution using an overlap factor to prevent edge artifacts between chunks." 
144 | ] 145 | }, 146 | { 147 | "cell_type": "code", 148 | "execution_count": 5, 149 | "metadata": {}, 150 | "outputs": [ 151 | { 152 | "name": "stdout", 153 | "output_type": "stream", 154 | "text": [ 155 | "(0, 0, 0) (128, 31, 31)\n", 156 | "(128, 176, 176) (128, 31, 31)\n", 157 | "get lib\n" 158 | ] 159 | }, 160 | { 161 | "name": "stderr", 162 | "output_type": "stream", 163 | "text": [ 164 | "1 warning generated.\n", 165 | "2 warnings generated.\n", 166 | "2 warnings generated.\n", 167 | "2 warnings generated.\n", 168 | "2 warnings generated.\n", 169 | "2 warnings generated.\n", 170 | "1 warning generated.\n" 171 | ] 172 | }, 173 | { 174 | "name": "stdout", 175 | "output_type": "stream", 176 | "text": [ 177 | "\n", 178 | "Richardson Lucy Started\n", 179 | "0 10 20 30 40 50 60 70 80 90 \n", 180 | "Richardson Lucy Finishedfinished decon chunk\n", 181 | "(128, 176, 176) (128, 31, 31)\n", 182 | "get lib\n", 183 | "\n", 184 | "Richardson Lucy Started\n", 185 | "0 10 20 30 40 50 60 70 80 90 \n", 186 | "Richardson Lucy Finishedfinished decon chunk\n", 187 | "(128, 176, 176) (128, 31, 31)\n", 188 | "get lib\n", 189 | "\n", 190 | "Richardson Lucy Started\n", 191 | "0 10 20 30 40 50 60 70 80 90 \n", 192 | "Richardson Lucy Finishedfinished decon chunk\n", 193 | "(128, 176, 176) (128, 31, 31)\n", 194 | "get lib\n", 195 | "\n", 196 | "Richardson Lucy Started\n", 197 | "0 10 20 30 40 50 60 70 80 90 \n", 198 | "Richardson Lucy Finishedfinished decon chunk\n" 199 | ] 200 | }, 201 | { 202 | "data": { 203 | "application/vnd.jupyter.widget-view+json": { 204 | "model_id": "f3a6a968548c4b4b81d3c35d02c2edcc", 205 | "version_major": 2, 206 | "version_minor": 0 207 | }, 208 | "text/plain": [ 209 | "HBox(children=(VBox(children=(HBox(children=(VBox(children=(ImageWidget(height=256, width=256),)),)), IntSlide…" 210 | ] 211 | }, 212 | "execution_count": 5, 213 | "metadata": {}, 214 | "output_type": "execute_result" 215 | } 216 | ], 217 | "source": [ 218 | "iterations = 100\n", 219 | "reg = 0.0001\n", 220 | "\n", 221 | "overlap = 24\n", 222 | "dimg = da.from_array(im,chunks=(z_chunk_size, y_chunk_size, x_chunk_size))\n", 223 | "\n", 224 | "out = dimg.map_overlap(deconv_chunk, depth={0:0, 1:overlap, 2:overlap}, boundary='reflect', dtype=np.float32)\n", 225 | "\n", 226 | "decon_overlap_24 = out.compute(num_workers=1)\n", 227 | "stackview.orthogonal(decon_overlap_24)\n", 228 | " " 229 | ] 230 | }, 231 | { 232 | "cell_type": "markdown", 233 | "metadata": {}, 234 | "source": [ 235 | "## Deconvolve in chunks with no overlap\n", 236 | "\n", 237 | "Here we deconvolve in chunks without overlap (this will be a bit faster and use a little less memory) but as you can see we end up with artifacts on the seems of the chunks." 
238 | ] 239 | }, 240 | { 241 | "cell_type": "code", 242 | "execution_count": 6, 243 | "metadata": {}, 244 | "outputs": [ 245 | { 246 | "name": "stdout", 247 | "output_type": "stream", 248 | "text": [ 249 | "(0, 0, 0) (128, 31, 31)\n", 250 | "(128, 128, 128) (128, 31, 31)\n", 251 | "get lib\n" 252 | ] 253 | }, 254 | { 255 | "name": "stderr", 256 | "output_type": "stream", 257 | "text": [ 258 | "2 warnings generated.\n" 259 | ] 260 | }, 261 | { 262 | "name": "stdout", 263 | "output_type": "stream", 264 | "text": [ 265 | "\n", 266 | "Richardson Lucy Started\n", 267 | "0 10 20 30 40 50 60 70 80 90 \n", 268 | "Richardson Lucy Finishedfinished decon chunk\n", 269 | "(128, 128, 128) (128, 31, 31)\n", 270 | "get lib\n", 271 | "\n", 272 | "Richardson Lucy Started\n", 273 | "0 10 20 30 40 50 60 70 80 90 \n", 274 | "Richardson Lucy Finishedfinished decon chunk\n", 275 | "(128, 128, 128) (128, 31, 31)\n", 276 | "get lib\n", 277 | "\n", 278 | "Richardson Lucy Started\n", 279 | "0 10 20 30 40 50 60 70 80 90 \n", 280 | "Richardson Lucy Finishedfinished decon chunk\n", 281 | "(128, 128, 128) (128, 31, 31)\n", 282 | "get lib\n", 283 | "\n", 284 | "Richardson Lucy Started\n", 285 | "0 10 20 30 40 50 60 70 80 90 \n", 286 | "Richardson Lucy Finishedfinished decon chunk\n" 287 | ] 288 | }, 289 | { 290 | "data": { 291 | "application/vnd.jupyter.widget-view+json": { 292 | "model_id": "a0f3c0e7a0034659a684fe8bb9ecb7f0", 293 | "version_major": 2, 294 | "version_minor": 0 295 | }, 296 | "text/plain": [ 297 | "HBox(children=(VBox(children=(HBox(children=(VBox(children=(ImageWidget(height=256, width=256),)),)), IntSlide…" 298 | ] 299 | }, 300 | "execution_count": 6, 301 | "metadata": {}, 302 | "output_type": "execute_result" 303 | } 304 | ], 305 | "source": [ 306 | "overlap = 0 \n", 307 | "dimg = da.from_array(im,chunks=(z_chunk_size, y_chunk_size, x_chunk_size))\n", 308 | "\n", 309 | "out = dimg.map_overlap(deconv_chunk, depth={0:0, 1:overlap, 2:overlap}, boundary='reflect', dtype=np.float32)\n", 310 | "\n", 311 | "decon_overlap_24 = out.compute(num_workers=1)\n", 312 | "stackview.orthogonal(decon_overlap_24)\n", 313 | " " 314 | ] 315 | }, 316 | { 317 | "cell_type": "code", 318 | "execution_count": null, 319 | "metadata": {}, 320 | "outputs": [], 321 | "source": [] 322 | } 323 | ], 324 | "metadata": { 325 | "kernelspec": { 326 | "display_name": "dresden-decon-test1", 327 | "language": "python", 328 | "name": "python3" 329 | }, 330 | "language_info": { 331 | "codemirror_mode": { 332 | "name": "ipython", 333 | "version": 3 334 | }, 335 | "file_extension": ".py", 336 | "mimetype": "text/x-python", 337 | "name": "python", 338 | "nbconvert_exporter": "python", 339 | "pygments_lexer": "ipython3", 340 | "version": "3.9.16" 341 | }, 342 | "orig_nbformat": 4 343 | }, 344 | "nbformat": 4, 345 | "nbformat_minor": 2 346 | } 347 | -------------------------------------------------------------------------------- /docs/30_Deconvolution/cluster_access.md: -------------------------------------------------------------------------------- 1 | # Running deconvolution on the ZIH cluster 2 | 3 | Log in to taurus as described [here](https://biapol.github.io/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/50_Clesperanto_on_HPC/login_taurus.html). 
4 | 5 | Once you are in Jupyter Lab, open a terminal by clicking on `File` (1 in the image below) -> `New` (2) -> `Terminal` (3) 6 | 7 | 8 | 9 | To download the Jupyter notebooks, execute the following command: 10 | 11 | ```bash 12 | git clone https://github.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis 13 | ``` 14 | 15 | To install a devbio-napari python environment, execute the following code in the terminal: 16 | 17 | ```bash 18 | git clone https://gitlab.mn.tu-dresden.de/bia-pol/singularity-devbio-napari.git 19 | cd singularity-devbio-napari 20 | ./install.sh deconvolution_course 21 | ``` 22 | 23 | Wait 2-15 min until the image is downloaded and verified (the time depends on how much network and disk load is on the cluster). The output should look something like this: 24 | 25 | 26 | 27 | Now you can open a Jupyter notebook with the newly installed environment. 28 | 29 | Reload the browser tab. 30 | 31 | Use the file browser on the left to navigate to the deconvolution notebooks: `PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/docs/30_Deconvolution` 32 | 33 | Open a new notebook by double-clicking it. 34 | 35 | Now you are asked to select a kernel. Click on the drop-down button (red rectangle in the image below). 36 | 37 | 38 | 39 | Choose the kernel you just installed: `devbio-napari_-_deconvolution_course` 40 | 41 | NB: for an existing notebook, you can click on the kernel name (by default `Python 3`) in the top right corner of the notebook and select the devbio-napari kernel as described above. 42 | 43 | 44 | -------------------------------------------------------------------------------- /docs/30_Deconvolution/decon_helper.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | import numpy as np  # needed below for the numpy fallback in compute_tp_tn_fp_fn 3 | # local path to the data folder 4 | image_path = Path('../../data/deconvolution/') 5 | 6 | # Path to the data folder on the cluster 7 | # image_path = Path('/beegfs/ws/0/tkorten-cache/deconvolution/') 8 | 9 | # Assuming you've installed the optional display libraries: tnia and stackview 10 | try: 11 | import tnia.plotting.projections as tnia_proj 12 | is_tnia_available = True 13 | print('tnia available') 14 | except ImportError: 15 | print('error importing tnia') 16 | is_tnia_available = False 17 | 18 | try: 19 | import stackview 20 | is_stackview_available = True 21 | print('stackview available') 22 | except ImportError: 23 | is_stackview_available = False 24 | print('error importing stackview') 25 | 26 | use_stackview = True 27 | use_tnia = False 28 | 29 | def test_helper(): 30 | print("Hello from decon_helper.py") 31 | print("tnia available: ", is_tnia_available) 32 | print("stackview available: ", is_stackview_available) 33 | return True 34 | 35 | def show_xyz_slice(img, imp='stackview'): 36 | z = img.shape[0] // 2 37 | y = img.shape[1] // 2 38 | x = img.shape[2] // 2 39 | 40 | if is_tnia_available and imp == 'tnia': 41 | tnia_proj.show_xyz_slice(img, x, y, z) 42 | elif is_stackview_available and imp == 'stackview': 43 | return stackview.orthogonal(img, continuous_update=True) 44 | else: 45 | return stackview.orthogonal(img, continuous_update=True) 46 | 47 | # code for jaccard index from https://github.com/haesleinhuepf/the-segmentation-game/blob/main/src/the_segmentation_game/metrics.py 48 | def jaccard_index_binary(reference_label_image, test_label_image) -> float: 49 | """ 50 | Determine how correct a given test segmentation is. 51 | As metric we use the Jaccard index.
52 | Assumtion: test and reference are binary images or 53 | label images with values 0=False, otherwise: True. 54 | """ 55 | tp, tn, fp, fn = compute_tp_tn_fp_fn(reference_label_image, test_label_image) 56 | 57 | # return Jaccard Index 58 | return tp / (tp + fn + fp) 59 | 60 | 61 | def compute_tp_tn_fp_fn(reference_label_image, test_label_image): 62 | """Compute overlap statistics: 63 | * tp = true positives 64 | * tn = true negatives 65 | * fp = false positives 66 | * fn = false negatives 67 | 68 | Parameters 69 | ---------- 70 | reference_label_image: Image, e.g. a manual ground truth annotation 71 | test_label_image: Image, e.g. an algorithm result to determine the quality of 72 | 73 | Returns 74 | ------- 75 | tp, tn, fp, fn 76 | """ 77 | ### adapted from https://github.com/haesleinhuepf/napari-workflow-optimizer/blob/20c3baaf3009caf26909b57f08181108a731e67e/src/napari_workflow_optimizer/_optimizer.py#L248 78 | try: 79 | import pyclesperanto_prototype as cle 80 | binary_and = cle.binary_and 81 | 82 | reference_label_image = cle.push(reference_label_image) 83 | test_label_image = cle.push(test_label_image) 84 | except ImportError: 85 | binary_and = np.logical_and 86 | 87 | reference_label_image = np.asarray(reference_label_image) 88 | test_label_image = np.asarray(test_label_image) 89 | 90 | negative_reference = reference_label_image == 0 91 | positive_reference = reference_label_image != 0 92 | negative_test = test_label_image == 0 93 | positive_test = test_label_image != 0 94 | 95 | # true positive: 96 | tp = binary_and(positive_reference, positive_test).sum() 97 | 98 | # true negative: 99 | tn = binary_and(negative_reference, negative_test).sum() 100 | 101 | # false positive 102 | fp = binary_and(negative_reference, positive_test).sum() 103 | 104 | # false negative 105 | fn = binary_and(positive_reference, negative_test).sum() 106 | 107 | return tp, tn, fp, fn -------------------------------------------------------------------------------- /docs/30_Deconvolution/images/5_open_terminal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/30_Deconvolution/images/5_open_terminal.png -------------------------------------------------------------------------------- /docs/30_Deconvolution/images/6_terminal_output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/30_Deconvolution/images/6_terminal_output.png -------------------------------------------------------------------------------- /docs/30_Deconvolution/images/8_select_kernel.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/30_Deconvolution/images/8_select_kernel.png -------------------------------------------------------------------------------- /docs/30_Deconvolution/images/PSF_xy.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/30_Deconvolution/images/PSF_xy.jpg 
-------------------------------------------------------------------------------- /docs/30_Deconvolution/images/PSF_xz.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/30_Deconvolution/images/PSF_xz.jpg -------------------------------------------------------------------------------- /docs/30_Deconvolution/images/rl_iterations/rl10xy.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/30_Deconvolution/images/rl_iterations/rl10xy.jpg -------------------------------------------------------------------------------- /docs/30_Deconvolution/images/rl_iterations/rl10xz.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/30_Deconvolution/images/rl_iterations/rl10xz.jpg -------------------------------------------------------------------------------- /docs/30_Deconvolution/images/rl_iterations/rl20xy.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/30_Deconvolution/images/rl_iterations/rl20xy.jpg -------------------------------------------------------------------------------- /docs/30_Deconvolution/images/rl_iterations/rl20xz.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/30_Deconvolution/images/rl_iterations/rl20xz.jpg -------------------------------------------------------------------------------- /docs/30_Deconvolution/images/rl_iterations/rl30xy.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/30_Deconvolution/images/rl_iterations/rl30xy.jpg -------------------------------------------------------------------------------- /docs/30_Deconvolution/images/rl_iterations/rl30xz.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/30_Deconvolution/images/rl_iterations/rl30xz.jpg -------------------------------------------------------------------------------- /docs/30_Deconvolution/images/rl_iterations/xy.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/30_Deconvolution/images/rl_iterations/xy.jpg -------------------------------------------------------------------------------- /docs/30_Deconvolution/images/rl_iterations/xz.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/30_Deconvolution/images/rl_iterations/xz.jpg -------------------------------------------------------------------------------- /docs/30_Deconvolution/images/rl_rltv/sphere_deconvolved_rla50_xy.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/30_Deconvolution/images/rl_rltv/sphere_deconvolved_rla50_xy.jpg -------------------------------------------------------------------------------- /docs/30_Deconvolution/images/rl_rltv/sphere_deconvolved_rla50_xz.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/30_Deconvolution/images/rl_rltv/sphere_deconvolved_rla50_xz.jpg -------------------------------------------------------------------------------- /docs/30_Deconvolution/images/rl_rltv/sphere_deconvolved_rltv0.002_a50_xy.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/30_Deconvolution/images/rl_rltv/sphere_deconvolved_rltv0.002_a50_xy.jpg -------------------------------------------------------------------------------- /docs/30_Deconvolution/images/rl_rltv/sphere_deconvolved_rltv0.002_a50_xz.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/30_Deconvolution/images/rl_rltv/sphere_deconvolved_rltv0.002_a50_xz.jpg -------------------------------------------------------------------------------- /docs/30_Deconvolution/images/sphere_convolved_noise_xy.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/30_Deconvolution/images/sphere_convolved_noise_xy.jpg -------------------------------------------------------------------------------- /docs/30_Deconvolution/images/sphere_convolved_noise_xz.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/30_Deconvolution/images/sphere_convolved_noise_xz.jpg -------------------------------------------------------------------------------- /docs/30_Deconvolution/images/sphere_convolved_xy.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/30_Deconvolution/images/sphere_convolved_xy.jpg -------------------------------------------------------------------------------- /docs/30_Deconvolution/images/sphere_convolved_xz.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/30_Deconvolution/images/sphere_convolved_xz.jpg -------------------------------------------------------------------------------- /docs/30_Deconvolution/images/sphere_xy.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/30_Deconvolution/images/sphere_xy.jpg -------------------------------------------------------------------------------- /docs/30_Deconvolution/images/sphere_xz.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/30_Deconvolution/images/sphere_xz.jpg -------------------------------------------------------------------------------- /docs/30_Deconvolution/labels_decon1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/30_Deconvolution/labels_decon1.png -------------------------------------------------------------------------------- /docs/30_Deconvolution/labels_decon2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/30_Deconvolution/labels_decon2.png -------------------------------------------------------------------------------- /docs/30_Deconvolution/labels_im.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/30_Deconvolution/labels_im.png -------------------------------------------------------------------------------- /docs/30_Deconvolution/test.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/30_Deconvolution/test.png -------------------------------------------------------------------------------- /docs/30_Deconvolution/test_libs.py: -------------------------------------------------------------------------------- 1 | from clij2fft.libs import getlib 2 | from clij2fft.richardson_lucy import richardson_lucy 3 | import numpy as np 4 | 5 | img= np.ones((256, 256, 128), dtype=np.float32) 6 | psf = np.ones((128, 128, 64), dtype=np.float32) 7 | 8 | result = richardson_lucy(img, psf, 100, 0) 9 | 10 | print() 11 | print(result.shape, result.mean()) -------------------------------------------------------------------------------- /docs/40_HPC_Intro/Intro_to_HPC_POLBIAS_Handout.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/40_HPC_Intro/Intro_to_HPC_POLBIAS_Handout.pdf -------------------------------------------------------------------------------- /docs/40_HPC_Intro/data/myscript.py: 
-------------------------------------------------------------------------------- 1 | import torch 2 | 3 | torch_version = torch.__version__ 4 | print(f"PyTorch version: {torch_version}") 5 | 6 | num_of_gpus = torch.cuda.device_count() 7 | print(f"Total number of GPU devices available: {num_of_gpus}") 8 | print("Following device(s) are available:") 9 | for i in range(torch.cuda.device_count()): 10 | print(torch.cuda.get_device_properties(i).name) 11 | -------------------------------------------------------------------------------- /docs/40_HPC_Intro/data/myscript.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #SBATCH --job-name=example 4 | #SBATCH --nodes=1 # Number of nodes 5 | #SBATCH --ntasks=1 # Run on a single CPU 6 | #SBATCH --gres=gpu:1 # 1 GPU per node 7 | #SBATCH --time=0-00:05:00 # d-hh:mm:ss 8 | #SBATCH --partition=alpha 9 | #SBATCH --mem=2GB # Memory per node 10 | #SBATCH --output=%j.out # Standard output and error log 11 | 12 | module load modenv/hiera GCC/10.2.0 CUDA/11.1.1 OpenMPI/4.0.5 PyTorch/1.10.0 tqdm/4.56.2 13 | 14 | myworkspace="$(ws_find myworkspace)" 15 | 16 | python "$myworkspace"/src/myscript.py 17 | -------------------------------------------------------------------------------- /docs/40_HPC_Intro/data/requirements.txt: -------------------------------------------------------------------------------- 1 | pandas==2.0.3 2 | tzdata==2023.3 3 | numpy==1.25.2 4 | -------------------------------------------------------------------------------- /docs/40_HPC_Intro/readme.md: -------------------------------------------------------------------------------- 1 | # High-performance computing 2 | 3 | In this session, we will get an introduction to high-performance computing (HPC) on [TU Dresden's Taurus cluster](https://tu-dresden.de/zih/hochleistungsrechnen/hpc). 4 | 5 | [Download slides](https://github.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/blob/main/docs/40_HPC_Intro/Intro_to_HPC_POLBIAS_Handout.pdf). 6 | -------------------------------------------------------------------------------- /docs/50_Clesperanto_on_HPC/exercises.md: -------------------------------------------------------------------------------- 1 | # Exercises: GPU-accelerated image processing on HPC 2 | 3 | ## clesperanto & cupy benchmarking 4 | 5 | On the Taurus cluster you have access to powerful NVidia A100 GPUs. Upload and run the benchmarking notebook from the [cupy session](https://github.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/tree/main/docs/25_cupy) and compare the performance of the cluster's GPU with your local GPU. 6 | 7 | ## Bio-image Analysis workflows on HPC 8 | 9 | Upload a tif file from this [zenodo repository](https://zenodo.org/record/5837363) to the project space at the cluster. Program a Jupyter notebook using clesperanto that segments the nuclei in this 3D dataset (hint 1: generate a notebook on your local computer and upload it to the cluster, hint 2: use [top-hat background-removal](https://haesleinhuepf.github.io/BioImageAnalysisNotebooks/18_image_filtering/03_background_removal.html) and [Voronoi-Otsu-Labeling](https://haesleinhuepf.github.io/BioImageAnalysisNotebooks/20_image_segmentation/11_voronoi_otsu_labeling.html)). Store the segmentation result on the project space. A possible skeleton for such a notebook is sketched below.
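One possible skeleton (the file names, radii and sigma values are placeholders that need to be adapted to the dataset):

```python
import pyclesperanto_prototype as cle
from skimage.io import imread, imsave

image = imread("nuclei_3d.tif")              # placeholder name for the uploaded tif
input_gpu = cle.push(image)                  # send the image to the GPU

# remove background, then segment the nuclei
background_subtracted = cle.top_hat_box(input_gpu, radius_x=10, radius_y=10, radius_z=10)
labels = cle.voronoi_otsu_labeling(background_subtracted, spot_sigma=3, outline_sigma=1)

# store the result on the project space (path is a placeholder)
imsave("nuclei_labels.tif", cle.pull(labels).astype("uint32"))
```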
10 | -------------------------------------------------------------------------------- /docs/50_Clesperanto_on_HPC/images/5_open_terminal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/50_Clesperanto_on_HPC/images/5_open_terminal.png -------------------------------------------------------------------------------- /docs/50_Clesperanto_on_HPC/images/6_terminal_output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/50_Clesperanto_on_HPC/images/6_terminal_output.png -------------------------------------------------------------------------------- /docs/50_Clesperanto_on_HPC/images/taurus_login.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/50_Clesperanto_on_HPC/images/taurus_login.png -------------------------------------------------------------------------------- /docs/50_Clesperanto_on_HPC/images/taurus_login_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/50_Clesperanto_on_HPC/images/taurus_login_1.png -------------------------------------------------------------------------------- /docs/50_Clesperanto_on_HPC/images/taurus_login_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/50_Clesperanto_on_HPC/images/taurus_login_2.png -------------------------------------------------------------------------------- /docs/50_Clesperanto_on_HPC/images/taurus_login_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/50_Clesperanto_on_HPC/images/taurus_login_3.png -------------------------------------------------------------------------------- /docs/50_Clesperanto_on_HPC/images/taurus_login_4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/50_Clesperanto_on_HPC/images/taurus_login_4.png -------------------------------------------------------------------------------- /docs/50_Clesperanto_on_HPC/images/taurus_login_6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/50_Clesperanto_on_HPC/images/taurus_login_6.png -------------------------------------------------------------------------------- /docs/50_Clesperanto_on_HPC/images/taurus_login_7.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/50_Clesperanto_on_HPC/images/taurus_login_7.png -------------------------------------------------------------------------------- /docs/50_Clesperanto_on_HPC/login_taurus.md: -------------------------------------------------------------------------------- 1 | # Executing clesperanto on the TU Dresden HPC 2 | 3 | To execute Python Jupyter notebooks on TU Dresden's HPC cluster, navigate to this URL: 4 | https://taurus.hrsk.tu-dresden.de/jupyter/hub/home 5 | 6 | Note: If you're outside the university network, you need to connect via [VPN](https://tu-dresden.de/zih/dienste/service-katalog/arbeitsumgebung/zugang_datennetz/vpn). 7 | 8 | After logging in, a Jupyter Hub screen will open. Click on `Start My Server`. 9 | 10 | ![img.png](images/taurus_login.png) 11 | 12 | Select the entries as shown below in the `Advanced` tab and click on `Spawn`. 13 | 14 | ![img_1.png](images/taurus_login_1.png) 15 | 16 | This will take a moment. 17 | 18 | ![img_2.png](images/taurus_login_2.png) 19 | 20 | You will be redirected to a Jupyter Lab environment. 21 | 22 | Now you need to install a singularity container on the HPC cluster as explained [in detail here](https://gitlab.mn.tu-dresden.de/bia-pol/singularity-devbio-napari#quick-start). In short: 23 | 24 | Open a terminal by clicking on `File` (1 in the image below) -> `New` (2) -> `Terminal` (3) 25 | 26 | ![](images/5_open_terminal.png) 27 | 28 | Install a custom Jupyter kernel for your user. 29 | 30 | To install a devbio-napari python environment, execute the following code in the terminal: 31 | 32 | ```bash 33 | git clone https://gitlab.mn.tu-dresden.de/bia-pol/singularity-devbio-napari.git 34 | cd singularity-devbio-napari 35 | ./install.sh v0.2.9 36 | ``` 37 | 38 | Wait 2-15 min until the image is downloaded and verified (the time depends on how much network and disk load is on the cluster). The output should look something like this: 39 | 40 | ![](images/6_terminal_output.png) 41 | 42 | Now reload the browser tab. 43 | 44 | Use the upload button to upload your assistant-generated notebook. You can also use this [example notebook](napari_assistant_generated_notebook.ipynb). 45 | 46 | ![img_4.png](images/taurus_login_4.png) 47 | 48 | Double-click the uploaded notebook to open it. Select a kernel, e.g. the devbio-napari kernel. 49 | 50 | 51 | ![img_6.png](images/taurus_login_6.png) 52 | 53 | In this notebook, you need to change how files are opened. 54 | 55 | ![img_5.png](images/taurus_login_7.png) 56 | 57 | You will find a potential modification on the following page.
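In essence, the hard-coded Windows path in the generated notebook has to be replaced by a path that exists on the cluster; a minimal sketch (the cluster path below is only a placeholder) could look like this:

```python
from skimage.io import imread

# placeholder path: point this at wherever blobs.tif lives on the cluster
image0_b = imread("/path/on/the/cluster/blobs.tif")
```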
58 | 59 | 60 | -------------------------------------------------------------------------------- /docs/50_Clesperanto_on_HPC/napari_assistant_generated_notebook.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "id": "02288901", 7 | "metadata": { 8 | "lines_to_next_cell": 2 9 | }, 10 | "outputs": [], 11 | "source": [ 12 | "from skimage.io import imread\n", 13 | "import pyclesperanto_prototype as cle # version 0.22.0" 14 | ] 15 | }, 16 | { 17 | "cell_type": "markdown", 18 | "id": "948f789a", 19 | "metadata": {}, 20 | "source": [ 21 | "## Loading 'blobs'" 22 | ] 23 | }, 24 | { 25 | "cell_type": "code", 26 | "execution_count": null, 27 | "id": "d88bf584", 28 | "metadata": { 29 | "lines_to_next_cell": 2 30 | }, 31 | "outputs": [], 32 | "source": [ 33 | "image0_b = imread(\n", 34 | " \"C:/Users/haase/mambaforge/envs/bio39/lib/site-packages/napari_pyclesperanto_assistant/data/blobs.tif\")\n", 35 | "image0_b" 36 | ] 37 | }, 38 | { 39 | "cell_type": "markdown", 40 | "id": "853ced0a", 41 | "metadata": {}, 42 | "source": [ 43 | "## gaussian blur" 44 | ] 45 | }, 46 | { 47 | "cell_type": "code", 48 | "execution_count": null, 49 | "id": "ce6be712", 50 | "metadata": {}, 51 | "outputs": [], 52 | "source": [ 53 | "image1_gb = cle.gaussian_blur(image0_b, None, 1.0, 1.0, 0.0)\n", 54 | "image1_gb" 55 | ] 56 | }, 57 | { 58 | "cell_type": "markdown", 59 | "id": "7b675a36", 60 | "metadata": {}, 61 | "source": [ 62 | "## threshold otsu" 63 | ] 64 | }, 65 | { 66 | "cell_type": "code", 67 | "execution_count": null, 68 | "id": "8f4bc144", 69 | "metadata": {}, 70 | "outputs": [], 71 | "source": [ 72 | "image2_to = cle.threshold_otsu(image1_gb)\n", 73 | "image2_to" 74 | ] 75 | }, 76 | { 77 | "cell_type": "markdown", 78 | "id": "f5f69e98", 79 | "metadata": {}, 80 | "source": [ 81 | "## connected components labeling box" 82 | ] 83 | }, 84 | { 85 | "cell_type": "code", 86 | "execution_count": null, 87 | "id": "6af0436b", 88 | "metadata": {}, 89 | "outputs": [], 90 | "source": [ 91 | "image3_cclb = cle.connected_components_labeling_box(image2_to)\n", 92 | "image3_cclb" 93 | ] 94 | } 95 | ], 96 | "metadata": { 97 | "jupytext": { 98 | "cell_metadata_filter": "-all", 99 | "main_language": "python", 100 | "notebook_metadata_filter": "-all" 101 | } 102 | }, 103 | "nbformat": 4, 104 | "nbformat_minor": 5 105 | } 106 | -------------------------------------------------------------------------------- /docs/50_Clesperanto_on_HPC/readme.md: -------------------------------------------------------------------------------- 1 | # Using Clesperanto on Taurus 2 | 3 | In this session we will make use of [clesperanto](https://github.com/clEsperanto/pyclesperanto_prototype) on TU Dresden's HPC infrastructure. 4 | -------------------------------------------------------------------------------- /docs/60_Pytorch/00_versions.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "293e0061-8d1e-42fc-bc50-bc367e69bc07", 6 | "metadata": {}, 7 | "source": [ 8 | "# Versions" 9 | ] 10 | }, 11 | { 12 | "cell_type": "markdown", 13 | "id": "209e5a4b-964e-4453-814b-72ac48f67302", 14 | "metadata": {}, 15 | "source": [ 16 | "As for every machine learning project, managing software and sofware dependencies is key. We have tried to make this process as easy as possible. Execute the cell below and you should be fine." 
17 | ] 18 | }, 19 | { 20 | "cell_type": "code", 21 | "execution_count": null, 22 | "id": "7c582297", 23 | "metadata": {}, 24 | "outputs": [], 25 | "source": [ 26 | "!sh enable_env_in_jupyter.sh" 27 | ] 28 | }, 29 | { 30 | "cell_type": "markdown", 31 | "id": "bf7cf1ee-b271-4a13-8bf5-b6083ce5e426", 32 | "metadata": {}, 33 | "source": [ 34 | "After you have launched the above cell and you do not get an error, you need to close the current notebook and reopen it. Then you should see the kernel `torch_intro_env` be available in the top right menue.\n", 35 | "\n", 36 | "The cells below are meant as a cross check to see if everything works as expected." 37 | ] 38 | }, 39 | { 40 | "cell_type": "code", 41 | "execution_count": 1, 42 | "id": "2347004f-a58b-4d52-b2ba-c0d093b2042b", 43 | "metadata": {}, 44 | "outputs": [ 45 | { 46 | "name": "stdout", 47 | "output_type": "stream", 48 | "text": [ 49 | "1.11.0\n" 50 | ] 51 | } 52 | ], 53 | "source": [ 54 | "import torch\n", 55 | "print(torch.__version__)" 56 | ] 57 | }, 58 | { 59 | "cell_type": "code", 60 | "execution_count": 2, 61 | "id": "685b7d74", 62 | "metadata": {}, 63 | "outputs": [ 64 | { 65 | "name": "stdout", 66 | "output_type": "stream", 67 | "text": [ 68 | "0.12.0a0+76b4a42\n" 69 | ] 70 | } 71 | ], 72 | "source": [ 73 | "import torchvision\n", 74 | "print(torchvision.__version__)" 75 | ] 76 | }, 77 | { 78 | "cell_type": "code", 79 | "execution_count": 3, 80 | "id": "9cdffa10", 81 | "metadata": {}, 82 | "outputs": [ 83 | { 84 | "name": "stdout", 85 | "output_type": "stream", 86 | "text": [ 87 | "2.0.7\n" 88 | ] 89 | } 90 | ], 91 | "source": [ 92 | "import pytorch_lightning as pl\n", 93 | "print(pl.__version__)" 94 | ] 95 | }, 96 | { 97 | "cell_type": "code", 98 | "execution_count": 4, 99 | "id": "444d4fef", 100 | "metadata": {}, 101 | "outputs": [ 102 | { 103 | "name": "stdout", 104 | "output_type": "stream", 105 | "text": [ 106 | "1.2.0\n" 107 | ] 108 | } 109 | ], 110 | "source": [ 111 | "import monai\n", 112 | "print(monai.__version__)" 113 | ] 114 | }, 115 | { 116 | "cell_type": "code", 117 | "execution_count": 5, 118 | "id": "7f41b1c5", 119 | "metadata": {}, 120 | "outputs": [ 121 | { 122 | "name": "stdout", 123 | "output_type": "stream", 124 | "text": [ 125 | "1.11.2\n" 126 | ] 127 | } 128 | ], 129 | "source": [ 130 | "import scipy\n", 131 | "print(scipy.__version__)" 132 | ] 133 | }, 134 | { 135 | "cell_type": "markdown", 136 | "id": "bbe5db54-abb9-446a-8d53-5bc3e4366d7e", 137 | "metadata": {}, 138 | "source": [ 139 | "After we made sure, that all software is available at the versions we intended, let's look around the available hardware a bit." 
140 | ] 141 | }, 142 | { 143 | "cell_type": "code", 144 | "execution_count": 6, 145 | "id": "da8d5db3", 146 | "metadata": {}, 147 | "outputs": [ 148 | { 149 | "name": "stdout", 150 | "output_type": "stream", 151 | "text": [ 152 | "cuda devices available: 1\n" 153 | ] 154 | } 155 | ], 156 | "source": [ 157 | "print(\"cuda devices available:\", torch.cuda.device_count())" 158 | ] 159 | }, 160 | { 161 | "cell_type": "code", 162 | "execution_count": 7, 163 | "id": "dff36cda", 164 | "metadata": {}, 165 | "outputs": [ 166 | { 167 | "name": "stdout", 168 | "output_type": "stream", 169 | "text": [ 170 | "True\n" 171 | ] 172 | } 173 | ], 174 | "source": [ 175 | "print(torch.cuda.is_available())" 176 | ] 177 | }, 178 | { 179 | "cell_type": "code", 180 | "execution_count": 8, 181 | "id": "b0e1ed3e", 182 | "metadata": {}, 183 | "outputs": [], 184 | "source": [ 185 | "# let's create a regular n-dimensional object, in torch this is called a Tensor\n", 186 | "x = torch.ones(10, 2, 32, 42)" 187 | ] 188 | }, 189 | { 190 | "cell_type": "code", 191 | "execution_count": 9, 192 | "id": "860fe3ca", 193 | "metadata": {}, 194 | "outputs": [], 195 | "source": [ 196 | "device = torch.device(\"cuda\")" 197 | ] 198 | }, 199 | { 200 | "cell_type": "code", 201 | "execution_count": null, 202 | "id": "54365f70", 203 | "metadata": {}, 204 | "outputs": [], 205 | "source": [ 206 | "# moving a tensor onto an accelerator is as easy as the following line\n", 207 | "x.to(device)" 208 | ] 209 | }, 210 | { 211 | "cell_type": "markdown", 212 | "id": "becd8f52-cbc1-47cb-9874-b47cd38df062", 213 | "metadata": {}, 214 | "source": [ 215 | "## Fallback Environment\n", 216 | "\n", 217 | "Should there be any trouble with running the first cell in this notebook, there is another way to set up the kernel that will work with all or most of the notebooks.\n", 218 | "\n", 219 | "1. Start a [jupyter lab session on taurus](https://taurus.hrsk.tu-dresden.de/jupyter/hub/home). Make sure to select at least one GPU.\n", 220 | "\n", 221 | "2. In Jupyter lab, open a terminal and type the following commands. IMPORTANT: make sure to wait until the script reports that it is done. otherwise you may end up with a broken partial singularity image in your `~/.singularity/cache` directory.\n", 222 | "\n", 223 | " ```bash\n", 224 | "\n", 225 | " git clone https://gitlab.mn.tu-dresden.de/bia-pol/singularity-devbio-napari.git\n", 226 | "\n", 227 | " cd singularity-devbio-napari\n", 228 | "\n", 229 | " ./install.sh \n", 230 | "\n", 231 | " ```\n", 232 | "\n", 233 | " Replace `` with the version you want to install. For this session, `pol-course-pytorch` is the most suitable.\n", 234 | "\n", 235 | "3. You should now see an additional button named `devbio napari` on the jupyter lab home screen. Note: you may need to reload the page first.\n", 236 | "\n", 237 | "4. Klick on that button to start a jupyter notebook inside the singularity container. 
Note that the first command execution will take a while because of the additional time it takes to start the singularity container.\n", 238 | "\n" 239 | ] 240 | } 241 | ], 242 | "metadata": { 243 | "kernelspec": { 244 | "display_name": "devbio-napari_pol-course-pytorch", 245 | "language": "python", 246 | "name": "devbio-napari_pol-course-pytorch" 247 | }, 248 | "language_info": { 249 | "codemirror_mode": { 250 | "name": "ipython", 251 | "version": 3 252 | }, 253 | "file_extension": ".py", 254 | "mimetype": "text/x-python", 255 | "name": "python", 256 | "nbconvert_exporter": "python", 257 | "pygments_lexer": "ipython3", 258 | "version": "3.10.8" 259 | } 260 | }, 261 | "nbformat": 4, 262 | "nbformat_minor": 5 263 | } 264 | -------------------------------------------------------------------------------- /docs/60_Pytorch/01_data_exploration.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "405c2b72-7d56-4ef3-b4c7-9f2a2dfa0bda", 6 | "metadata": {}, 7 | "source": [ 8 | "# Data exploration" 9 | ] 10 | }, 11 | { 12 | "cell_type": "markdown", 13 | "id": "f26351dc-61ed-4a09-b318-fdf790b5e998", 14 | "metadata": {}, 15 | "source": [ 16 | "For this workshop, we will rely on a very simple dataset from the 2018 data science bowl. See [this page](https://bbbc.broadinstitute.org/BBBC038/) for more details." 17 | ] 18 | }, 19 | { 20 | "cell_type": "code", 21 | "execution_count": null, 22 | "id": "870ffc9b", 23 | "metadata": {}, 24 | "outputs": [], 25 | "source": [ 26 | "from data import get_dsb2018_train_files, get_dsb2018_validation_files, get_dsb2018_test_files, fill_label_holes, quantile_normalization\n", 27 | "from tifffile import imread\n", 28 | "\n", 29 | "import matplotlib.pyplot as plt\n", 30 | "import numpy as np" 31 | ] 32 | }, 33 | { 34 | "cell_type": "markdown", 35 | "id": "0f44606d", 36 | "metadata": {}, 37 | "source": [ 38 | "## Getting lists of input and label files" 39 | ] 40 | }, 41 | { 42 | "cell_type": "markdown", 43 | "id": "125cdc8b-c348-41e5-98ac-5e6976e69b1b", 44 | "metadata": {}, 45 | "source": [ 46 | "The data required to execute the notebooks is located at `/projects/p_scads_trainings/BIAS/dsb2018` and has to be integrated into your clone of this repository (which should reside in your home directory after clicking the above link to launch JupyterHub).\n", 47 | "\n", 48 | "To get the data:\n", 49 | "\n", 50 | "1. create a directory named `data` in the top level of this repo (i.e. on the same level where the `*.ipynb` notebook files and this README are located).\n", 51 | "```\n", 52 | "mkdir data\n", 53 | "```\n", 54 | "\n", 55 | "\n", 56 | "2. copy the data to the freshly created directory using\n", 57 | "```\n", 58 | "cp -r /projects/p_scads_trainings/BIAS/dsb2018 $PWD/data/\n", 59 | "```\n", 60 | "\n", 61 | "\n", 62 | "As a backup solution, the data can be downloaded as a zip file from [the stardist github repository](https://github.com/stardist/stardist/releases/download/0.1.0/dsb2018.zip)."
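,
    "\n",
    "\n",
    "If copying from the projects directory is not an option, a rough sketch for fetching and unpacking that zip file from within Python could look like this (the paths are only examples; make sure the extracted folder ends up where `data.py` expects it):\n",
    "\n",
    "```python\n",
    "from pathlib import Path\n",
    "from urllib.request import urlretrieve\n",
    "import zipfile\n",
    "\n",
    "data_dir = Path(\"data\")\n",
    "data_dir.mkdir(exist_ok=True)\n",
    "zip_path, _ = urlretrieve(\n",
    "    \"https://github.com/stardist/stardist/releases/download/0.1.0/dsb2018.zip\",\n",
    "    data_dir / \"dsb2018.zip\",\n",
    ")\n",
    "with zipfile.ZipFile(zip_path) as zf:\n",
    "    zf.extractall(data_dir)\n",
    "```"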
63 | ] 64 | }, 65 | { 66 | "cell_type": "code", 67 | "execution_count": null, 68 | "id": "18dc0b4d", 69 | "metadata": {}, 70 | "outputs": [], 71 | "source": [ 72 | "# let's loop through the dataset and check how many samples we have\n", 73 | "for name, getter_fn in zip([\"train\", \"val\", \"test\"], [get_dsb2018_train_files, get_dsb2018_validation_files, get_dsb2018_test_files]):\n", 74 | " X, y = getter_fn()\n", 75 | " print(name, len(X), len(y))" 76 | ] 77 | }, 78 | { 79 | "cell_type": "markdown", 80 | "id": "6ac8b46d-35ef-46b3-81e6-2f170e440e1a", 81 | "metadata": {}, 82 | "source": [ 83 | "We retain the last iteration of this loop, i.e. the test set. The variables `X` and `y` should contain paths to specific `.tif` files now." 84 | ] 85 | }, 86 | { 87 | "cell_type": "code", 88 | "execution_count": null, 89 | "id": "d27ca9c3", 90 | "metadata": {}, 91 | "outputs": [], 92 | "source": [ 93 | "X[:3]" 94 | ] 95 | }, 96 | { 97 | "cell_type": "code", 98 | "execution_count": null, 99 | "id": "d6b28f57", 100 | "metadata": {}, 101 | "outputs": [], 102 | "source": [ 103 | "y[:3]" 104 | ] 105 | }, 106 | { 107 | "cell_type": "markdown", 108 | "id": "28cf0326", 109 | "metadata": {}, 110 | "source": [ 111 | "## Looking at a single sample of the training data" 112 | ] 113 | }, 114 | { 115 | "cell_type": "code", 116 | "execution_count": null, 117 | "id": "6f896730", 118 | "metadata": {}, 119 | "outputs": [], 120 | "source": [ 121 | "Xtrain, ytrain = get_dsb2018_train_files()\n", 122 | "\n", 123 | "sidx = 0 #selecting the first image in the lists\n", 124 | "image_file, label_file = Xtrain[sidx], ytrain[sidx]\n", 125 | "image, label = imread(image_file), imread(label_file)\n", 126 | "label_filled = fill_label_holes(label) # some masks have holes, let's fill them" 127 | ] 128 | }, 129 | { 130 | "cell_type": "code", 131 | "execution_count": null, 132 | "id": "93c445ad", 133 | "metadata": {}, 134 | "outputs": [], 135 | "source": [ 136 | "print(type(image))\n", 137 | "print(type(label))" 138 | ] 139 | }, 140 | { 141 | "cell_type": "code", 142 | "execution_count": null, 143 | "id": "ad8f3a33", 144 | "metadata": {}, 145 | "outputs": [], 146 | "source": [ 147 | "for name, sample in zip([\"image\", \"label\"], [image, label]):\n", 148 | " print(name, sample.dtype, sample.shape, sample.min(), sample.max())" 149 | ] 150 | }, 151 | { 152 | "cell_type": "markdown", 153 | "id": "2a60fab1-5b49-472c-abdf-6b8e38cde8f9", 154 | "metadata": {}, 155 | "source": [ 156 | "The loaded images are 8-bit greyscale images. The labels however are encoded as 16-bit files." 157 | ] 158 | }, 159 | { 160 | "cell_type": "code", 161 | "execution_count": null, 162 | "id": "0889dedc", 163 | "metadata": {}, 164 | "outputs": [], 165 | "source": [ 166 | "plt.subplot(131)\n", 167 | "plt.imshow(image, cmap=\"gray\")\n", 168 | "\n", 169 | "plt.subplot(132)\n", 170 | "plt.imshow(label)\n", 171 | "\n", 172 | "plt.subplot(133)\n", 173 | "plt.imshow(label_filled)" 174 | ] 175 | }, 176 | { 177 | "cell_type": "markdown", 178 | "id": "1245ebf9", 179 | "metadata": {}, 180 | "source": [ 181 | "## Convert the instance label to a binary segmentation mask\n", 182 | "\n", 183 | "As we intend to demonstrate the usage of pytorch, we are simplifying our problem from instance segmentation to semantic segmentation." 
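,
    "\n",
    "\n",
    "To make the difference concrete: an instance mask assigns every nucleus its own integer id, whereas the semantic mask we build below only distinguishes foreground from background. A quick, purely illustrative check:\n",
    "\n",
    "```python\n",
    "print(\"instance ids:\", np.unique(label_filled)[:5], \"...\")\n",
    "print(\"semantic ids:\", np.unique((label_filled > 0).astype(np.uint8)))\n",
    "```"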
184 | ] 185 | }, 186 | { 187 | "cell_type": "code", 188 | "execution_count": null, 189 | "id": "d9d2335e", 190 | "metadata": {}, 191 | "outputs": [], 192 | "source": [ 193 | "label_binary = np.zeros_like(label_filled)\n", 194 | "label_binary[label_filled != 0] = 1\n", 195 | "\n", 196 | "plt.imshow(label_binary, cmap=\"gray\")" 197 | ] 198 | }, 199 | { 200 | "cell_type": "markdown", 201 | "id": "629b2993", 202 | "metadata": {}, 203 | "source": [ 204 | "## Normalization of the raw image\n", 205 | "\n", 206 | "As neural networks tend to be easier to train when input values are small, we should normalize the pixel intensities from the uint8 range of [0, 255] to floating point values closer to [0, 1]. \n", 207 | "\n", 208 | "In the code below, we use a technique that sets the lower boundary of the normalization range to the 1st percentile. Similarly, we set the upper boundary of the normalization to the 99.8th percentile. This technique has proven to be very robust in practice. We adopted it from StarDist, see https://github.com/stardist/stardist/blob/master/examples/2D/2_training.ipynb" 209 | ] 210 | }, 211 | { 212 | "cell_type": "code", 213 | "execution_count": null, 214 | "id": "4f622821", 215 | "metadata": {}, 216 | "outputs": [], 217 | "source": [ 218 | "# similar normalization as shown in stardist (https://github.com/stardist/stardist/blob/master/examples/2D/2_training.ipynb)\n", 219 | "image_normalized_noclip = quantile_normalization(\n", 220 | " image,\n", 221 | " quantile_low=0.01,\n", 222 | " quantile_high=0.998,\n", 223 | " clip=False)[0]\n", 224 | "\n", 225 | "image_normalized_clip = quantile_normalization(\n", 226 | " image,\n", 227 | " quantile_low=0.01,\n", 228 | " quantile_high=0.998,\n", 229 | " clip=True)[0]" 230 | ] 231 | }, 232 | { 233 | "cell_type": "code", 234 | "execution_count": null, 235 | "id": "370008f7", 236 | "metadata": {}, 237 | "outputs": [], 238 | "source": [ 239 | "print(\"image intensity range after normalisation without clipping\")\n", 240 | "print(image_normalized_noclip.min(), image_normalized_noclip.max())\n", 241 | "\n", 242 | "print(\"image intensity range after normalisation with clipping\")\n", 243 | "print(image_normalized_clip.min(), image_normalized_clip.max())" 244 | ] 245 | }, 246 | { 247 | "cell_type": "code", 248 | "execution_count": null, 249 | "id": "d4afd5b5", 250 | "metadata": {}, 251 | "outputs": [], 252 | "source": [ 253 | "plt.subplot(131)\n", 254 | "_ = plt.hist(image.flatten(), density=True)\n", 255 | "\n", 256 | "plt.subplot(132)\n", 257 | "_ = plt.hist(image_normalized_noclip.flatten(), density=True)\n", 258 | "\n", 259 | "plt.subplot(133)\n", 260 | "_ = plt.hist(image_normalized_clip.flatten(), density=True)\n", 261 | "\n", 262 | "plt.tight_layout()" 263 | ] 264 | }, 265 | { 266 | "cell_type": "code", 267 | "execution_count": null, 268 | "id": "ab67b69e", 269 | "metadata": {}, 270 | "outputs": [], 271 | "source": [ 272 | "from torchvision import transforms\n", 273 | "\n", 274 | "# a convenient transform from torchvision is to cast the \n", 275 | "# np.array to a torch.Tensor\n", 276 | "label_torch = transforms.ToTensor()(label_binary.astype(np.float32))\n", 277 | "\n", 278 | "# when using code that expects numpy objects, we have to cast back again\n", 279 | "plt.imshow(label_torch.numpy()[0], cmap=\"gray\")" 280 | ] 281 | }, 282 | { 283 | "cell_type": "markdown", 284 | "id": "02e6d126", 285 | "metadata": {}, 286 | "source": [ 287 | "## We explore the image resolutions in the training data" 288 | ] 289 | }, 290 | { 291 | "cell_type": "code", 292 | "execution_count":
null, 293 | "id": "9efffa46", 294 | "metadata": {}, 295 | "outputs": [], 296 | "source": [ 297 | "# let's read in all training images\n", 298 | "X = list(map(imread, Xtrain))" 299 | ] 300 | }, 301 | { 302 | "cell_type": "code", 303 | "execution_count": null, 304 | "id": "4ba41d57", 305 | "metadata": {}, 306 | "outputs": [], 307 | "source": [ 308 | "X[1].shape, type(X[1].shape)" 309 | ] 310 | }, 311 | { 312 | "cell_type": "code", 313 | "execution_count": null, 314 | "id": "a606d91d", 315 | "metadata": {}, 316 | "outputs": [], 317 | "source": [ 318 | "shapes = [tuple(x.shape) for x in X]" 319 | ] 320 | }, 321 | { 322 | "cell_type": "code", 323 | "execution_count": null, 324 | "id": "93dea082", 325 | "metadata": {}, 326 | "outputs": [], 327 | "source": [ 328 | "# you will find many different shapes in the training data\n", 329 | "shapes" 330 | ] 331 | }, 332 | { 333 | "cell_type": "code", 334 | "execution_count": null, 335 | "id": "b299beb2", 336 | "metadata": {}, 337 | "outputs": [], 338 | "source": [ 339 | "# let's see the shapes we find\n", 340 | "unique_shapes = set(shapes)\n", 341 | "unique_shapes" 342 | ] 343 | }, 344 | { 345 | "cell_type": "code", 346 | "execution_count": null, 347 | "id": "5027afdf", 348 | "metadata": {}, 349 | "outputs": [], 350 | "source": [ 351 | "counts = {}\n", 352 | "for sh in unique_shapes:\n", 353 | " counts[sh] = len([s for s in shapes if s == sh])\n", 354 | "\n", 355 | "counts" 356 | ] 357 | }, 358 | { 359 | "cell_type": "markdown", 360 | "id": "a5532475-260b-4f86-b778-fe1bf2ae0d4e", 361 | "metadata": {}, 362 | "source": [ 363 | "## Exercise: A homogeneous dataset?\n", 364 | "\n", 365 | "If the shapes differ, what else is different? Explore the **training data** set further and find out:\n", 366 | "- are all images encoded the same way?\n", 367 | "- are all label masks encoded the same way?\n", 368 | "\nOnce done, approach the person next to you and discuss how you would proceed with such a diverse data set in practice." 369 | ] 370 | }, 371 | { 372 | "cell_type": "code", 373 | "execution_count": null, 374 | "id": "c3d950a2-37bb-4dc6-a512-53e745ea45d0", 375 | "metadata": {}, 376 | "outputs": [], 377 | "source": [] 378 | } 379 | ], 380 | "metadata": { 381 | "kernelspec": { 382 | "display_name": "torch_intro_env", 383 | "language": "python", 384 | "name": "torch_intro_env" 385 | }, 386 | "language_info": { 387 | "codemirror_mode": { 388 | "name": "ipython", 389 | "version": 3 390 | }, 391 | "file_extension": ".py", 392 | "mimetype": "text/x-python", 393 | "name": "python", 394 | "nbconvert_exporter": "python", 395 | "pygments_lexer": "ipython3", 396 | "version": "3.10.12" 397 | } 398 | }, 399 | "nbformat": 4, 400 | "nbformat_minor": 5 401 | } 402 | -------------------------------------------------------------------------------- /docs/60_Pytorch/02_dataset.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "0c85a33d", 6 | "metadata": {}, 7 | "source": [ 8 | "# Creation of a dataset\n", 9 | "\n", 10 | "In deep learning, everything starts with a well-prepared dataset that provides inputs and outputs to the network that is supposed to be trained. Based on the data exploration of the previous notebook, we are creating a dataset class that can serve individual samples to us."
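,
    "\n",
    "\n",
    "The only contract such a class has to fulfil is the map-style dataset protocol of torch, i.e. it provides `__getitem__` and `__len__`. As a minimal, purely illustrative skeleton (the class below adds the actual file reading and preprocessing on top):\n",
    "\n",
    "```python\n",
    "from torch.utils.data import Dataset\n",
    "\n",
    "class MinimalDataset(Dataset):  # hypothetical example, not used later\n",
    "    def __init__(self, images, labels):\n",
    "        self.images, self.labels = images, labels\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        return self.images[idx], self.labels[idx]\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.images)\n",
    "```"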
11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": null, 16 | "id": "15c6eade", 17 | "metadata": {}, 18 | "outputs": [], 19 | "source": [ 20 | "from data import get_dsb2018_train_files, get_dsb2018_validation_files, get_dsb2018_test_files, fill_label_holes, quantile_normalization\n", 21 | "from tifffile import imread\n", 22 | "from tqdm import tqdm" 23 | ] 24 | }, 25 | { 26 | "cell_type": "code", 27 | "execution_count": null, 28 | "id": "c5da2bde", 29 | "metadata": {}, 30 | "outputs": [], 31 | "source": [ 32 | "import matplotlib.pyplot as plt\n", 33 | "import numpy as np\n", 34 | "import torch\n", 35 | "\n", 36 | "from torchvision import transforms" 37 | ] 38 | }, 39 | { 40 | "cell_type": "code", 41 | "execution_count": null, 42 | "id": "59838c88", 43 | "metadata": {}, 44 | "outputs": [], 45 | "source": [ 46 | "class DSBData():\n", 47 | " def __init__(self, image_files, label_files, target_shape=(256, 256)):\n", 48 | " \"\"\"\n", 49 | " Parameters\n", 50 | " ----------\n", 51 | " image_files: list of pathlib.Path objects pointing to the *.tif images\n", 52 | " label_files: list of pathlib.Path objects pointing to the *.tif segmentation masks\n", 53 | " target_shape: tuple of length 2 specifying the sample resolutions of files that\n", 54 | " will be kept. All other files will NOT be used.\n", 55 | " \"\"\"\n", 56 | " assert len(image_files) == len(label_files)\n", 57 | " assert all(x.name==y.name for x,y in zip(image_files, label_files))\n", 58 | "\n", 59 | " self.images = []\n", 60 | " self.labels = []\n", 61 | "\n", 62 | " tensor_transform = transforms.Compose([\n", 63 | " transforms.ToTensor(),\n", 64 | " ])\n", 65 | " \n", 66 | " # use tqdm to have eye pleasing error bars\n", 67 | " for idx in tqdm(range(len(image_files))):\n", 68 | " # we use the same data reading approach as in the previous notebook\n", 69 | " image = imread(image_files[idx])\n", 70 | " label = imread(label_files[idx])\n", 71 | "\n", 72 | " if image.shape != target_shape:\n", 73 | " continue\n", 74 | " \n", 75 | " # do the normalizations\n", 76 | " image = quantile_normalization(\n", 77 | " image,\n", 78 | " quantile_low=0.01,\n", 79 | " quantile_high=0.998,\n", 80 | " clip=True)[0].astype(np.float32)\n", 81 | "\n", 82 | " # NOTE: we convert the label to dtype float32 and not uint8 because\n", 83 | " # the tensor transformation does a normalization if the input is of\n", 84 | " # dtype uint8, destroying the 0/1 labelling which we want to avoid.\n", 85 | " label = fill_label_holes(label)\n", 86 | " label_binary = np.zeros_like(label).astype(np.float32)\n", 87 | " label_binary[label != 0] = 1.\n", 88 | " \n", 89 | " # convert to torch tensor: adds an artificial color channel in the front\n", 90 | " # and scales inputs to have same size as samples tend to differ in image\n", 91 | " # resolutions\n", 92 | " image = tensor_transform(image)\n", 93 | " label = tensor_transform(label_binary)\n", 94 | "\n", 95 | " self.images.append(image)\n", 96 | " self.labels.append(label)\n", 97 | " \n", 98 | " self.images = torch.stack(self.images)\n", 99 | " self.labels = torch.stack(self.labels)\n", 100 | " \n", 101 | " def __getitem__(self, idx):\n", 102 | " return self.images[idx], self.labels[idx]\n", 103 | "\n", 104 | " def __len__(self):\n", 105 | " return len(self.images)" 106 | ] 107 | }, 108 | { 109 | "cell_type": "code", 110 | "execution_count": null, 111 | "id": "30835205", 112 | "metadata": {}, 113 | "outputs": [], 114 | "source": [ 115 | "train_img_files, train_lbl_files = get_dsb2018_train_files()\n", 
116 | "\n", 117 | "n_samples = len(train_img_files)\n", 118 | "\n", 119 | "train_data = DSBData(\n", 120 | " image_files=train_img_files[:n_samples],\n", 121 | " label_files=train_lbl_files[:n_samples],\n", 122 | " target_shape=(256, 256)\n", 123 | ")\n", 124 | "\n", 125 | "# NOTE: the length of the dataset might not be the same as n_samples\n", 126 | "# because files not having the target shape will be discarded\n", 127 | "print(len(train_data))" 128 | ] 129 | }, 130 | { 131 | "cell_type": "code", 132 | "execution_count": null, 133 | "id": "812b41a4", 134 | "metadata": {}, 135 | "outputs": [], 136 | "source": [ 137 | "print(train_data.images.shape, train_data.labels.shape)" 138 | ] 139 | }, 140 | { 141 | "cell_type": "code", 142 | "execution_count": null, 143 | "id": "97aa29f6", 144 | "metadata": {}, 145 | "outputs": [], 146 | "source": [ 147 | "print(train_data.images.min(), train_data.images.max())" 148 | ] 149 | }, 150 | { 151 | "cell_type": "code", 152 | "execution_count": null, 153 | "id": "9117de87", 154 | "metadata": {}, 155 | "outputs": [], 156 | "source": [ 157 | "print(train_data.labels.unique())" 158 | ] 159 | }, 160 | { 161 | "cell_type": "code", 162 | "execution_count": null, 163 | "id": "29d0a74e", 164 | "metadata": {}, 165 | "outputs": [], 166 | "source": [ 167 | "val_img_files, val_lbl_files = get_dsb2018_validation_files()\n", 168 | "\n", 169 | "n_samples = len(val_img_files)\n", 170 | "\n", 171 | "val_data = DSBData(\n", 172 | " image_files=val_img_files[:n_samples],\n", 173 | " label_files=val_lbl_files[:n_samples],\n", 174 | " target_shape=(256, 256)\n", 175 | ")\n", 176 | "\n", 177 | "# NOTE: the length of the dataset might not be the same as n_samples\n", 178 | "# because files not having the target shape will be discarded\n", 179 | "print(len(val_data))" 180 | ] 181 | }, 182 | { 183 | "cell_type": "code", 184 | "execution_count": null, 185 | "id": "c75c3ac7", 186 | "metadata": {}, 187 | "outputs": [], 188 | "source": [ 189 | "image, label = train_data[0]\n", 190 | "print(image.shape, label.shape)\n", 191 | "\n", 192 | "plt.subplot(121)\n", 193 | "plt.imshow(image[0].numpy(), cmap=\"gray\")\n", 194 | "\n", 195 | "plt.subplot(122)\n", 196 | "plt.imshow(label[0].numpy(), cmap=\"gray\")" 197 | ] 198 | }, 199 | { 200 | "cell_type": "markdown", 201 | "id": "136507e7-16d3-472c-bb8d-556ad87af74b", 202 | "metadata": {}, 203 | "source": [ 204 | "## Exercise: What if I'd chosen a different shape?\n", 205 | "\n", 206 | "Return to the last notebook. Check what different shapes are available in the data set. Compose a data set object only with them. Take 1-2 samples and display them in the notebook." 
207 | ] 208 | }, 209 | { 210 | "cell_type": "code", 211 | "execution_count": null, 212 | "id": "73930e32-337c-45f4-ba77-a828668d1fc6", 213 | "metadata": {}, 214 | "outputs": [], 215 | "source": [] 216 | } 217 | ], 218 | "metadata": { 219 | "kernelspec": { 220 | "display_name": "torch_intro_env", 221 | "language": "python", 222 | "name": "torch_intro_env" 223 | }, 224 | "language_info": { 225 | "codemirror_mode": { 226 | "name": "ipython", 227 | "version": 3 228 | }, 229 | "file_extension": ".py", 230 | "mimetype": "text/x-python", 231 | "name": "python", 232 | "nbconvert_exporter": "python", 233 | "pygments_lexer": "ipython3", 234 | "version": "3.10.12" 235 | } 236 | }, 237 | "nbformat": 4, 238 | "nbformat_minor": 5 239 | } 240 | -------------------------------------------------------------------------------- /docs/60_Pytorch/03_data_batching_and_setup_model.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "23ebe65a", 6 | "metadata": {}, 7 | "source": [ 8 | "# Processing batches of data\n", 9 | "\n", 10 | "As most deep learning workflows benefit greatly from running on machines with GPUs that can process data in parallel, during model training the data is passed in batches of samples to the network instead of processing each sample sequentially. Torch offers great support for this, building on top of a provided dataset.\n", 11 | "For convenience, the dataset class introduced in the previous notebook is part of the data module and we can now easily import it." 12 | ] 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": null, 17 | "id": "0ee2d544", 18 | "metadata": {}, 19 | "outputs": [], 20 | "source": [ 21 | "from data import DSBData, get_dsb2018_train_files" 22 | ] 23 | }, 24 | { 25 | "cell_type": "code", 26 | "execution_count": null, 27 | "id": "1edd1a8b", 28 | "metadata": {}, 29 | "outputs": [], 30 | "source": [ 31 | "train_img_files, train_lbl_files = get_dsb2018_train_files()\n", 32 | "\n", 33 | "train_data = DSBData(\n", 34 | " image_files=train_img_files,\n", 35 | " label_files=train_lbl_files,\n", 36 | " target_shape=(256, 256)\n", 37 | ")\n", 38 | "\n", 39 | "print(len(train_data))" 40 | ] 41 | }, 42 | { 43 | "cell_type": "markdown", 44 | "id": "86ebf6a7-5af9-4c82-b759-59f8805b2d72", 45 | "metadata": {}, 46 | "source": [ 47 | "Before starting to work with the data and actual models, we have to wrap our dataset object in a `DataLoader`." 48 | ] 49 | }, 50 | { 51 | "cell_type": "code", 52 | "execution_count": null, 53 | "id": "34c2b7d2", 54 | "metadata": {}, 55 | "outputs": [], 56 | "source": [ 57 | "from torch.utils.data import DataLoader\n", 58 | "\n", 59 | "train_loader = DataLoader(train_data, batch_size=32, shuffle=True)" 60 | ] 61 | }, 62 | { 63 | "cell_type": "markdown", 64 | "id": "b04df710-aa06-4985-9c8c-dcabd744dc71", 65 | "metadata": {}, 66 | "source": [ 67 | "The `DataLoader` comes with out-of-the-box support for iterators that make looping code a bit more concise."
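,
    "\n",
    "\n",
    "For example, a single batch can be pulled out of the loader as shown here; with the settings above, each batch should contain up to 32 images of shape `1 x 256 x 256`:\n",
    "\n",
    "```python\n",
    "batch_images, batch_labels = next(iter(train_loader))\n",
    "print(batch_images.shape, batch_labels.shape)\n",
    "```"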
68 | ] 69 | }, 70 | { 71 | "cell_type": "code", 72 | "execution_count": null, 73 | "id": "8d57f46b", 74 | "metadata": {}, 75 | "outputs": [], 76 | "source": [ 77 | "for batch_idx, (batch_images, batch_labels) in enumerate(train_loader):\n", 78 | " print(\"Batch\", batch_idx, batch_images.shape, batch_labels.shape)\n", 79 | " #break" 80 | ] 81 | }, 82 | { 83 | "cell_type": "markdown", 84 | "id": "b90408d6", 85 | "metadata": {}, 86 | "source": [ 87 | "## Neural network architecture\n", 88 | "\n", 89 | "For semantic segmentation problems, a specific convolutional neural network architecture, i.e. a defined sequence of operations (also called layers) involving convolutional filters, data aggregation via pooling and nonlinear activation functions, has been demonstrated to work well across a wide range of image domains. This architecture is called UNet and its basic structure is shown below. (Image taken from [here](https://github.com/HarisIqbal88/PlotNeuralNet/blob/master/examples/Unet_Ushape/Unet_ushape.pdf).)\n", 90 | "\n", 91 | "\"Drawing\"\n", 92 | "\n", 93 | "As this is rather cumbersome to implement directly, we will use the [MONAI](https://monai.io/) library, which provides a convenient torch implementation of this architecture by the name of `BasicUNet`. \n", 94 | "\n", 95 | "If you are interested, the [MONAI](https://monai.io/) library offers many more architectures in their [network architectures](https://docs.monai.io/en/stable/networks.html) documentation section." 96 | ] 97 | }, 98 | { 99 | "cell_type": "code", 100 | "execution_count": null, 101 | "id": "708ba935", 102 | "metadata": {}, 103 | "outputs": [], 104 | "source": [ 105 | "import matplotlib.pyplot as plt\n", 106 | "import torch\n", 107 | "\n", 108 | "from monai.networks.nets import BasicUNet" 109 | ] 110 | }, 111 | { 112 | "cell_type": "code", 113 | "execution_count": null, 114 | "id": "51be9e50", 115 | "metadata": {}, 116 | "outputs": [], 117 | "source": [ 118 | "BasicUNet?" 119 | ] 120 | }, 121 | { 122 | "cell_type": "code", 123 | "execution_count": null, 124 | "id": "f4c56869", 125 | "metadata": {}, 126 | "outputs": [], 127 | "source": [ 128 | "model = BasicUNet(\n", 129 | " spatial_dims=2,\n", 130 | " in_channels=1,\n", 131 | " out_channels=1,\n", 132 | " features=[16, 16, 32, 64, 128, 16],\n", 133 | " act=\"relu\",\n", 134 | " norm=\"batch\",\n", 135 | " dropout=0.25,\n", 136 | ")\n", 137 | "print(model)" 138 | ] 139 | }, 140 | { 141 | "cell_type": "markdown", 142 | "id": "f4b3d662", 143 | "metadata": {}, 144 | "source": [ 145 | "We can now feed a batch of images directly through the model to obtain predictions. Note however, that those will likely not be usable for segmentation as the model has not been trained yet and model parameters are initialized randomly.\n", 146 | "\n", 147 | "Very importantly, the model outputs are of the same shape as the model inputs. Because the UNet consists entirely of convolutional operations, it is (to a degree) shape invariant and can process arbitrary input sizes. It is however recommended to work with resolutions that are divisible by 16, as the input resolution is halved in each of the four downsampling blocks." 
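,
    "\n",
    "\n",
    "If your data does not come in such a resolution, one possible remedy (sketched here under the assumption that zero padding is acceptable for your images) is to pad each image up to the next multiple of 16:\n",
    "\n",
    "```python\n",
    "import math\n",
    "import torch.nn.functional as F\n",
    "\n",
    "h, w = 114, 87  # an arbitrary example resolution\n",
    "pad_h = math.ceil(h / 16) * 16 - h\n",
    "pad_w = math.ceil(w / 16) * 16 - w\n",
    "# F.pad expects (left, right, top, bottom) for the last two dimensions\n",
    "padded = F.pad(torch.zeros(8, 1, h, w), (0, pad_w, 0, pad_h))\n",
    "print(padded.shape)  # torch.Size([8, 1, 128, 96])\n",
    "```"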
148 | ] 149 | }, 150 | { 151 | "cell_type": "code", 152 | "execution_count": null, 153 | "id": "e3c2d584", 154 | "metadata": {}, 155 | "outputs": [], 156 | "source": [ 157 | "batch_preds = model(batch_images)\n", 158 | "print(batch_preds.shape)" 159 | ] 160 | }, 161 | { 162 | "cell_type": "code", 163 | "execution_count": null, 164 | "id": "f1a3e74a", 165 | "metadata": {}, 166 | "outputs": [], 167 | "source": [ 168 | "plt.subplot(131)\n", 169 | "plt.imshow(batch_images[0, 0].numpy(), cmap=\"gray\")\n", 170 | "plt.title(\"Input\")\n", 171 | "\n", 172 | "plt.subplot(132)\n", 173 | "plt.imshow(batch_labels[0, 0].numpy(), cmap=\"gray\")\n", 174 | "plt.title(\"Ground truth\")\n", 175 | "\n", 176 | "plt.subplot(133)\n", 177 | "plt.imshow(batch_preds.detach()[0, 0].numpy(), cmap=\"gray\")\n", 178 | "plt.title(\"Predictions\")" 179 | ] 180 | }, 181 | { 182 | "cell_type": "code", 183 | "execution_count": null, 184 | "id": "4165f377", 185 | "metadata": {}, 186 | "outputs": [], 187 | "source": [ 188 | "# different sized dummy input should be processable as well\n", 189 | "dummy_batch = torch.zeros(8, 1, 512, 512)\n", 190 | "dummy_preds = model(dummy_batch)\n", 191 | "print(dummy_preds.shape)" 192 | ] 193 | }, 194 | { 195 | "cell_type": "code", 196 | "execution_count": null, 197 | "id": "f252db93", 198 | "metadata": {}, 199 | "outputs": [], 200 | "source": [ 201 | "# different sized dummy input that is not divisible by 16, still produces output of same shape\n", 202 | "dummy_batch = torch.zeros(8, 1, 114, 87)\n", 203 | "dummy_preds = model(dummy_batch)\n", 204 | "print(dummy_preds.shape)" 205 | ] 206 | }, 207 | { 208 | "cell_type": "markdown", 209 | "id": "e1469a93", 210 | "metadata": {}, 211 | "source": [ 212 | "The model output range is not limited to `[0, 1)` because no nonlinear activation was used in the output layer that could have constrained the output pixel values to this range.\n", 213 | "\n", 214 | "To fix this and make the output usable for segmentation purposes, we apply a [sigmoid activation](https://en.wikipedia.org/wiki/Sigmoid_function) function per pixel.
" 215 | ] 216 | }, 217 | { 218 | "cell_type": "code", 219 | "execution_count": null, 220 | "id": "e8845d58", 221 | "metadata": {}, 222 | "outputs": [], 223 | "source": [ 224 | "print(batch_preds.min(), batch_preds.max())" 225 | ] 226 | }, 227 | { 228 | "cell_type": "code", 229 | "execution_count": null, 230 | "id": "aa8e883f", 231 | "metadata": {}, 232 | "outputs": [], 233 | "source": [ 234 | "batch_preds_seg = torch.nn.functional.sigmoid(batch_preds)\n", 235 | "print(batch_preds_seg.min(), batch_preds_seg.max())" 236 | ] 237 | }, 238 | { 239 | "cell_type": "code", 240 | "execution_count": null, 241 | "id": "e5976c40", 242 | "metadata": {}, 243 | "outputs": [], 244 | "source": [ 245 | "plt.imshow(batch_preds_seg.detach()[0, 0], cmap=\"gray\")\n", 246 | "plt.colorbar(orientation=\"horizontal\")" 247 | ] 248 | }, 249 | { 250 | "cell_type": "markdown", 251 | "id": "9588a98b", 252 | "metadata": {}, 253 | "source": [ 254 | "In order to obtain binary (0/1) predictions, a straightforward approach would be to use thresholding at 0.5." 255 | ] 256 | }, 257 | { 258 | "cell_type": "code", 259 | "execution_count": null, 260 | "id": "0f9195de", 261 | "metadata": {}, 262 | "outputs": [], 263 | "source": [ 264 | "batch_preds_seg_binary = (batch_preds_seg > 0.5).to(torch.uint8)\n", 265 | "plt.imshow(batch_preds_seg_binary.detach()[0, 0], cmap=\"gray\")\n", 266 | "plt.colorbar(orientation=\"horizontal\")" 267 | ] 268 | }, 269 | { 270 | "cell_type": "markdown", 271 | "id": "81949bca-ad98-41f0-8d81-0a5b439b0fa0", 272 | "metadata": {}, 273 | "source": [ 274 | "Our model is not trained yet, so don't be surprised to see mostly garbage in the plot above." 275 | ] 276 | }, 277 | { 278 | "cell_type": "markdown", 279 | "id": "92837588-7f54-4b70-b821-ad1c37c68b92", 280 | "metadata": {}, 281 | "source": [ 282 | "## Exercise: My first MONAI BasicUNet\n", 283 | "\n", 284 | "Play with the model a bit. Take the constructor and change some parameters, e.g. the features, the activation or the normalisation. Then, have the model predict on the same image as above. Display the prediction and compare to what we saw earlier. Do you spot a difference?" 285 | ] 286 | }, 287 | { 288 | "cell_type": "code", 289 | "execution_count": null, 290 | "id": "a1ffcd79-3333-4c52-b99b-a84d4e40d41d", 291 | "metadata": {}, 292 | "outputs": [], 293 | "source": [] 294 | } 295 | ], 296 | "metadata": { 297 | "kernelspec": { 298 | "display_name": "torch_intro_env", 299 | "language": "python", 300 | "name": "torch_intro_env" 301 | }, 302 | "language_info": { 303 | "codemirror_mode": { 304 | "name": "ipython", 305 | "version": 3 306 | }, 307 | "file_extension": ".py", 308 | "mimetype": "text/x-python", 309 | "name": "python", 310 | "nbconvert_exporter": "python", 311 | "pygments_lexer": "ipython3", 312 | "version": "3.10.12" 313 | } 314 | }, 315 | "nbformat": 4, 316 | "nbformat_minor": 5 317 | } 318 | -------------------------------------------------------------------------------- /docs/60_Pytorch/04_model_training.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "115b6528", 6 | "metadata": {}, 7 | "source": [ 8 | "# Training a Unet\n", 9 | "\n", 10 | "Now we have all pieces in place to train a network to segment images for us. Let's do it!"
11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": null, 16 | "id": "6973b36a", 17 | "metadata": {}, 18 | "outputs": [], 19 | "source": [ 20 | "from torch.utils.data import DataLoader\n", 21 | "from data import DSBData, get_dsb2018_train_files" 22 | ] 23 | }, 24 | { 25 | "cell_type": "code", 26 | "execution_count": null, 27 | "id": "6c522cc1", 28 | "metadata": {}, 29 | "outputs": [], 30 | "source": [ 31 | "import torch\n", 32 | "from monai.networks.nets import BasicUNet" 33 | ] 34 | }, 35 | { 36 | "cell_type": "code", 37 | "execution_count": null, 38 | "id": "191569d4", 39 | "metadata": {}, 40 | "outputs": [], 41 | "source": [ 42 | "train_img_files, train_lbl_files = get_dsb2018_train_files()\n", 43 | "\n", 44 | "train_data = DSBData(\n", 45 | " image_files=train_img_files,\n", 46 | " label_files=train_lbl_files,\n", 47 | " target_shape=(256, 256)\n", 48 | ")\n", 49 | "\n", 50 | "print(len(train_data))\n", 51 | "\n", 52 | "train_loader = DataLoader(train_data, batch_size=32, shuffle=True)" 53 | ] 54 | }, 55 | { 56 | "cell_type": "code", 57 | "execution_count": null, 58 | "id": "24514a8d", 59 | "metadata": {}, 60 | "outputs": [], 61 | "source": [ 62 | "model = BasicUNet(\n", 63 | " spatial_dims=2,\n", 64 | " in_channels=1,\n", 65 | " out_channels=1,\n", 66 | " features=[16, 16, 32, 64, 128, 16],\n", 67 | " act=\"relu\",\n", 68 | " norm=\"batch\",\n", 69 | " dropout=0.25,\n", 70 | ")" 71 | ] 72 | }, 73 | { 74 | "cell_type": "markdown", 75 | "id": "3b2d1cad", 76 | "metadata": {}, 77 | "source": [ 78 | "Training of a neural network means updating its parameters (weights) in order to decrease what is called the loss function. This is performed using an optimizer (`Adam` here) which uses the gradient of this loss function with respect to the model parameters in order to adjust model weights. This should lead to an ever decreasing loss during training." 79 | ] 80 | }, 81 | { 82 | "cell_type": "code", 83 | "execution_count": null, 84 | "id": "8961c8d4", 85 | "metadata": {}, 86 | "outputs": [], 87 | "source": [ 88 | "optimizer = torch.optim.Adam(model.parameters(), lr=1.e-3)\n", 89 | "init_params = list(model.parameters())[0].clone().detach() #storing it for later use" 90 | ] 91 | }, 92 | { 93 | "cell_type": "markdown", 94 | "id": "3ac3ab18", 95 | "metadata": {}, 96 | "source": [ 97 | "Such a training is performed by iterating over the batches of the training dataset multiple times. Each full iteration over the dataset is termed an epoch." 98 | ] 99 | }, 100 | { 101 | "cell_type": "code", 102 | "execution_count": null, 103 | "id": "90632dca", 104 | "metadata": {}, 105 | "outputs": [], 106 | "source": [ 107 | "max_nepochs = 1\n", 108 | "log_interval = 1\n", 109 | "model.train(True) #the model is put in training mode, i.e.
gradients are computed\n", 110 | "\n", 111 | "# BCEWithLogitsLoss expects raw unnormalized scores and combines sigmoid + BCELoss for better\n", 112 | "# numerical stability.\n", 113 | "# expects B x C x W x D\n", 114 | "loss_function = torch.nn.BCEWithLogitsLoss(reduction=\"mean\")\n", 115 | "\n", 116 | "for epoch in range(1, max_nepochs + 1):\n", 117 | " for batch_idx, (X, y) in enumerate(train_loader):\n", 118 | " # print(\"train\", batch_idx, X.shape, y.shape)\n", 119 | "\n", 120 | " optimizer.zero_grad()\n", 121 | "\n", 122 | " prediction_logits = model(X)\n", 123 | " \n", 124 | " batch_loss = loss_function(prediction_logits, y)\n", 125 | "\n", 126 | " batch_loss.backward()\n", 127 | "\n", 128 | " optimizer.step()\n", 129 | "\n", 130 | " if batch_idx % log_interval == 0:\n", 131 | " print(\n", 132 | " \"Train Epoch:\",\n", 133 | " epoch,\n", 134 | " \"Batch:\",\n", 135 | " batch_idx,\n", 136 | " \"Total samples processed:\",\n", 137 | " (batch_idx + 1) * train_loader.batch_size,\n", 138 | " \"Loss:\",\n", 139 | " batch_loss.item(),\n", 140 | " )" 141 | ] 142 | }, 143 | { 144 | "cell_type": "code", 145 | "execution_count": null, 146 | "id": "407fd45a", 147 | "metadata": {}, 148 | "outputs": [], 149 | "source": [ 150 | "final_params = list(model.parameters())[0].clone().detach()\n", 151 | "assert not torch.allclose(init_params, final_params)" 152 | ] 153 | }, 154 | { 155 | "cell_type": "markdown", 156 | "id": "66449832", 157 | "metadata": {}, 158 | "source": [ 159 | "## Look at some predictions\n", 160 | "\n", 161 | "Now that the model has been trained for a little bit, we are looking at the predictions again. Usually model training has to be peformed longer, so don't expect any wonders. Also keep in mind that the predictions here are based on the data the model was trained on. Those predictions might be far better than those on data not used during training. But this is a story for later." 162 | ] 163 | }, 164 | { 165 | "cell_type": "code", 166 | "execution_count": null, 167 | "id": "86e8123c", 168 | "metadata": {}, 169 | "outputs": [], 170 | "source": [ 171 | "import matplotlib.pyplot as plt" 172 | ] 173 | }, 174 | { 175 | "cell_type": "code", 176 | "execution_count": null, 177 | "id": "7a473a7f", 178 | "metadata": {}, 179 | "outputs": [], 180 | "source": [ 181 | "# convert to 0/1 range on each pixel\n", 182 | "prediction = torch.nn.functional.sigmoid(prediction_logits)\n", 183 | "prediction_binary = (prediction > 0.5).to(torch.uint8)\n", 184 | "\n", 185 | "sidx = 0\n", 186 | "plt.subplot(131)\n", 187 | "plt.imshow(X[sidx, 0].numpy(), cmap=\"gray\")\n", 188 | "plt.title(\"Input\")\n", 189 | "\n", 190 | "plt.subplot(132)\n", 191 | "plt.imshow(y[sidx, 0].numpy(), cmap=\"gray\")\n", 192 | "plt.title(\"Ground truth\")\n", 193 | "\n", 194 | "plt.subplot(133)\n", 195 | "plt.imshow(prediction_binary.detach()[sidx, 0].numpy(), cmap=\"gray\")\n", 196 | "plt.title(\"Predictions\")" 197 | ] 198 | }, 199 | { 200 | "cell_type": "markdown", 201 | "id": "f5ff2eff-c06b-4ed3-97ad-1517de6e9c2d", 202 | "metadata": {}, 203 | "source": [ 204 | "## Exercise: We can do better!\n", 205 | "\n", 206 | "Take the training code from above and have the model train for longer. For example, try 10 or 20 epochs. Do you see any improvements?" 
207 | ] 208 | }, 209 | { 210 | "cell_type": "code", 211 | "execution_count": null, 212 | "id": "2440511a-c0a9-4d7d-a1d6-738fc8ea12bb", 213 | "metadata": {}, 214 | "outputs": [], 215 | "source": [] 216 | } 217 | ], 218 | "metadata": { 219 | "kernelspec": { 220 | "display_name": "torch_intro_env", 221 | "language": "python", 222 | "name": "torch_intro_env" 223 | }, 224 | "language_info": { 225 | "codemirror_mode": { 226 | "name": "ipython", 227 | "version": 3 228 | }, 229 | "file_extension": ".py", 230 | "mimetype": "text/x-python", 231 | "name": "python", 232 | "nbconvert_exporter": "python", 233 | "pygments_lexer": "ipython3", 234 | "version": "3.10.12" 235 | } 236 | }, 237 | "nbformat": 4, 238 | "nbformat_minor": 5 239 | } 240 | -------------------------------------------------------------------------------- /docs/60_Pytorch/06_model_training_with_logging.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Training with logging\n", 8 | "\n", 9 | "Training of the model in the previous notebook was leveraging the GPU in case it was available. However, after training and closing of this notebook, information about the course of training and the development of the loss was gone. We would like to keep this information as it might be relevant for diagnostic purposes later on, such as convergence or overfitting.\n", 10 | "\n", 11 | "A separate tool, originally developed within the tensorflow ecosystem but now adapted to pytorch, provides a solution for this: Tensorboard. More information is available [here](https://pytorch.org/tutorials/recipes/recipes/tensorboard_with_pytorch.html)." 12 | ] 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": null, 17 | "metadata": {}, 18 | "outputs": [], 19 | "source": [ 20 | "from torch.utils.tensorboard import SummaryWriter" 21 | ] 22 | }, 23 | { 24 | "cell_type": "code", 25 | "execution_count": null, 26 | "metadata": {}, 27 | "outputs": [], 28 | "source": [ 29 | "import torch" 30 | ] 31 | }, 32 | { 33 | "cell_type": "code", 34 | "execution_count": null, 35 | "metadata": {}, 36 | "outputs": [], 37 | "source": [ 38 | "cuda_present = torch.cuda.is_available()\n", 39 | "ndevices = torch.cuda.device_count()\n", 40 | "use_cuda = cuda_present and ndevices > 0\n", 41 | "device = torch.device(\"cuda\" if use_cuda else \"cpu\") # \"cuda:0\" ... 
default device, \"cuda:1\" would be GPU index 1, \"cuda:2\" etc\n", 42 | "print(\"number of devices:\", ndevices, \"\\tchosen device:\", device, \"\\tuse_cuda=\", use_cuda)" 43 | ] 44 | }, 45 | { 46 | "cell_type": "code", 47 | "execution_count": null, 48 | "metadata": {}, 49 | "outputs": [], 50 | "source": [ 51 | "from torch.utils.data import DataLoader\n", 52 | "from data import DSBData, get_dsb2018_train_files" 53 | ] 54 | }, 55 | { 56 | "cell_type": "code", 57 | "execution_count": null, 58 | "metadata": {}, 59 | "outputs": [], 60 | "source": [ 61 | "from monai.networks.nets import BasicUNet" 62 | ] 63 | }, 64 | { 65 | "cell_type": "code", 66 | "execution_count": null, 67 | "metadata": {}, 68 | "outputs": [], 69 | "source": [ 70 | "train_img_files, train_lbl_files = get_dsb2018_train_files()\n", 71 | "\n", 72 | "train_data = DSBData(\n", 73 | " image_files=train_img_files,\n", 74 | " label_files=train_lbl_files,\n", 75 | " target_shape=(256, 256)\n", 76 | ")\n", 77 | "\n", 78 | "print(len(train_data))\n", 79 | "\n", 80 | "train_loader = DataLoader(train_data, batch_size=32, shuffle=True, num_workers=1, pin_memory=True)" 81 | ] 82 | }, 83 | { 84 | "cell_type": "code", 85 | "execution_count": null, 86 | "metadata": {}, 87 | "outputs": [], 88 | "source": [ 89 | "model = BasicUNet(\n", 90 | " spatial_dims=2,\n", 91 | " in_channels=1,\n", 92 | " out_channels=1,\n", 93 | " features=[16, 16, 32, 64, 128, 16],\n", 94 | " act=\"relu\",\n", 95 | " norm=\"batch\",\n", 96 | " dropout=0.25,\n", 97 | ")\n", 98 | "\n", 99 | "# transfer the model to the chosen device\n", 100 | "model = model.to(device)" 101 | ] 102 | }, 103 | { 104 | "cell_type": "markdown", 105 | "metadata": {}, 106 | "source": [ 107 | "Training of a neural network means updating its parameters (weights) using a strategy that involves the gradients of a loss function with respect to the model parameters in order to adjust model weights to minimize this loss." 108 | ] 109 | }, 110 | { 111 | "cell_type": "code", 112 | "execution_count": null, 113 | "metadata": {}, 114 | "outputs": [], 115 | "source": [ 116 | "optimizer = torch.optim.Adam(model.parameters(), lr=1.e-3)\n", 117 | "init_params = list(model.parameters())[0].clone().detach()" 118 | ] 119 | }, 120 | { 121 | "cell_type": "markdown", 122 | "metadata": {}, 123 | "source": [ 124 | "Such a training is performed by iterating over the batches of the training dataset multiple times. Each full iteration over the dataset is termed an epoch." 125 | ] 126 | }, 127 | { 128 | "cell_type": "markdown", 129 | "metadata": {}, 130 | "source": [ 131 | "**During or after training**, the tensorboard logs (which have been collected with the `SummaryWriter` object) can be visualized. If you were on your laptop or workstation at home, you could run:\n", 132 | "\n", 133 | "```shell\n", 134 | "tensorboard --logdir \"path/to/logs\"\n", 135 | "```\n", 136 | "\n", 137 | "then open a browser using the URL `localhost:6006` (or whichever port the tensorboard server reports it is running on).\n", 138 | "Alternatively, tensorboard can be accessed from jupyter as well:" 139 | ] 140 | }, 141 | { 142 | "cell_type": "markdown", 143 | "metadata": {}, 144 | "source": [ 145 | "On Taurus, some special steps need to be taken to visualize the tensorboard logs.\n", 146 | "\n", 147 | "If not done already, spawn a notebook BUT this time make sure to choose `production` under software environment in the advanced spawn configuration. Then wait until the notebooks open.
Run this notebook.\n", 148 | "\n", 149 | "In order to be able to view the tensorboard logs, the tensorboard jupyter lab extension always checks the same location on the computer it is running on. Hence, you need to move your logs in the right location. To do so, run the following command:" 150 | ] 151 | }, 152 | { 153 | "cell_type": "code", 154 | "execution_count": null, 155 | "metadata": {}, 156 | "outputs": [], 157 | "source": [ 158 | "!mkdir -p /tmp/$USER/tf-logs \n", 159 | "!ln -s $PWD/logs /tmp/$USER/tf-logs #might fail if the destination already exists" 160 | ] 161 | }, 162 | { 163 | "cell_type": "markdown", 164 | "metadata": {}, 165 | "source": [ 166 | "Now run the following cell which performs the model training. While the training runs, you can open the Tensorboad tab from the jupyter lab main page." 167 | ] 168 | }, 169 | { 170 | "cell_type": "code", 171 | "execution_count": null, 172 | "metadata": {}, 173 | "outputs": [], 174 | "source": [ 175 | "max_nepochs = 1\n", 176 | "log_interval = 1\n", 177 | "\n", 178 | "writer = SummaryWriter(log_dir=\"logs\", comment=\"this is the test of SummaryWriter\")\n", 179 | "\n", 180 | "model.train(True)\n", 181 | "\n", 182 | "# expects raw unnormalized scores and combines sigmoid + BCELoss for better\n", 183 | "# numerical stability.\n", 184 | "# expects B x C x W x D\n", 185 | "loss_function = torch.nn.BCEWithLogitsLoss(reduction=\"mean\")\n", 186 | "\n", 187 | "for epoch in range(1, max_nepochs + 1):\n", 188 | " for batch_idx, (X, y) in enumerate(train_loader):\n", 189 | " # the inputs and labels have to be on the same device as the model\n", 190 | " X, y = X.to(device), y.to(device)\n", 191 | " \n", 192 | " optimizer.zero_grad()\n", 193 | "\n", 194 | " prediction_logits = model(X)\n", 195 | " \n", 196 | " batch_loss = loss_function(prediction_logits, y)\n", 197 | "\n", 198 | " batch_loss.backward()\n", 199 | "\n", 200 | " optimizer.step()\n", 201 | "\n", 202 | " if batch_idx % log_interval == 0:\n", 203 | " print(\n", 204 | " \"Train Epoch:\",\n", 205 | " epoch,\n", 206 | " \"Batch:\",\n", 207 | " batch_idx,\n", 208 | " \"Total samples processed:\",\n", 209 | " (batch_idx + 1) * train_loader.batch_size,\n", 210 | " \"Loss:\",\n", 211 | " batch_loss.item(),\n", 212 | " )\n", 213 | " writer.add_scalar(\"Loss/train\", batch_loss.item(), batch_idx)\n", 214 | "writer.close()" 215 | ] 216 | }, 217 | { 218 | "cell_type": "markdown", 219 | "metadata": {}, 220 | "source": [ 221 | "When you executed the cell above, you should see a new folder appear in the current directory. This folder is called `logs`. This is where tensorboard stores all run information." 222 | ] 223 | }, 224 | { 225 | "cell_type": "markdown", 226 | "metadata": {}, 227 | "source": [ 228 | "## Exercise: Let's do this locally\n", 229 | "\n", 230 | "The nice thing with having all the logs available on disk is, that you can move them around. If you like, try to download the entire `logs` folder onto your local machine (laptop). Then install `tensorboard` with pip or conda.\n", 231 | "\n", 232 | "```\n", 233 | "pip install tensorboard #can take awhile\n", 234 | "```\n", 235 | "\n", 236 | "Then run the same code as above on your local machine.\n", 237 | "```\n", 238 | "tensorboard --port 6006 --logdir /local/path/logs\n", 239 | "```\n", 240 | "You can now open a browser window and type in `localhost:6006` as URL. This should open the tensorboard interface." 
241 | ] 242 | }, 243 | { 244 | "cell_type": "code", 245 | "execution_count": null, 246 | "metadata": {}, 247 | "outputs": [], 248 | "source": [] 249 | } 250 | ], 251 | "metadata": { 252 | "kernelspec": { 253 | "display_name": "devbio-napari_pol-course-pytorch", 254 | "language": "python", 255 | "name": "devbio-napari_pol-course-pytorch" 256 | }, 257 | "language_info": { 258 | "codemirror_mode": { 259 | "name": "ipython", 260 | "version": 3 261 | }, 262 | "file_extension": ".py", 263 | "mimetype": "text/x-python", 264 | "name": "python", 265 | "nbconvert_exporter": "python", 266 | "pygments_lexer": "ipython3", 267 | "version": "3.10.8" 268 | } 269 | }, 270 | "nbformat": 4, 271 | "nbformat_minor": 5 272 | } 273 | -------------------------------------------------------------------------------- /docs/60_Pytorch/07_model_training_with_checkpoints.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "cdad11e3", 6 | "metadata": {}, 7 | "source": [ 8 | "# Training with checkpoints\n", 9 | "\n", 10 | "The currently developed pipeline has the major drawbacks that the states of the parameters are not saved. So after training, when closing the notebook, you no longer have access to the trained model. This has to be fixed to save you hours of re-training models over and over again. Also, it might be a good idea to save some \"snapshots\" of model parameters obtained during training, not only once training is finished. This can be achieved using checkpointing. " 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": null, 16 | "id": "b53d6258", 17 | "metadata": {}, 18 | "outputs": [], 19 | "source": [ 20 | "from torch.utils.tensorboard import SummaryWriter\n", 21 | "from pathlib import Path" 22 | ] 23 | }, 24 | { 25 | "cell_type": "code", 26 | "execution_count": null, 27 | "id": "2c7b1d5a", 28 | "metadata": {}, 29 | "outputs": [], 30 | "source": [ 31 | "import torch" 32 | ] 33 | }, 34 | { 35 | "cell_type": "code", 36 | "execution_count": null, 37 | "id": "ed0c5a48", 38 | "metadata": {}, 39 | "outputs": [], 40 | "source": [ 41 | "cuda_present = torch.cuda.is_available()\n", 42 | "ndevices = torch.cuda.device_count()\n", 43 | "use_cuda = cuda_present and ndevices > 0\n", 44 | "device = torch.device(\"cuda\" if use_cuda else \"cpu\") # \"cuda:0\" ... 
default device, \"cuda:1\" would be GPU index 1, \"cuda:2\" etc\n", 45 | "print(\"number of devices:\", ndevices, \"\\tchosen device:\", device, \"\\tuse_cuda=\", use_cuda)" 46 | ] 47 | }, 48 | { 49 | "cell_type": "code", 50 | "execution_count": null, 51 | "id": "b71c1cd0", 52 | "metadata": {}, 53 | "outputs": [], 54 | "source": [ 55 | "from torch.utils.data import DataLoader\n", 56 | "from data import DSBData, get_dsb2018_train_files" 57 | ] 58 | }, 59 | { 60 | "cell_type": "code", 61 | "execution_count": null, 62 | "id": "a00f4e20", 63 | "metadata": {}, 64 | "outputs": [], 65 | "source": [ 66 | "from monai.networks.nets import BasicUNet" 67 | ] 68 | }, 69 | { 70 | "cell_type": "code", 71 | "execution_count": null, 72 | "id": "451c0804", 73 | "metadata": {}, 74 | "outputs": [], 75 | "source": [ 76 | "train_img_files, train_lbl_files = get_dsb2018_train_files()\n", 77 | "\n", 78 | "train_data = DSBData(\n", 79 | " image_files=train_img_files,\n", 80 | " label_files=train_lbl_files,\n", 81 | " target_shape=(256, 256)\n", 82 | ")\n", 83 | "\n", 84 | "print(len(train_data))\n", 85 | "\n", 86 | "train_loader = DataLoader(train_data, batch_size=32, shuffle=True, num_workers=1, pin_memory=True)" 87 | ] 88 | }, 89 | { 90 | "cell_type": "code", 91 | "execution_count": null, 92 | "id": "4a396a9e", 93 | "metadata": {}, 94 | "outputs": [], 95 | "source": [ 96 | "model = BasicUNet(\n", 97 | " spatial_dims=2,\n", 98 | " in_channels=1,\n", 99 | " out_channels=1,\n", 100 | " features=[16, 16, 32, 64, 128, 16],\n", 101 | " act=\"relu\",\n", 102 | " norm=\"batch\",\n", 103 | " dropout=0.25,\n", 104 | ")\n", 105 | "\n", 106 | "# transfer the model to the chosen device\n", 107 | "model = model.to(device)" 108 | ] 109 | }, 110 | { 111 | "cell_type": "markdown", 112 | "id": "b0402b8c", 113 | "metadata": {}, 114 | "source": [ 115 | "Training of a neural network means updating its parameters (weights) using a strategy that involves the gradients of a loss function with respect to the model parameters in order to adjust model weights to minimize this loss." 116 | ] 117 | }, 118 | { 119 | "cell_type": "code", 120 | "execution_count": null, 121 | "id": "7aa63020", 122 | "metadata": {}, 123 | "outputs": [], 124 | "source": [ 125 | "optimizer = torch.optim.Adam(model.parameters(), lr=1.e-3)\n", 126 | "init_params = list(model.parameters())[0].clone().detach()" 127 | ] 128 | }, 129 | { 130 | "cell_type": "markdown", 131 | "id": "370497e6", 132 | "metadata": {}, 133 | "source": [ 134 | "Such a training is performed by iterating over the batches of the training dataset multiple times. Each full iteration over the dataset is termed an epoch." 
135 | ] 136 | }, 137 | { 138 | "cell_type": "markdown", 139 | "id": "7de7f9cd", 140 | "metadata": {}, 141 | "source": [ 142 | "During or after training the tensorboard logs can be visualized as follows: in a terminal, type\n", 143 | "\n", 144 | "```shell\n", 145 | "tensorboard --logdir \"path/to/logs\",\n", 146 | "```\n", 147 | "\n", 148 | "then open a browser on `localhost:6006` (or whichever port the tensorboard server outputted as running on).\n", 149 | "Alternatively, tensorboard can be accessed from jupyter as well:" 150 | ] 151 | }, 152 | { 153 | "cell_type": "code", 154 | "execution_count": null, 155 | "id": "8b9a00e9", 156 | "metadata": {}, 157 | "outputs": [], 158 | "source": [ 159 | "%load_ext tensorboard\n", 160 | "%tensorboard --port 6006 --logdir ./logs" 161 | ] 162 | }, 163 | { 164 | "cell_type": "code", 165 | "execution_count": null, 166 | "id": "f10b4327", 167 | "metadata": {}, 168 | "outputs": [], 169 | "source": [ 170 | "max_nepochs = 2\n", 171 | "log_interval = 1\n", 172 | "writer = SummaryWriter(log_dir=\"logs\", comment=\"this is the test of SummaryWriter\")\n", 173 | "\n", 174 | "model.train(True)\n", 175 | "\n", 176 | "chpfolder = Path(\"chkpts\")\n", 177 | "if not chpfolder.is_dir():\n", 178 | " chpfolder.mkdir()\n", 179 | "\n", 180 | "# expects raw unnormalized scores and combines sigmoid + BCELoss for better\n", 181 | "# numerical stability.\n", 182 | "# expects B x C x W x D\n", 183 | "loss_function = torch.nn.BCEWithLogitsLoss(reduction=\"mean\")\n", 184 | "\n", 185 | "for epoch in range(1, max_nepochs + 1):\n", 186 | " for batch_idx, (X, y) in enumerate(train_loader):\n", 187 | " # the inputs and labels have to be on the same device as the model\n", 188 | " X, y = X.to(device), y.to(device)\n", 189 | " \n", 190 | " optimizer.zero_grad()\n", 191 | "\n", 192 | " prediction_logits = model(X)\n", 193 | " \n", 194 | " batch_loss = loss_function(prediction_logits, y)\n", 195 | "\n", 196 | " batch_loss.backward()\n", 197 | "\n", 198 | " optimizer.step()\n", 199 | "\n", 200 | " if batch_idx % log_interval == 0:\n", 201 | " print(\n", 202 | " \"Train Epoch:\",\n", 203 | " epoch,\n", 204 | " \"Batch:\",\n", 205 | " batch_idx,\n", 206 | " \"Total samples processed:\",\n", 207 | " (batch_idx + 1) * train_loader.batch_size,\n", 208 | " \"Loss:\",\n", 209 | " batch_loss.item(),\n", 210 | " )\n", 211 | " writer.add_scalar(\"Loss/train\", batch_loss.item(), batch_idx)\n", 212 | " # epoch finished, we save the model\n", 213 | " cpath = chpfolder / f\"epoch-{epoch:03.0f}.pth\"\n", 214 | " torch.save(\n", 215 | " {\n", 216 | " \"final_epoch\": epoch,\n", 217 | " \"model_state_dict\": model.state_dict(),\n", 218 | " \"optimizer_state_dict\": optimizer.state_dict(),\n", 219 | " },\n", 220 | " cpath,\n", 221 | " )\n", 222 | "\n", 223 | " assert cpath.is_file() and cpath.stat().st_size > 0\n", 224 | "writer.close()" 225 | ] 226 | }, 227 | { 228 | "cell_type": "code", 229 | "execution_count": null, 230 | "id": "81348f9e", 231 | "metadata": {}, 232 | "outputs": [], 233 | "source": [ 234 | "final_params = list(model.parameters())[0].clone().detach()\n", 235 | "assert not torch.allclose(init_params, final_params)" 236 | ] 237 | }, 238 | { 239 | "cell_type": "markdown", 240 | "id": "8ebe2882", 241 | "metadata": {}, 242 | "source": [ 243 | "Restoring the model from a saved checkpoint, e.g. 
for doing inference, can be done as follows:" 244 | ] 245 | }, 246 | { 247 | "cell_type": "code", 248 | "execution_count": null, 249 | "id": "dc2e1428", 250 | "metadata": {}, 251 | "outputs": [], 252 | "source": [ 253 | "payload = torch.load(cpath)\n", 254 | "model_from_ckpt = BasicUNet(\n", 255 | " spatial_dims=2,\n", 256 | " in_channels=1,\n", 257 | " out_channels=1,\n", 258 | " features=[16, 16, 32, 64, 128, 16],\n", 259 | " act=\"relu\",\n", 260 | " norm=\"batch\",\n", 261 | " dropout=0.25,\n", 262 | ")\n", 263 | "model_from_ckpt.load_state_dict(payload['model_state_dict'])\n", 264 | "# continue learning/training after this\n", 265 | "loaded_params = list(model_from_ckpt.parameters())[0]\n", 266 | "assert torch.allclose(loaded_params, final_params)" 267 | ] 268 | }, 269 | { 270 | "cell_type": "code", 271 | "execution_count": null, 272 | "id": "973731ce-42a8-4f1b-bef3-4e07c7f26149", 273 | "metadata": {}, 274 | "outputs": [], 275 | "source": [] 276 | } 277 | ], 278 | "metadata": { 279 | "kernelspec": { 280 | "display_name": "torch_intro_env", 281 | "language": "python", 282 | "name": "torch_intro_env" 283 | }, 284 | "language_info": { 285 | "codemirror_mode": { 286 | "name": "ipython", 287 | "version": 3 288 | }, 289 | "file_extension": ".py", 290 | "mimetype": "text/x-python", 291 | "name": "python", 292 | "nbconvert_exporter": "python", 293 | "pygments_lexer": "ipython3", 294 | "version": "3.10.12" 295 | } 296 | }, 297 | "nbformat": 4, 298 | "nbformat_minor": 5 299 | } 300 | -------------------------------------------------------------------------------- /docs/60_Pytorch/08_pytorch_lightning.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "0e0b62c3", 6 | "metadata": {}, 7 | "source": [ 8 | "# Pytorch lightning\n", 9 | "\n", 10 | "As you might have noticed, torch leaves many responsibilities to the user regarding choices of device, writing of the training loop, loss logging and model checkpointing. This is good and bad at the same time: it provides a lot of flexibility, but it also means re-writing lots of boilerplate code which will be very similar across projects.\n", 11 | "\n", 12 | "[Pytorch_lightning](https://lightning.ai/docs/pytorch/stable/) aims to make many things easier by abstracting them from the user. It automatically handles devices, logs to tensorboard, offers plugins for model checkpointing, makes restoration of models from checkpoints easier and is able to manage distributed training across multiple GPUs or multiple nodes. In this notebook, we will modify the existing code using this library."
13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": null, 18 | "id": "d4817cbe", 19 | "metadata": {}, 20 | "outputs": [], 21 | "source": [ 22 | "import pytorch_lightning as pl\n", 23 | "import torch\n", 24 | "\n", 25 | "from pathlib import Path\n", 26 | "from pytorch_lightning.callbacks import ModelCheckpoint" 27 | ] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": null, 32 | "id": "7c1febf9", 33 | "metadata": {}, 34 | "outputs": [], 35 | "source": [ 36 | "# we can have lightning take care of fixing random seeds for numpy, torch and cuda to ensure reproducibility of the trained models\n", 37 | "pl.seed_everything(seed=42)" 38 | ] 39 | }, 40 | { 41 | "cell_type": "code", 42 | "execution_count": null, 43 | "id": "a59c6408", 44 | "metadata": {}, 45 | "outputs": [], 46 | "source": [ 47 | "cuda_present = torch.cuda.is_available()\n", 48 | "ndevices = torch.cuda.device_count()\n", 49 | "use_cuda = cuda_present and ndevices > 0\n", 50 | "device = torch.device(\"cuda\" if use_cuda else \"cpu\") # \"cuda:0\" ... default device, \"cuda:1\" would be GPU index 1, \"cuda:2\" etc\n", 51 | "print(\"number of devices:\", ndevices, \"\\tchosen device:\", device, \"\\tuse_cuda=\", use_cuda)" 52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "execution_count": null, 57 | "id": "e0b00627", 58 | "metadata": {}, 59 | "outputs": [], 60 | "source": [ 61 | "from torch.utils.data import DataLoader\n", 62 | "from data import DSBData, get_dsb2018_train_files" 63 | ] 64 | }, 65 | { 66 | "cell_type": "code", 67 | "execution_count": null, 68 | "id": "b0df807c", 69 | "metadata": {}, 70 | "outputs": [], 71 | "source": [ 72 | "from monai.networks.nets import BasicUNet" 73 | ] 74 | }, 75 | { 76 | "cell_type": "code", 77 | "execution_count": null, 78 | "id": "92d14c30", 79 | "metadata": {}, 80 | "outputs": [], 81 | "source": [ 82 | "train_img_files, train_lbl_files = get_dsb2018_train_files()\n", 83 | "\n", 84 | "train_data = DSBData(\n", 85 | " image_files=train_img_files,\n", 86 | " label_files=train_lbl_files,\n", 87 | " target_shape=(256, 256)\n", 88 | ")\n", 89 | "\n", 90 | "print(len(train_data))\n", 91 | "\n", 92 | "train_loader = DataLoader(train_data, batch_size=32, shuffle=True, num_workers=1, pin_memory=True)" 93 | ] 94 | }, 95 | { 96 | "cell_type": "markdown", 97 | "id": "c05026e0", 98 | "metadata": {}, 99 | "source": [ 100 | "Using lightning requires us to build a new neural network model by inheriting from the base class. This basically means to re-order parts of the existing code base into various member functions that have to be overridden." 
101 | ] 102 | }, 103 | { 104 | "cell_type": "code", 105 | "execution_count": null, 106 | "id": "a91cdc92", 107 | "metadata": {}, 108 | "outputs": [], 109 | "source": [ 110 | "class SegmentationNetwork(pl.LightningModule):\n", 111 | " def __init__(self, \n", 112 | " features=[16, 16, 32, 64, 128, 16],\n", 113 | " activation=\"relu\",\n", 114 | " normalization=\"batch\",\n", 115 | " dropout=0.25\n", 116 | " ):\n", 117 | " super().__init__()\n", 118 | " self.save_hyperparameters()\n", 119 | " \n", 120 | " self.model = BasicUNet(\n", 121 | " spatial_dims=2,\n", 122 | " in_channels=1,\n", 123 | " out_channels=1,\n", 124 | " features=features,\n", 125 | " act=activation,\n", 126 | " norm=normalization,\n", 127 | " dropout=dropout,\n", 128 | " )\n", 129 | " self.output_activation = torch.nn.Sigmoid()\n", 130 | " self.loss_function = torch.nn.BCEWithLogitsLoss(reduction=\"mean\")\n", 131 | " \n", 132 | " def forward(self, x):\n", 133 | " logits = self.model(x)\n", 134 | " prediction = self.output_activation(logits) # apply the sigmoid to get a value in [0, 1] for each pixel\n", 135 | "\n", 136 | " return {\n", 137 | " 'logits': logits,\n", 138 | " 'prediction': prediction\n", 139 | " }\n", 140 | "\n", 141 | " def training_step(self, batch, batch_idx):\n", 142 | " # NOTE: no manual device mapping is required, lightning does that for you!\n", 143 | " X, y = batch\n", 144 | " pred_dict = self(X)\n", 145 | "\n", 146 | " batch_loss = self.loss_function(pred_dict[\"logits\"], y)\n", 147 | " self.log(\"train_loss\", batch_loss, prog_bar=True)\n", 148 | "\n", 149 | " return batch_loss\n", 150 | "\n", 151 | " def configure_optimizers(self):\n", 152 | " return torch.optim.Adam(self.parameters(), lr=1.e-3)" 153 | ] 154 | }, 155 | { 156 | "cell_type": "code", 157 | "execution_count": null, 158 | "id": "7f5f5249", 159 | "metadata": {}, 160 | "outputs": [], 161 | "source": [ 162 | "model = SegmentationNetwork()\n", 163 | "print(model)" 164 | ] 165 | }, 166 | { 167 | "cell_type": "code", 168 | "execution_count": null, 169 | "id": "76249c1e", 170 | "metadata": {}, 171 | "outputs": [], 172 | "source": [ 173 | "init_params = list(model.parameters())[0].clone().detach()" 174 | ] 175 | }, 176 | { 177 | "cell_type": "markdown", 178 | "id": "3101d471", 179 | "metadata": {}, 180 | "source": [ 181 | "Now we set up a checkpoint callback that takes care of storing model weights and define a trainer instance which will manage running the training loop and device handling for us." 
182 | ] 183 | }, 184 | { 185 | "cell_type": "code", 186 | "execution_count": null, 187 | "id": "f08e53fa", 188 | "metadata": {}, 189 | "outputs": [], 190 | "source": [ 191 | "logfolder = Path(\"lightning_outputs\")\n", 192 | "if not logfolder.is_dir():\n", 193 | " logfolder.mkdir()\n", 194 | "\n", 195 | "ckpt_callback = ModelCheckpoint(\n", 196 | " filename='{epoch:03.0f}-{train_loss:.3f}',\n", 197 | " save_last=True,\n", 198 | " save_top_k=1,\n", 199 | " monitor=\"train_loss\",\n", 200 | " every_n_epochs=1\n", 201 | ")\n", 202 | "\n", 203 | "max_nepochs = 3\n", 204 | "log_interval = 1\n", 205 | "\n", 206 | "trainer = pl.Trainer(\n", 207 | " default_root_dir=logfolder,\n", 208 | " max_epochs=max_nepochs,\n", 209 | " log_every_n_steps=log_interval,\n", 210 | " accelerator=\"gpu\" if use_cuda else \"cpu\",\n", 211 | " devices=ndevices if use_cuda else 1,\n", 212 | " callbacks=[\n", 213 | " ckpt_callback\n", 214 | " ]\n", 215 | ")" 216 | ] 217 | }, 218 | { 219 | "cell_type": "code", 220 | "execution_count": null, 221 | "id": "9a7e46ea", 222 | "metadata": {}, 223 | "outputs": [], 224 | "source": [ 225 | "%load_ext tensorboard\n", 226 | "%tensorboard --port 6006 --logdir ./lightning_outputs" 227 | ] 228 | }, 229 | { 230 | "cell_type": "code", 231 | "execution_count": null, 232 | "id": "0af63aa3", 233 | "metadata": {}, 234 | "outputs": [], 235 | "source": [ 236 | "trainer.fit(model, train_dataloaders=train_loader)" 237 | ] 238 | }, 239 | { 240 | "cell_type": "markdown", 241 | "id": "2f458ac0", 242 | "metadata": {}, 243 | "source": [ 244 | "Restoration of model states from checkpoints is a bit easier than before as well, because it does not require you to re-create a model instance with the same hyperparameters as the checkpointed model (which might be unknown when only the checkpoint is available)." 245 | ] 246 | }, 247 | { 248 | "cell_type": "code", 249 | "execution_count": null, 250 | "id": "f5eb37fa", 251 | "metadata": {}, 252 | "outputs": [], 253 | "source": [ 254 | "final_params = list(model.parameters())[0].clone().detach()\n", 255 | "assert not torch.allclose(init_params, final_params)\n", 256 | "\n", 257 | "# when to reload chkp, e.g. for doing inference\n", 258 | "model_from_ckpt = SegmentationNetwork.load_from_checkpoint(\n", 259 | " ckpt_callback.last_model_path\n", 260 | ")\n", 261 | "loaded_params = list(model_from_ckpt.parameters())[0].cpu()\n", 262 | "assert torch.allclose(loaded_params, final_params)" 263 | ] 264 | }, 265 | { 266 | "cell_type": "markdown", 267 | "id": "f5434b1c", 268 | "metadata": {}, 269 | "source": [ 270 | "## Look at some predictions\n", 271 | "\n", 272 | "Now that the model has been trained for a little bit, we are looking at the predictions again. Usually model training has to be peformed longer, so don't expect any wonders. Also keep in mind that the predictions here are based on the data the model was trained on. Those predictions might be far better than those on data not used during training. But this is a story for later." 
273 | ] 274 | }, 275 | { 276 | "cell_type": "code", 277 | "execution_count": null, 278 | "id": "71e4a7ad", 279 | "metadata": {}, 280 | "outputs": [], 281 | "source": [ 282 | "import matplotlib.pyplot as plt" 283 | ] 284 | }, 285 | { 286 | "cell_type": "code", 287 | "execution_count": null, 288 | "id": "77969a2c", 289 | "metadata": {}, 290 | "outputs": [], 291 | "source": [ 292 | "# we get a batch of data\n", 293 | "for batch in train_loader:\n", 294 | " X, y = batch\n", 295 | " break\n", 296 | "\n", 297 | "# convert to 0/1 range on each pixel\n", 298 | "prediction_dict = model_from_ckpt(X.to(model_from_ckpt.device))\n", 299 | "prediction = prediction_dict[\"prediction\"]\n", 300 | "prediction_binary = (prediction > 0.5).to(torch.uint8)\n", 301 | "\n", 302 | "sidx = 0\n", 303 | "plt.subplot(131)\n", 304 | "plt.imshow(X[sidx, 0].numpy(), cmap=\"gray\")\n", 305 | "plt.title(\"Input\")\n", 306 | "\n", 307 | "plt.subplot(132)\n", 308 | "plt.imshow(y[sidx, 0].numpy(), cmap=\"gray\")\n", 309 | "plt.title(\"Ground truth\")\n", 310 | "\n", 311 | "plt.subplot(133)\n", 312 | "plt.imshow(prediction_binary.detach()[sidx, 0].cpu().numpy(), cmap=\"gray\")\n", 313 | "plt.title(\"Predictions\")" 314 | ] 315 | }, 316 | { 317 | "cell_type": "code", 318 | "execution_count": null, 319 | "id": "ccbaf4e1", 320 | "metadata": {}, 321 | "outputs": [], 322 | "source": [] 323 | }, 324 | { 325 | "cell_type": "code", 326 | "execution_count": null, 327 | "id": "2c1a3b5d-c55a-48ad-aafc-eaba0d5eeabf", 328 | "metadata": {}, 329 | "outputs": [], 330 | "source": [] 331 | }, 332 | { 333 | "cell_type": "code", 334 | "execution_count": null, 335 | "id": "56401a4f-b373-493a-a58d-7ecf2e199acd", 336 | "metadata": {}, 337 | "outputs": [], 338 | "source": [] 339 | } 340 | ], 341 | "metadata": { 342 | "kernelspec": { 343 | "display_name": "torch_intro_env", 344 | "language": "python", 345 | "name": "torch_intro_env" 346 | }, 347 | "language_info": { 348 | "codemirror_mode": { 349 | "name": "ipython", 350 | "version": 3 351 | }, 352 | "file_extension": ".py", 353 | "mimetype": "text/x-python", 354 | "name": "python", 355 | "nbconvert_exporter": "python", 356 | "pygments_lexer": "ipython3", 357 | "version": "3.10.12" 358 | } 359 | }, 360 | "nbformat": 4, 361 | "nbformat_minor": 5 362 | } 363 | -------------------------------------------------------------------------------- /docs/60_Pytorch/data.py: -------------------------------------------------------------------------------- 1 | """Note: The dataset and preprocessing code are largely taken from the StarDist github repository available at https://github.com/stardist""" 2 | 3 | import numpy as np 4 | import torch 5 | 6 | from pathlib import Path 7 | from scipy.ndimage import binary_fill_holes 8 | from tifffile import imread 9 | from torchvision import transforms 10 | from tqdm import tqdm 11 | 12 | SRC_DIR = Path("./data/dsb2018") 13 | 14 | def get_dsb2018_files(subset, rootdir=SRC_DIR): 15 | assert subset in ["train", "validation", "test"] 16 | src_dir = rootdir / subset 17 | 18 | assert src_dir.exists(), f"root directory with images and masks {src_dir} does not exist" 19 | 20 | X = sorted(src_dir.rglob('**/images/*.tif')) 21 | Y = sorted(src_dir.rglob('**/masks/*.tif')) 22 | assert len(X) > 0, f"error finding the right structure in {src_dir}\n{list(src_dir.glob('*'))}" 23 | assert len(X) == len(Y), print(f"X has length {len(X)} and Y has length {len(Y)}") 24 | assert all(x.name==y.name for x,y in zip(X,Y)) 25 | 26 | return X, Y 27 | 28 | 29 | def get_dsb2018_train_files(): 30 | 
return get_dsb2018_files(subset="train") 31 | 32 | 33 | def get_dsb2018_validation_files(): 34 | return get_dsb2018_files(subset="validation") 35 | 36 | 37 | def get_dsb2018_test_files(): 38 | return get_dsb2018_files(subset="test") 39 | 40 | 41 | def fill_label_holes(lbl_img, **kwargs): 42 | lbl_img_filled = np.zeros_like(lbl_img) 43 | for l in (set(np.unique(lbl_img)) - set([0])): 44 | mask = lbl_img==l 45 | mask_filled = binary_fill_holes(mask,**kwargs) 46 | lbl_img_filled[mask_filled] = l 47 | return lbl_img_filled 48 | 49 | 50 | def normalize(img, low, high, eps=1.e-20, clip=True): 51 | # we have to add a small eps to handle the case where both quantiles are equal 52 | # to avoid dividing by zero 53 | scaled = (img - low) / (high - low + eps) 54 | 55 | if clip: 56 | scaled = np.clip(scaled, 0, 1) 57 | 58 | return scaled 59 | 60 | 61 | def quantile_normalization(img, quantile_low=0.01, quantile_high=0.998, eps=1.e-20, clip=True): 62 | """ 63 | First scales the data so that values below quantile_low are smaller 64 | than 0 and values larger than quantile_high are larger than one. 65 | Then optionally clips to (0, 1) range. 66 | """ 67 | 68 | qlow = np.quantile(img, quantile_low) 69 | qhigh = np.quantile(img, quantile_high) 70 | 71 | scaled = normalize(img, low=qlow, high=qhigh, eps=eps, clip=clip) 72 | return scaled, qlow, qhigh 73 | 74 | 75 | class DSBData(): 76 | def __init__(self, image_files, label_files, target_shape=(256, 256)): 77 | """ 78 | Parameters 79 | ---------- 80 | image_files: list of pathlib.Path objects pointing to the *.tif images 81 | label_files: list of pathlib.Path objects pointing to the *.tif segmentation masks 82 | target_shape: tuple of length 2 specifying the sample resolutions of files that 83 | will be kept. All other files will NOT be used. 84 | """ 85 | assert len(image_files) == len(label_files) 86 | assert all(x.name==y.name for x,y in zip(image_files, label_files)) 87 | 88 | self.images = [] 89 | self.labels = [] 90 | 91 | tensor_transform = transforms.Compose([ 92 | transforms.ToTensor(), 93 | ]) 94 | for idx in tqdm(range(len(image_files))): 95 | # we use the same data reading approach as in the previous notebook 96 | image = imread(image_files[idx]) 97 | label = imread(label_files[idx]) 98 | 99 | if image.shape != target_shape: 100 | continue 101 | 102 | # do the normalizations 103 | image = quantile_normalization( 104 | image, 105 | quantile_low=0.01, 106 | quantile_high=0.998, 107 | clip=True)[0].astype(np.float32) 108 | 109 | # NOTE: we convert the label to dtype float32 and not uint8 because 110 | # the tensor transformation does a normalization if the input is of 111 | # dtype uint8, destroying the 0/1 labelling which we want to avoid. 112 | label = fill_label_holes(label) 113 | label_binary = np.zeros_like(label).astype(np.float32) 114 | label_binary[label != 0] = 1. 
115 | 116 | # convert to torch tensor: adds an artificial color channel in the front 117 | # (images whose shape differs from target_shape were already skipped above, 118 | # so all samples end up with the same resolution) 119 | image = tensor_transform(image) 120 | label = tensor_transform(label_binary) 121 | 122 | self.images.append(image) 123 | self.labels.append(label) 124 | 125 | self.images = torch.stack(self.images) 126 | self.labels = torch.stack(self.labels) 127 | 128 | def __getitem__(self, idx): 129 | return self.images[idx], self.labels[idx] 130 | 131 | def __len__(self): 132 | return len(self.images) 133 | -------------------------------------------------------------------------------- /docs/60_Pytorch/enable_env_in_jupyter.sh: -------------------------------------------------------------------------------- 1 | /projects/p_scads_trainings/BIAS/torch_intro_env/bin/python -m ipykernel install --user --name torch_intro_env --display-name="torch_intro_env" -------------------------------------------------------------------------------- /docs/60_Pytorch/readme.md: -------------------------------------------------------------------------------- 1 | # Introduction to PyTorch 2 | 3 | Workshop given on Aug 29, 2023 at the PoLBIAS Training school. 4 | 5 | Event page: https://biapol.eventmember.de/ 6 | 7 | 8 | ## Start JupyterLab 9 | The instructions are taken and adapted from [here](https://gitlab.vgiscience.de/ad/mobile_cart_workshop2020/-/blob/master/Readme.md) 10 | 11 | 1. Make sure to connect to the TUD network using a VPN connection (Cisco VPN or OpenVPN), otherwise you will not be able to use any of the ZIH HPC services. 12 | 2. Add your ZIH login to the HPC project, using the pre-shared link 13 | - this needs to be done only once 14 | - it may take up to two hours before your login is available 15 | 3. Click [this link](https://taurus.hrsk.tu-dresden.de/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2FBiAPoL%2FPoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis&urlpath=lab%2Ftree%2FPoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis%2Fdocs%2F60_Pytorch%2F00_versions.ipynb&branch=main) to spawn a JupyterLab instance on the TUD ZIH JupyterHub. 16 | - You'll be requested to log in using your TUD account 17 | 18 | 19 | Afterwards, once the notebook has opened: 20 | - If asked to select a kernel: confirm the suggestion (Python 3) with "Select". 21 | - Execute the first cell with SHIFT+ENTER 22 | - this will link the conda environment we prepared for you to your user folder. Follow any instructions in the notebook. 23 | 24 | The data required to execute the notebooks is located at `/projects/p_scads_trainings/BIAS/dsb2018` and has to be integrated into your clone of this repository (which should reside in your home directory after clicking the above link to launch JupyterHub). 25 | 26 | 1. Create a directory named `data` in the top level of this repo (i.e. on the same level as the `*.ipynb` notebook files and this README). 27 | 2. Copy the data to the freshly created directory using `cp -r /projects/p_scads_trainings/BIAS/dsb2018 $PWD/data` 28 | 29 | As a backup solution, the data can be downloaded as a zip file from [the stardist github repository](https://github.com/stardist/stardist/releases/download/0.1.0/dsb2018.zip); a hedged download sketch is appended at the end of this readme. 30 | 31 | ## Questions, concerns or feedback? 32 | 33 | Please file an issue through the issue tab.
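As mentioned above, the dsb2018 data can also be fetched directly from the stardist release as a fallback. Below is a minimal sketch of that download, assuming internet access from the machine running the notebooks and that the archive unpacks into `./data/dsb2018` as expected by `data.py`; adapt paths as needed.

```python
# Hedged sketch: fetch the dsb2018 backup archive from the stardist release
# and unpack it into ./data (copying from the cluster project folder is preferred).
from pathlib import Path
from urllib.request import urlretrieve
import zipfile

url = "https://github.com/stardist/stardist/releases/download/0.1.0/dsb2018.zip"
target = Path("data")
target.mkdir(exist_ok=True)

archive, _ = urlretrieve(url, target / "dsb2018.zip")  # download the zip file
with zipfile.ZipFile(archive) as zf:
    zf.extractall(target)  # should create data/dsb2018/... as used by data.py
```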
34 | -------------------------------------------------------------------------------- /docs/60_Pytorch/requirements.txt: -------------------------------------------------------------------------------- 1 | jupyter 2 | matplotlib 3 | monai 4 | numpy 5 | pytorch_lightning 6 | scipy 7 | tensorboard 8 | tensorboardX 9 | tifffile 10 | torch 11 | torchvision 12 | -------------------------------------------------------------------------------- /docs/60_Pytorch/unet.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/60_Pytorch/unet.png -------------------------------------------------------------------------------- /docs/70_AI_Segmentation_Denoising/CNNs_N2V.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/70_AI_Segmentation_Denoising/CNNs_N2V.pdf -------------------------------------------------------------------------------- /docs/70_AI_Segmentation_Denoising/HPC_settings.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/70_AI_Segmentation_Denoising/HPC_settings.PNG -------------------------------------------------------------------------------- /docs/70_AI_Segmentation_Denoising/Readme.md: -------------------------------------------------------------------------------- 1 | # AI segmentation and denoising 2 | 3 | Welcome to the session on segmentation and denoising using deep learning. This tutorial picks up where the previous session on PyTorch left off and covers several topics: 4 | 5 | * [Training a Unet for segmentation](./01_2D_unet_training.ipynb): The [Unet](https://www.nature.com/articles/s41592-018-0261-2) was and is *the* workhorse architecture for the segmentation of image data across all possible domains. 6 | * [Denoising image data with Noise2Void](./02_Noise2Void.ipynb): [Noise2Void](https://openaccess.thecvf.com/content_CVPR_2019/html/Krull_Noise2Void_-_Learning_Denoising_From_Single_Noisy_Images_CVPR_2019_paper.html) and its update, [Probabilistic Noise2Void](https://www.frontiersin.org/articles/10.3389/fcomp.2020.00005/full), are powerful tools for denoising any image data without the need for ground truth data. 7 | * [Denoising 3D image data with Noise2Void](./03_Noise2Void_3D.ipynb): Noise2Void can be extended to 3D image data. This notebook will show you how to do that. 8 | 9 | ## Before you start 10 | 11 | When you log into the HPC, make sure you apply the correct settings. On the login page, this is what you should set for the following tutorial: 12 | 13 | ![](./HPC_settings.PNG) 14 | 15 | ## Kernels to use 16 | 17 | In order to install the singularity containers needed for this exercise, first clone the following repository into your home directory: 18 | 19 | ```bash 20 | git clone https://gitlab.mn.tu-dresden.de/bia-pol/singularity-devbio-napari.git 21 | cd singularity-devbio-napari 22 | ``` 23 | 24 | Then, install the correct container. The following table shows which container to install for which notebook.
You can install a container by running the install script with the desired container name from the table below: 25 | 26 | ```bash 27 | ./install.sh <container name> 28 | ``` 29 | 30 | | Notebook | Installation | Kernel name | 31 | | --- | --- | --- | 32 | | [01_2D_unet_training.ipynb](./01_2D_unet_training.ipynb) | `./install.sh pol-course-pytorch` | `pol-course-pytorch` | 33 | | [02_Noise2Void.ipynb](./02_Noise2Void.ipynb) | `./install.sh pol-course-pytorch` | `pol-course-pytorch` | 34 | | [03_Noise2Void_3D.ipynb](./03_Noise2Void_3D.ipynb) | `./install.sh n2v` | `n2v` | 35 | 36 | 37 | ## Lecture materials 38 | 39 | [Link to slides](./CNNs_N2V.pdf) -------------------------------------------------------------------------------- /docs/70_AI_Segmentation_Denoising/models/n2v_3D/logs/images/.nfs0000000005caaf5600000072: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/70_AI_Segmentation_Denoising/models/n2v_3D/logs/images/.nfs0000000005caaf5600000072 -------------------------------------------------------------------------------- /docs/80_image_analysis_with_dask/202308_dask_workshop_slides.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/80_image_analysis_with_dask/202308_dask_workshop_slides.pdf -------------------------------------------------------------------------------- /docs/80_image_analysis_with_dask/3_lazy_image_processing.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Practical 3: Virtual stack visualization and explorative analysis" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "When doing explorative analysis on large datasets, sometimes it's not an option to load a full dataset into memory. Still, one would want to browse images and potentially try out processing workflows in an interactive manner.\n", 15 | "\n", 16 | "In this notebook, **we'll build a simple lazy data viewer and interactively explore a large dataset**.\n", 17 | "\n", 18 | "We'll create the \"large\" example dataset synthetically, but another option would be to download e.g. the following dataset: http://data.celltrackingchallenge.net/training-datasets/Fluo-N3DL-TRIC.zip.
In this case, " 19 | ] 20 | }, 21 | { 22 | "cell_type": "code", 23 | "execution_count": 1, 24 | "metadata": {}, 25 | "outputs": [], 26 | "source": [ 27 | "# imports\n", 28 | "\n", 29 | "import os, sys\n", 30 | "import numpy as np\n", 31 | "import tifffile\n", 32 | "from scipy import ndimage\n", 33 | "from tqdm import tqdm\n", 34 | "\n", 35 | "import dask\n", 36 | "import dask.array as da\n", 37 | "from dask_image import ndfilters\n", 38 | "from dask import delayed\n", 39 | "\n", 40 | "%matplotlib notebook" 41 | ] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "execution_count": null, 46 | "metadata": {}, 47 | "outputs": [], 48 | "source": [ 49 | "# For improving the usability of this notebook,\n", 50 | "# let's create an example 3D+T dataset instead of downloading one.\n", 51 | "\n", 52 | "# create first timepoint\n", 53 | "N_xy, N_z, N_t, N_rs, dx = 1000, 100, 10, [30, 5], 10\n", 54 | "\n", 55 | "np.random.seed(0)\n", 56 | "img = np.product([\n", 57 | " ndimage.zoom(np.random.random([N_r] * 3),\n", 58 | " zoom=[N_z/N_r, N_xy/N_r, (N_xy + 2 * N_t * dx)/N_r], order=1)\n", 59 | " for N_r in N_rs], axis=0,\n", 60 | ")\n", 61 | "\n", 62 | "# convert into uint16\n", 63 | "img = (img * 10000).astype(np.uint16)\n" 64 | ] 65 | }, 66 | { 67 | "cell_type": "code", 68 | "execution_count": null, 69 | "metadata": {}, 70 | "outputs": [], 71 | "source": [ 72 | "# save it as a timelapse\n", 73 | "\n", 74 | "file_pattern = 'data/large_3d_dataset_tp%03d.tif'\n", 75 | "\n", 76 | "os.makedirs('data', exist_ok=True)\n", 77 | "\n", 78 | "N_t = 20\n", 79 | "for t in tqdm(range(N_t)):\n", 80 | " curr_tp = img[:, :, dx * t: dx * t + N_xy]\n", 81 | " tifffile.imwrite(file_pattern %t, curr_tp)\n", 82 | "\n", 83 | "print('Total dataset size: %.2f GB' %(N_t * N_xy ** 2 * N_z * 2 / 10**9))\n" 84 | ] 85 | }, 86 | { 87 | "cell_type": "markdown", 88 | "metadata": {}, 89 | "source": [ 90 | "### Loading the dataset into a dask array\n", 91 | "\n", 92 | "In order to lazily access images, we construct a dask array containing chunks that are computed by reading the corresponding image data from disk." 
93 | ] 94 | }, 95 | { 96 | "cell_type": "code", 97 | "execution_count": null, 98 | "metadata": { 99 | "scrolled": true 100 | }, 101 | "outputs": [], 102 | "source": [ 103 | "from glob import glob\n", 104 | "import zarr\n", 105 | "\n", 106 | "file_pattern = 'data/large_3d_dataset_tp*.tif'\n", 107 | "files = sorted(glob(file_pattern))\n", 108 | "\n", 109 | "# determine the shape and dtype of the data\n", 110 | "zarr_arr = zarr.open(tifffile.imread(file_pattern, aszarr=True))\n", 111 | "\n", 112 | "N_t, N_z, N_x, N_y = zarr_arr.shape\n", 113 | "dtype = zarr_arr.dtype\n", 114 | "\n", 115 | "print('Total dataset size is %s GB'\n", 116 | " %(np.product([N_t, N_z, N_x, N_y, dtype.itemsize]) / 1e9))" 117 | ] 118 | }, 119 | { 120 | "cell_type": "code", 121 | "execution_count": null, 122 | "metadata": {}, 123 | "outputs": [], 124 | "source": [ 125 | "# define a custom reader function\n", 126 | "# which loads a single 2D frame from a 3d tif file\n", 127 | "\n", 128 | "def load_2d(t, z):\n", 129 | " return tifffile.TiffFile(files[t]).pages[z].asarray()\n", 130 | "\n", 131 | "# loading should be lazy\n", 132 | "load_2d = delayed(load_2d)\n", 133 | "\n", 134 | "# manually compose a dask array from the individual lazily loaded frames\n", 135 | "# `da.from_delayed` converts a delayed object into a dask array, given\n", 136 | "# information about the shape and dtype of the delayed result\n", 137 | "ims = da.stack([\n", 138 | " da.stack([\n", 139 | " da.from_delayed(load_2d(t, z),\n", 140 | " shape=(N_x, N_y),\n", 141 | " dtype=dtype)\n", 142 | " for z in range(N_z)])\n", 143 | " for t in range(N_t)])\n", 144 | "\n", 145 | "ims" 146 | ] 147 | }, 148 | { 149 | "cell_type": "markdown", 150 | "metadata": {}, 151 | "source": [ 152 | "### Visualize the dataset\n", 153 | "\n", 154 | "Since dask arrays essentially behave like numpy arrays, many viewers support the visualization of the previously constructed dask array." 155 | ] 156 | }, 157 | { 158 | "cell_type": "markdown", 159 | "metadata": {}, 160 | "source": [ 161 | "#### Using tifffile" 162 | ] 163 | }, 164 | { 165 | "cell_type": "code", 166 | "execution_count": null, 167 | "metadata": {}, 168 | "outputs": [], 169 | "source": [ 170 | "# tifffile contains a multidimensional image viewer based on matplotlib\n", 171 | "\n", 172 | "tifffile.imshow(ims)" 173 | ] 174 | }, 175 | { 176 | "cell_type": "markdown", 177 | "metadata": {}, 178 | "source": [ 179 | "#### Using napari\n", 180 | "Napari supports dask arrays." 
181 | ] 182 | }, 183 | { 184 | "cell_type": "code", 185 | "execution_count": null, 186 | "metadata": {}, 187 | "outputs": [], 188 | "source": [ 189 | "import napari\n", 190 | "\n", 191 | "viewer = napari.Viewer()\n", 192 | "\n", 193 | "viewer.add_image(ims)" 194 | ] 195 | }, 196 | { 197 | "cell_type": "markdown", 198 | "metadata": {}, 199 | "source": [ 200 | "#### Using ipywidgets to interact with matplotlib plots" 201 | ] 202 | }, 203 | { 204 | "cell_type": "code", 205 | "execution_count": null, 206 | "metadata": {}, 207 | "outputs": [], 208 | "source": [ 209 | "%matplotlib notebook\n", 210 | "import matplotlib.pyplot as plt\n", 211 | "from ipywidgets import interact\n", 212 | "\n", 213 | "# a simple multi-dimensional image viewer\n", 214 | "def browse_images(ims, show_colorbar=False):\n", 215 | "\n", 216 | " plt.figure()\n", 217 | " \n", 218 | " # determine the shape of the non spatial dimensions\n", 219 | " scroll_shape = ims.shape[:-2]\n", 220 | "\n", 221 | " def view_image(**kwargs):\n", 222 | " pos = tuple(kwargs[dim] for dim in sorted(kwargs))\n", 223 | " plt.imshow(ims[tuple(pos)].T, cmap=plt.cm.gray_r, interpolation='nearest')\n", 224 | " if show_colorbar:\n", 225 | " plt.colorbar()\n", 226 | "\n", 227 | " # interact with the viewer using the non spatial dimensions\n", 228 | " interact(view_image,\n", 229 | " **{'dim %s' %dim: (0, s-1) for dim, s in enumerate(scroll_shape)})\n", 230 | "\n", 231 | " plt.show()\n", 232 | " \n", 233 | "browse_images(ims)" 234 | ] 235 | }, 236 | { 237 | "cell_type": "markdown", 238 | "metadata": {}, 239 | "source": [ 240 | "### Process the image on the fly\n", 241 | "\n", 242 | "In addition to viewing the raw images, we can perform operations on the array before viewing it." 243 | ] 244 | }, 245 | { 246 | "cell_type": "code", 247 | "execution_count": null, 248 | "metadata": {}, 249 | "outputs": [], 250 | "source": [ 251 | "# max projection as a simple example\n", 252 | "\n", 253 | "browse_images(ims.max(-3))" 254 | ] 255 | }, 256 | { 257 | "cell_type": "code", 258 | "execution_count": null, 259 | "metadata": {}, 260 | "outputs": [], 261 | "source": [ 262 | "# another example: local background subtraction\n", 263 | "\n", 264 | "ims_mod = ims.astype(np.int32) - ndfilters.minimum_filter(ims, size=(1, 1, 30, 30))\n", 265 | "ims_mod = np.clip(ims_mod, 0, 2**16 - 1)\n", 266 | "\n", 267 | "browse_images(ims_mod, show_colorbar=False)" 268 | ] 269 | }, 270 | { 271 | "cell_type": "code", 272 | "execution_count": null, 273 | "metadata": {}, 274 | "outputs": [], 275 | "source": [ 276 | "# another workflow\n", 277 | "\n", 278 | "ims_max = ims.max(1)\n", 279 | "ims_max = ims_max.rechunk((1, 600, 600))\n", 280 | "ims_proc = ndfilters.gaussian_filter(ims_max, (0, 2, 2))\n", 281 | "ims_proc = ims_proc.astype(float) - ndfilters.minimum_filter(ims_proc, (1, 50, 50))\n", 282 | "ims_proc = ims_proc / ndfilters.maximum_filter(ims_proc.rechunk((1, 600, 600)), (1, 50, 50))\n", 283 | "ims_proc = ims_proc > 0.5\n", 284 | "\n", 285 | "browse_images(ims_proc)" 286 | ] 287 | }, 288 | { 289 | "cell_type": "code", 290 | "execution_count": null, 291 | "metadata": {}, 292 | "outputs": [], 293 | "source": [ 294 | "# once we're happy with the workflow, we can compute the result\n", 295 | "# and stream it into a file\n", 296 | "\n", 297 | "from dask import diagnostics\n", 298 | "\n", 299 | "with diagnostics.ProgressBar():\n", 300 | " da.to_zarr(ims_proc, 'data/processed.zarr', overwrite=True)" 301 | ] 302 | }, 303 | { 304 | "cell_type": "markdown", 305 | "metadata": {}, 306 | "source": 
[ 307 | "### Obtain properties from objects" 308 | ] 309 | }, 310 | { 311 | "cell_type": "code", 312 | "execution_count": null, 313 | "metadata": {}, 314 | "outputs": [], 315 | "source": [ 316 | "from scipy import ndimage\n", 317 | "from skimage import measure\n", 318 | "import pandas as pd\n", 319 | "\n", 320 | "def get_object_properties(im_binary, im_intensities, t):\n", 321 | " labels, _ = ndimage.label(im_binary)\n", 322 | " props = measure.regionprops_table(\n", 323 | " labels,\n", 324 | " intensity_image=im_intensities,\n", 325 | " properties=['label', 'centroid', 'area', 'mean_intensity'])\n", 326 | " props = pd.DataFrame(props)\n", 327 | " props['t'] = t\n", 328 | " return props\n", 329 | "\n", 330 | "dfs = []\n", 331 | "for t, im in enumerate(ims_proc[:3]):\n", 332 | " df = delayed(get_object_properties)(im, ims_max[t], t)\n", 333 | " dfs.append(df)\n", 334 | "\n", 335 | "with dask.diagnostics.ProgressBar():\n", 336 | " df = pd.concat(dask.compute(dfs)[0], ignore_index=True)\n", 337 | "\n", 338 | "df" 339 | ] 340 | } 341 | ], 342 | "metadata": { 343 | "kernelspec": { 344 | "display_name": "Python 3 (ipykernel)", 345 | "language": "python", 346 | "name": "python3" 347 | }, 348 | "language_info": { 349 | "codemirror_mode": { 350 | "name": "ipython", 351 | "version": 3 352 | }, 353 | "file_extension": ".py", 354 | "mimetype": "text/x-python", 355 | "name": "python", 356 | "nbconvert_exporter": "python", 357 | "pygments_lexer": "ipython3", 358 | "version": "3.9.18" 359 | } 360 | }, 361 | "nbformat": 4, 362 | "nbformat_minor": 2 363 | } 364 | -------------------------------------------------------------------------------- /docs/80_image_analysis_with_dask/README.md: -------------------------------------------------------------------------------- 1 | # Lazy and parallel bio-image processing using DASK 2 | 3 | When images take too long to load or process, or don’t fit into memory, they can be split up and managed in small parts. This practical is about easily gaining control over which parts of a (bio-)image are processed when and where in the context of interactive Python workflows, napari and cluster computing. 4 | 5 | ## Preparation 6 | 7 | To get started, we need to install/activate a suitable conda environment: 8 | 9 | ### Option 1: 10 | 11 | Use the environment created in the [course preparation]( 12 | https://biapol.github.io/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/00_course_preparation/Readme.html) and install some further packages to it: 13 | 14 | ``` 15 | mamba activate devbio-napari-env 16 | mamba install -c conda-forge dask-image ipycytoscape 17 | ``` 18 | 19 | ### Option 2: 20 | 21 | Create a new environment from scratch: 22 | 23 | ``` 24 | mamba create --name dask_course python=3.9 devbio-napari pyqt dask-image ipycytoscape -c conda-forge 25 | mamba activate dask_course 26 | ``` 27 | 28 | ## Lecture materials 29 | 30 | ### Slides 31 | 32 | Download introductory slides [here](202308_dask_workshop_slides.pdf).
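As a quick, hedged teaser of the lazy-evaluation idea the notebooks build on (illustrative only; the notebooks work on real images and cover this in depth), a chunked dask array can describe a whole computation without executing it until the result is requested:

```python
# Illustrative sketch only: build a lazy, chunked array and compute on demand.
import dask.array as da

# ~0.8 GB of synthetic data, described as ~80 MB chunks (nothing is allocated yet)
ims = da.random.random((100, 1000, 1000), chunks=(10, 1000, 1000))

# composing operations builds a task graph instead of computing immediately
background = ims - ims.min(axis=0)   # crude background subtraction
projection = background.max(axis=0)  # maximum projection over the first axis

# only .compute() triggers the (parallel, chunk-wise) execution
result = projection.compute()
print(result.shape)  # (1000, 1000)
```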
33 | 34 | ### Notebooks 35 | 36 | 1) [Dask basics](1_dask_basics.ipynb) 37 | 2) [Processing images with dask](2_dask_image.ipynb) 38 | 3) [Lazy image processing workflows](3_lazy_image_processing.ipynb) -------------------------------------------------------------------------------- /docs/80_image_analysis_with_dask/environment.yml: -------------------------------------------------------------------------------- 1 | name: neubias_dask_course 2 | channels: 3 | - conda-forge 4 | dependencies: 5 | - python=3.10 6 | - jupyter 7 | - ipywidgets 8 | - ipympl 9 | - pip 10 | - requests 11 | - scikit-image 12 | - xarray 13 | - zarr >= 2.4.0 14 | - dask 15 | - dask-image 16 | - distributed 17 | - bokeh 18 | - graphviz 19 | - python-graphviz 20 | - matplotlib 21 | - seaborn 22 | - pandas 23 | - scipy 24 | - aicsimageio 25 | - pip: 26 | - ome-zarr 27 | -------------------------------------------------------------------------------- /docs/80_image_analysis_with_dask/mydask.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Dask task graph 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 282 | 285 | 286 | 287 | 288 | -------------------------------------------------------------------------------- /docs/80_image_analysis_with_dask/mydask.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/80_image_analysis_with_dask/mydask.png -------------------------------------------------------------------------------- /docs/_config.yml: -------------------------------------------------------------------------------- 1 | # In _config.yml 2 | title: Image data science with Python and Napari @EPFL 3 | author: Stephane Rigaud, Brian Northan, Till Korten, Neringa Jurenaite, Apurv Deepak Kulkarni, Peter Steinbach, Sebastian Starke, Johannes Soltwedel and Marvin Albert, Robert Haase, DFG Cluster of Excellence "Physics of Life", TU Dresden 4 | logo: biapol_logo.png 5 | execute: 6 | execute_notebooks: off 7 | 8 | # Add a bibtex file so that we can create citations 9 | bibtex_bibfiles: 10 | - references.bib 11 | 12 | # Information about where the book exists on the web 13 | repository: 14 | url: https://github.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis # Online location of your book 15 | branch: main # Which branch of the repository should be used when creating links (optional) 16 | 17 | # Add GitHub buttons to your book 18 | # See https://jupyterbook.org/customize/config.html#add-a-link-to-your-repository 19 | html: 20 | use_issues_button: true 21 | use_repository_button: true 22 | extra_footer: | 23 |

24 | Copyright: This work can be reused under the terms of the CC-BY 4.0 license unless mentioned otherwise. 25 |

26 | sphinx: 27 | config: 28 | html_show_copyright: false -------------------------------------------------------------------------------- /docs/_toc.yml: -------------------------------------------------------------------------------- 1 | # Table of contents 2 | # Learn more at https://jupyterbook.org/customize/toc.html 3 | 4 | format: jb-book 5 | root: intro 6 | 7 | options: # The options key will be applied to all chapters, but not sub-sections 8 | numbered: False 9 | 10 | parts: 11 | - caption: Monday 12 | chapters: 13 | - file: 00_course_preparation/Readme.md 14 | - file: 10_Clesperanto/readme 15 | sections: 16 | - file: 10_Clesperanto/10_select_devices.ipynb 17 | - file: 10_Clesperanto/20_gpu_arrays_and_memory_managment.ipynb 18 | - file: 10_Clesperanto/30_apply_operations_on_data.ipynb 19 | - file: 10_Clesperanto/40_nuclei_segmentation.ipynb 20 | - file: 10_Clesperanto/50_measurement_and_quantifications.ipynb 21 | - file: 10_Clesperanto/60_custom_kernel_execution.ipynb 22 | - file: 10_Clesperanto/70_benchmarking.ipynb 23 | 24 | - file: 23_clesperanto_assistant/intro 25 | sections: 26 | - file: 23_clesperanto_assistant/napari-assistant 27 | - file: 23_clesperanto_assistant/notebook_export 28 | 29 | - file: 25_cupy/readme 30 | sections: 31 | - file: 25_cupy/10_basics 32 | - file: 25_cupy/20_dropin_replacement 33 | - file: 25_cupy/30_filtering 34 | - file: 25_cupy/40_custom_kernels 35 | - file: 25_cupy/50_napari-cupy-image-processing 36 | - file: 25_cupy/60_benchmark_affine_transforms 37 | 38 | - file: 30_Deconvolution/0_intro_to_decon 39 | sections: 40 | - file: 30_Deconvolution/1_test_libs 41 | - file: 30_Deconvolution/2_cupy_forward 42 | - file: 30_Deconvolution/3_Nuclei_Deconvolution_Compare_to_Truth 43 | - file: 30_Deconvolution/4_Nuclei_Deconvolution_Segmentation 44 | - file: 30_Deconvolution/5_edges 45 | - file: 30_Deconvolution/6_decon_bead_edge_handling 46 | - file: 30_Deconvolution/7_decon_regularization 47 | - file: 30_Deconvolution/8_extract_psf 48 | - file: 30_Deconvolution/9_Dask_Deconvolution 49 | - file: 30_Deconvolution/cluster_access.md 50 | 51 | - caption: Tuesday 52 | chapters: 53 | - file: 40_HPC_Intro/readme 54 | - file: 50_Clesperanto_on_HPC/readme 55 | sections: 56 | - file: 50_Clesperanto_on_HPC/login_taurus 57 | - file: 50_Clesperanto_on_HPC/modified_generated_notebook 58 | - file: 50_Clesperanto_on_HPC/exercises 59 | 60 | - file: 60_Pytorch/readme 61 | sections: 62 | - file: 60_Pytorch/00_versions 63 | - file: 60_Pytorch/01_data_exploration 64 | - file: 60_Pytorch/02_dataset 65 | - file: 60_Pytorch/03_data_batching_and_setup_model 66 | - file: 60_Pytorch/04_model_training 67 | - file: 60_Pytorch/05_model_training_with_device 68 | - file: 60_Pytorch/06_model_training_with_logging 69 | - file: 60_Pytorch/07_model_training_with_checkpoints 70 | - file: 60_Pytorch/08_pytorch_lightning 71 | 72 | - caption: Wednesday 73 | chapters: 74 | - file: 70_AI_Segmentation_Denoising/Readme 75 | sections: 76 | - file: 70_AI_Segmentation_Denoising/01_2D_unet_training 77 | - file: 70_AI_Segmentation_Denoising/02_Noise2Void 78 | - file: 70_AI_Segmentation_Denoising/03_Noise2Void_3D 79 | 80 | - file: 80_image_analysis_with_dask/README.md 81 | sections: 82 | - file: 80_image_analysis_with_dask/1_dask_basics.ipynb 83 | - file: 80_image_analysis_with_dask/2_dask_image.ipynb 84 | - file: 80_image_analysis_with_dask/3_lazy_image_processing.ipynb 85 | 86 | -------------------------------------------------------------------------------- /docs/biapol_logo.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/biapol_logo.png -------------------------------------------------------------------------------- /docs/how_to_download.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/how_to_download.png -------------------------------------------------------------------------------- /docs/intro.md: -------------------------------------------------------------------------------- 1 | # PoL Bio-Image Analysis Training School on GPU-Accelerated Image Analysis 2 | 3 | This [Jupyter book](https://jupyterbook.org/) contains training resources for scientists who want to dive into GPU-accelerated image processing. It is specifically aimed at students and scientists working with microscopy images in the life sciences. 4 | 5 | Here, we cover the *GPU-Accelerated Image Analysis* track of the [PoL Bio-Image Analysis Symposium](https://biopol-training.eventmember.de/). We will process images using [clesperanto](https://clesperanto.github.io/), [cupy](https://cupy.dev/) and [PyTorch](https://pytorch.org/). We will denoise, deconvolve and segment images with and without deep learning techniques. We will get an introduction to working with the [TU Dresden HPC cluster Taurus](https://tu-dresden.de/zih/hochleistungsrechnen/hpc) and distribute tasks using [dask](https://www.dask.org/). 6 | 7 | ## Trainers 8 | 9 | * Stéphane Rigaud, Institut Pasteur Paris 10 | * Marvin Albert, Institut Pasteur Paris 11 | * Johannes Soltwedel, PoL TU Dresden 12 | * Peter Steinbach, Helmholtz AI 13 | * Sebastian Starke, Helmholtz AI 14 | * Neringa Jurenaite, ScaDS.AI, TU Dresden 15 | * Brian Northan, True North IA 16 | * Robert Haase, PoL TU Dresden 17 | 18 | 19 | ## Timetable 20 | 21 | ![img.png](timetable.png) 22 | 23 | ## How to use this material 24 | 25 | To follow the course, we recommend downloading [the repository from which this Jupyter book is made](https://github.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis). 26 | All Jupyter Notebooks are executable so that attendees can reproduce all demos and exercises. 27 | 28 | ![img.png](how_to_download.png) 29 | 30 | Assuming you downloaded the repository to your Desktop, you can open the Jupyter book by opening a terminal and typing: 31 | 32 | ```bash 33 | cd Desktop/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis 34 | ``` 35 | ```bash 36 | conda activate devbio-napari-env 37 | ``` 38 | ```bash 39 | jupyter lab 40 | ``` 41 | 42 | Using JupyterLab, you can navigate to the course lessons in the `docs` folder. 43 | ![img.png](jupyterlab.png) 44 | 45 | ... and execute the code and experiment with it. 46 | ![img.png](jupyterlab2.png) 47 | 48 | ## Feedback and support 49 | 50 | If you have any questions, create a [GitHub issue](https://github.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/issues). 51 | Alternatively, open a thread on [image.sc](https://image.sc), put a link to the lesson or exercise you want to ask a question about and tag @haesleinhuepf. 52 | 53 | 54 | ## Acknowledgements 55 | 56 | This course was held in Dresden, August 2023. We would like to thank all the people who shared teaching materials we are reusing here.
57 | We acknowledge support by the Deutsche Forschungsgemeinschaft under Germany’s Excellence Strategy—EXC2068–Cluster of Excellence Physics of Life of TU Dresden. 58 | This project has been made possible in part by grant number 2021-237734 (GPU-accelerating Fiji and friends using distributed CLIJ, NEUBIAS-style, EOSS4) from the Chan Zuckerberg Initiative DAF, an advised fund of the Silicon Valley Community Foundation. 59 | The authors acknowledge the financial support by the Federal Ministry of Education and Research of Germany and by Sächsische Staatsministerium für Wissenschaft, Kultur und Tourismus in the programme Center of Excellence for AI-research „Center for Scalable Data Analytics and Artificial Intelligence Dresden/Leipzig“, project identification number: ScaDS.AI 60 | -------------------------------------------------------------------------------- /docs/jupyterlab.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/jupyterlab.png -------------------------------------------------------------------------------- /docs/jupyterlab2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/jupyterlab2.png -------------------------------------------------------------------------------- /docs/timetable.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BiAPoL/PoL-BioImage-Analysis-TS-GPU-Accelerated-Image-Analysis/2ed77a9bb176161cc20045c791a8a00e2e6b917a/docs/timetable.png -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | jupyter-book 2 | matplotlib 3 | numpy 4 | --------------------------------------------------------------------------------