├── Dockerfile ├── LICENSE ├── README.md ├── configs ├── abdom │ ├── pixel │ │ ├── axis_0 │ │ │ └── train_config.yaml │ │ └── axis_2 │ │ │ └── train_config.yaml │ └── sample │ │ ├── axis_0 │ │ └── train_config.yaml │ │ └── axis_2 │ │ └── train_config.yaml ├── brain │ ├── pixel │ │ ├── axis_0 │ │ │ └── train_config.yaml │ │ ├── axis_1 │ │ │ └── train_config.yaml │ │ └── axis_2 │ │ │ └── train_config.yaml │ └── sample │ │ ├── axis_0 │ │ └── train_config.yaml │ │ ├── axis_1 │ │ └── train_config.yaml │ │ └── axis_2 │ │ └── train_config.yaml ├── inference_3d_example.yaml └── train_example.yaml ├── folds ├── abdom │ └── train_folds_10.csv └── brain │ └── train_folds_10.csv ├── mood ├── __init__.py ├── dpa │ ├── __init__.py │ ├── data_generators.py │ ├── evaluate.py │ ├── feature_extractor.py │ ├── inference_evaluate_3d.py │ ├── layers.py │ ├── main.py │ ├── optimizer.py │ ├── pg_decoders.py │ ├── pg_encoders.py │ ├── pg_networks.py │ ├── pg_rec_losses.py │ ├── rec_losses.py │ └── train.py ├── main.py └── utils │ ├── __init__.py │ ├── datasets.py │ ├── loggers.py │ ├── preprocessing │ ├── __init__.py │ ├── create_2d_val_dataset_with_synthetic_anomalies.py │ ├── create_folds.py │ ├── create_val_dataset_with_synthetic_anomalies.py │ └── save_2D.py │ ├── transforms.py │ └── utils.py ├── requirements.txt ├── setup.py └── submission_data ├── configs ├── abdom_pixel.yaml ├── abdom_sample.yaml ├── brain_pixel.yaml └── brain_sample.yaml └── scripts ├── predict_3d.py ├── run_pixel_abdom.sh ├── run_pixel_brain.sh ├── run_sample_abdom.sh └── run_sample_brain.sh /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM pytorch/pytorch:1.4-cuda10.1-cudnn7-devel 2 | 3 | RUN pip install --upgrade pip && \ 4 | pip install \ 5 | numpy \ 6 | setuptools \ 7 | pyyaml \ 8 | opencv-python \ 9 | tensorflow-gpu \ 10 | Pillow \ 11 | h5py \ 12 | keras \ 13 | matplotlib \ 14 | pandas \ 15 | pydicom \ 16 | scikit-image \ 17 | scikit-learn \ 18 | scipy \ 19 | seaborn \ 20 | tensorboard \ 21 | tensorboardX \ 22 | tensorflow \ 23 | tensorflow-estimator \ 24 | tqdm \ 25 | nibabel 26 | 27 | RUN pip install monai 28 | RUN pip install torch==1.6.0+cu101 torchvision==0.7.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html 29 | 30 | # copy files 31 | 32 | RUN mkdir /workspace/mood && \ 33 | mkdir /workspace/configs && \ 34 | mkdir /workspace/data 35 | 36 | COPY submission_data/data /workspace/data 37 | COPY submission_data/configs /workspace/configs 38 | ADD submission_data/scripts /workspace/ 39 | 40 | COPY mood /workspace/mood/mood 41 | ADD setup.py /workspace/mood 42 | RUN pip install /workspace/mood 43 | 44 | RUN chmod +x /workspace/*.sh && \ 45 | mkdir /mnt/data && \ 46 | mkdir /mnt/pred 47 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 
14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Medical Out-of-Distribution Analysis Challenge. Solution. Third Place in Pixel-Level Task 2 | 3 | 4 | [![License][license-shield]][license-url] 5 | 6 | 7 | ## Challenge 8 | 9 | [Official Challenge Website](http://medicalood.dkfz.de/web/) 10 | 11 | [Github-Repository](https://github.com/MIC-DKFZ/mood) 12 | 13 | The Medical Out-of-Distribution Analysis Challenge (MOOD) presented two tasks (sample-level 14 | and pixel-level out-of-distribution detection) on two medical datasets 15 | (a brain MRI dataset and an abdominal CT dataset). 16 | 17 | ## Solution 18 | 19 | Presentation of the solution (starts at 18:04): [https://www.youtube.com/watch?v=yOemj1TQfZU](https://www.youtube.com/watch?v=yOemj1TQfZU) 20 | 21 | We based our solution on the [Deep Perceptual Autoencoder](https://arxiv.org/pdf/2006.13265.pdf). 22 | We applied a Deep Perceptual Autoencoder to 2D slices of the 3D volumes. 23 | To calculate the [Perceptual Loss](https://arxiv.org/abs/1603.08155), we used a VGG19 network 24 | as the feature extractor, pre-trained 25 | with the unsupervised learning framework [SimCLR](https://arxiv.org/abs/2002.05709). 26 | 27 | Our training procedure consists of two stages: 28 | 1. SimCLR training of VGG19 features on the joint set of all slices of the 3D volumes along the 0th, 1st, and 2nd axes. 29 | 2. Training three Deep Perceptual Autoencoders -- each on the set of 2D slices of the 3D volumes along the corresponding axis. 30 | 31 | We used the Deep Perceptual Autoencoders to predict anomalies pixel-wise (giving an abnormality score for each voxel of a 3D volume) 32 | and sample-wise (for a whole 3D volume): 33 | 1. Pixel-wise abnormality scores. The final pixel-wise prediction was the average of the pixel-wise predictions of three models (applied along different axes). To obtain pixel-level predictions, we replaced the L1-norm over the whole feature map in the numerator of the relative perceptual loss with a pixel-wise L1-norm. After obtaining such a map of reconstruction errors, we resized it to the input image shape. 34 | 2. Sample-wise abnormality score. As the abnormality score of a whole 3D volume, we used the maximum of the slice-level abnormality scores (a minimal sketch of both scoring schemes is given in the Training section below). 35 | 36 | 37 | ## Structure of Project 38 | mood - Python package. Implementation of the Deep Perceptual Autoencoder (https://arxiv.org/pdf/2006.13265.pdf). 39 | Installation: 40 | pip install -r requirements.txt 41 | pip install -e . --user 42 | configs - YAML configs used in our solution 43 | └───abdom - configs used to train final models on abdominal CT-dataset 44 | │ │───pixel 45 | | | │───axis_0 46 | | | │ | train_config.yaml 47 | | | │───axis_2 48 | | | │ | train_config.yaml 49 | │ │───sample 50 | | | │ ... 51 | └───brain - configs used to train final models on brain MRI-dataset 52 | | | ....
53 | └───cross_validation - configs used to search hyperparameters 54 | │ │───axis_0 55 | │ │ meta_train.yaml 56 | │ │ meta_inference_3d.yaml 57 | │ │───cv 58 | | | │───res_128_lr_1_ld_64_pf_r32 59 | | | | │───0 60 | | | | | train_config.yaml -- training config 61 | | | | | inference_3d_config -- evaluation config 62 | | | | ... 63 | submission_data -- Scripts and configs of the final model 64 | folds -- Folds used in cross-validation 65 | 66 | ## Installation 67 | 68 | ```bash 69 | pip install -r requirements.txt 70 | pip install -e . --user 71 | ``` 72 | 73 | ## Data Preparation 74 | 75 | 1. Download data (see [Challenge Website](http://medicalood.dkfz.de/web/)) to `./data/original`. 76 | 2. Save 2D slices along all axes: 77 | ```bash 78 | python mood/utils/preprocessing/save_2D.py -i ./data/original/brain_train/ -o ./data/preprocessed/brain_train/2d_axis_0 -a 0 79 | python mood/utils/preprocessing/save_2D.py -i ./data/original/brain_train/ -o ./data/preprocessed/brain_train/2d_axis_1 -a 1 80 | python mood/utils/preprocessing/save_2D.py -i ./data/original/brain_train/ -o ./data/preprocessed/brain_train/2d_axis_2 -a 2 81 | ... 82 | ``` 83 | 3. Optionally, create folds for cross-validation or **use our folds** (`folds` dir): 84 | ```bash 85 | python mood/utils/preprocessing/create_folds.py -i ./data/original/brain_train/ -o ./folds/brain/train_folds_10.csv -n 10 86 | python mood/utils/preprocessing/create_folds.py -i ./data/original/abdom_train/ -o ./folds/abdom/train_folds_10.csv -n 10 87 | ``` 88 | 4. Optionally, create a synthetic dataset for validation: 89 | ```bash 90 | python mood/utils/preprocessing/create_val_dataset_with_synthetic_anomalies.py \ 91 | -i ./data/original/brain_train/ \ 92 | -o ./data/preprocessed/brain_train/3d_test \ 93 | -m ./data/preprocessed/brain_train/3d_test_masks/ \ 94 | --folds_path ./folds/brain/train_folds_10.csv \ 95 | --fold 0 96 | 97 | python mood/utils/preprocessing/create_val_dataset_with_synthetic_anomalies.py \ 98 | -i ./data/original/abdom_train/ \ 99 | -o ./data/preprocessed/abdom_train/3d_test \ 100 | -m ./data/preprocessed/abdom_train/3d_test_masks/ \ 101 | --folds_path ./folds/abdom/train_folds_10.csv \ 102 | --fold 0 103 | 104 | ``` 105 | 106 | ## Training 107 | 108 | ### Pre-training of VGG19 features 109 | 110 | Since no other data or data sources were allowed in the challenge, 111 | we used the unsupervised learning framework [SimCLR](https://arxiv.org/abs/2002.05709) 112 | to pre-train the VGG19 features (used in the perceptual loss). 113 | 114 | See [our fork](https://github.com/ninatu/SimCLR) of a SimCLR implementation 115 | adapted for VGG19 training on the provided data. 116 | 117 | Save the pre-trained weights in `./output/vgg_weights/simclr_exp_1.tar`. 118 | 119 | 120 | ### Training Deep Perceptual Autoencoder 121 | 122 | #### Example 123 | See example configs for training and inference in the `configs` dir.
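For reference, the pixel-wise and sample-wise scoring described in the Solution section can be sketched as follows. This is a minimal, hypothetical PyTorch illustration rather than the code used in this repository (the repository's own loss implementations live under `mood/dpa/`); `feat_x` and `feat_rec` are assumed to be VGG19 feature maps of an input slice and its reconstruction.

```python
import torch
import torch.nn.functional as F


def relative_perceptual_l1(feat_x, feat_rec, eps=1e-8):
    # Slice-level score: L1 distance between the VGG19 feature maps of the
    # input and its reconstruction, normalized by the feature norm of the input.
    return (feat_x - feat_rec).abs().sum() / (feat_x.abs().sum() + eps)


def pixelwise_anomaly_map(feat_x, feat_rec, out_size, eps=1e-8):
    # Pixel-level variant: the L1 difference is taken per spatial location
    # (over channels) instead of over the whole feature map, then the error
    # map is resized back to the input slice resolution.
    err = (feat_x - feat_rec).abs().sum(dim=1, keepdim=True)          # (N, 1, h, w)
    err = err / (feat_x.abs().sum(dim=(1, 2, 3), keepdim=True) + eps)
    return F.interpolate(err, size=out_size, mode='bilinear', align_corners=False)


def sample_score(slice_scores):
    # Sample-level score of a 3D volume: the maximum over its per-slice scores.
    return torch.as_tensor(slice_scores).max()
```

The final pixel-wise prediction for a voxel would then be the average of such resized error maps over the models trained along the different axes, as described in the Solution section.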
124 | 125 | To train a Deep Perceptual Autoencoder (DPA), run: 126 | ```bash 127 | python mood/main.py train ./configs/train_example.yaml 128 | ``` 129 | 130 | To run inference and evaluate your model on the synthetic dataset, run: 131 | ```bash 132 | python mood/main.py inference_evaluate_3d ./configs/inference_3d_example.yaml 133 | ``` 134 | 135 | #### Final Model 136 | 137 | To train the models as in our final submission, use the configs in `configs/brain/pixel`, `configs/brain/sample`, 138 | `configs/abdom/pixel`, `configs/abdom/sample`: 139 | 140 | ```bash 141 | python mood/main.py train configs/brain/pixel/axis_0/train_config.yaml 142 | python mood/main.py train configs/brain/pixel/axis_1/train_config.yaml 143 | python mood/main.py train configs/brain/pixel/axis_2/train_config.yaml 144 | 145 | python mood/main.py train configs/brain/sample/axis_0/train_config.yaml 146 | python mood/main.py train configs/brain/sample/axis_1/train_config.yaml 147 | python mood/main.py train configs/brain/sample/axis_2/train_config.yaml 148 | 149 | ... 150 | ``` 151 | 152 | 153 | 154 | ## Building Docker With Final Model 155 | 156 | Our final prediction was the average of the predictions of three models 157 | in the brain MRI task (applied along different axes) and of two models in the abdominal CT task 158 | (applied along the 0th and 2nd axes). 159 | 160 | To build a Docker image with the final model: 161 | 1. Put your trained models into the `submission_data` folder 162 | 2. Run: 163 | ```bash 164 | docker build . -t mood:latest 165 | ``` 166 | 167 | To run inference using the Docker image: 168 | ```bash 169 | docker run --gpus all -v {input_dir}:/mnt/data -v {output_dir}:/mnt/pred mood:latest sh /workspace/run_{sample/pixel}_{TASK}.sh /mnt/data /mnt/pred 170 | ``` 171 | 172 | ## Cite 173 | If you use this code in your research, please cite: 174 | 175 | ```bibtex 176 | @article{zimmerer2022mood, 177 | title={MOOD 2020: A Public Benchmark for Out-of-Distribution Detection and Localization on Medical Images}, 178 | author={Zimmerer, David and Full, Peter M and Isensee, Fabian and J{\"a}ger, Paul and Adler, Tim and Petersen, Jens and K{\"o}hler, Gregor and Ross, Tobias and Reinke, Annika and Kascenas, Antanas and others}, 179 | journal={IEEE Transactions on Medical Imaging}, 180 | year={2022}, 181 | publisher={IEEE}, 182 | volume={41}, 183 | number={10}, 184 | pages={2728-2738}, 185 | doi={10.1109/TMI.2022.3170077} 186 | } 187 | ``` 188 | 189 | 190 | 191 | [license-shield]: https://img.shields.io/badge/License-Apache%202.0-blue.svg 192 | [license-url]: https://github.com/ninatu/mood_challenge/blob/master/LICENSE 193 | 194 | -------------------------------------------------------------------------------- /configs/abdom/pixel/axis_0/train_config.yaml: -------------------------------------------------------------------------------- 1 | adam_kwargs: 2 | lr: 0.0002 3 | batch_sizes: 4 | 128: 64 5 | checkpoint_root: ./output/checkpoint/abdom/pixel/axis_0 6 | dec: 7 | kwargs: 8 | inner_dims: 9 | - 64 10 | - 64 11 | - 64 12 | - 64 13 | - 64 14 | - 64 15 | - 64 16 | type: residual9 17 | early_stopping_min_delta: 0.002 18 | early_stopping_patience: 3 19 | enc: 20 | kwargs: 21 | inner_dims: 22 | - 64 23 | - 64 24 | - 64 25 | - 64 26 | - 64 27 | - 64 28 | - 64 29 | type: residual9 30 | finetune_from: null 31 | image_dim: 1 32 | image_rec_loss: 33 | loss_kwargs: 34 | normalize_to_vgg_input: false 35 | path_to_vgg19_weights: ./output/vgg_weights/simclr_exp_1.tar 36 | use_smooth_pg: false 37 | weights_per_resolution: 38 | 128: 39 | feature_weights: 40 | r42: 
1 41 | img_weight: 0 42 | loss_type: relative_perceptual_L1 43 | image_sample_iter: 1000 44 | initial_image_res: 128 45 | iters_per_res: {} 46 | latent_dim: 64 47 | latent_res: 1 48 | log_iter: 10 49 | log_root: ./output/logs/abdom/pixel/axis_0 50 | max_image_res: 128 51 | model_type: dpa 52 | num_workers: 16 53 | random_seed: 2349 54 | stab_iter: 300000 55 | train_dataset: 56 | dataset_kwargs: 57 | cache: false 58 | fold: 0 59 | folds_path: ./folds/abdom/train_folds_10.csv 60 | image_root: ./data/preprocessed/abdom_train/2d_axis_0 61 | split: train 62 | dataset_type: numpy2d 63 | transform_kwargs: 64 | resize: 128 65 | trns_iter: 0 66 | val_dataset: 67 | dataset_kwargs: 68 | cache: false 69 | fold: 0 70 | folds_path: ./folds/abdom/train_folds_10.csv 71 | image_root: ./data/preprocessed/abdom_train/2d_axis_0 72 | split: val 73 | dataset_type: numpy2d 74 | transform_kwargs: 75 | resize: 128 76 | val_iter: 10000 77 | verbose: true 78 | -------------------------------------------------------------------------------- /configs/abdom/pixel/axis_2/train_config.yaml: -------------------------------------------------------------------------------- 1 | adam_kwargs: 2 | lr: 0.0001 3 | batch_sizes: 4 | 128: 64 5 | checkpoint_root: ./output/checkpoint/abdom/pixel/axis_2 6 | dec: 7 | kwargs: 8 | inner_dims: 9 | - 128 10 | - 128 11 | - 128 12 | - 128 13 | - 128 14 | - 128 15 | - 128 16 | type: residual9 17 | early_stopping_min_delta: 0.002 18 | early_stopping_patience: 3 19 | enc: 20 | kwargs: 21 | inner_dims: 22 | - 128 23 | - 128 24 | - 128 25 | - 128 26 | - 128 27 | - 128 28 | - 128 29 | type: residual9 30 | finetune_from: null 31 | image_dim: 1 32 | image_rec_loss: 33 | loss_kwargs: 34 | normalize_to_vgg_input: false 35 | path_to_vgg19_weights: ./output/vgg_weights/simclr_exp_1.tar 36 | use_smooth_pg: false 37 | weights_per_resolution: 38 | 128: 39 | feature_weights: 40 | r42: 1 41 | img_weight: 0 42 | loss_type: relative_perceptual_L1 43 | image_sample_iter: 1000 44 | initial_image_res: 128 45 | iters_per_res: {} 46 | latent_dim: 128 47 | latent_res: 1 48 | log_iter: 10 49 | log_root: ./output/logs/abdom/pixel/axis_2 50 | max_image_res: 128 51 | model_type: dpa 52 | num_workers: 16 53 | random_seed: 9289 54 | stab_iter: 300000 55 | train_dataset: 56 | dataset_kwargs: 57 | cache: false 58 | fold: 0 59 | folds_path: ./folds/abdom/train_folds_10.csv 60 | image_root: ./data/preprocessed/abdom_train/2d_axis_2 61 | split: train 62 | dataset_type: numpy2d 63 | transform_kwargs: 64 | resize: 128 65 | trns_iter: 0 66 | val_dataset: 67 | dataset_kwargs: 68 | cache: false 69 | fold: 0 70 | folds_path: ./folds/abdom/train_folds_10.csv 71 | image_root: ./data/preprocessed/abdom_train/2d_axis_2 72 | split: val 73 | dataset_type: numpy2d 74 | transform_kwargs: 75 | resize: 128 76 | val_iter: 10000 77 | verbose: true 78 | -------------------------------------------------------------------------------- /configs/abdom/sample/axis_0/train_config.yaml: -------------------------------------------------------------------------------- 1 | adam_kwargs: 2 | lr: 0.0001 3 | batch_sizes: 4 | 128: 64 5 | checkpoint_root: ./output/checkpoint/abdom/sample/axis_0 6 | dec: 7 | kwargs: 8 | inner_dims: 9 | - 128 10 | - 128 11 | - 128 12 | - 128 13 | - 128 14 | - 128 15 | - 128 16 | type: residual9 17 | early_stopping_min_delta: 0.002 18 | early_stopping_patience: 3 19 | enc: 20 | kwargs: 21 | inner_dims: 22 | - 128 23 | - 128 24 | - 128 25 | - 128 26 | - 128 27 | - 128 28 | - 128 29 | type: residual9 30 | finetune_from: null 31 | 
image_dim: 1 32 | image_rec_loss: 33 | loss_kwargs: 34 | normalize_to_vgg_input: false 35 | path_to_vgg19_weights: ./output/vgg_weights/simclr_exp_1.tar 36 | use_smooth_pg: false 37 | weights_per_resolution: 38 | 128: 39 | feature_weights: 40 | r42: 1 41 | img_weight: 0 42 | loss_type: relative_perceptual_L1 43 | image_sample_iter: 1000 44 | initial_image_res: 128 45 | iters_per_res: {} 46 | latent_dim: 128 47 | latent_res: 1 48 | log_iter: 10 49 | log_root: ./output/logs/abdom/sample/axis_0 50 | max_image_res: 128 51 | model_type: dpa 52 | num_workers: 16 53 | random_seed: 9289 54 | stab_iter: 300000 55 | train_dataset: 56 | dataset_kwargs: 57 | cache: false 58 | fold: 0 59 | folds_path: ./folds/abdom/train_folds_10.csv 60 | image_root: ./data/preprocessed/abdom_train/2d_axis_0 61 | split: train 62 | dataset_type: numpy2d 63 | transform_kwargs: 64 | resize: 128 65 | trns_iter: 0 66 | val_dataset: 67 | dataset_kwargs: 68 | cache: false 69 | fold: 0 70 | folds_path: ./folds/abdom/train_folds_10.csv 71 | image_root: ./data/preprocessed/abdom_train/2d_axis_0 72 | split: val 73 | dataset_type: numpy2d 74 | transform_kwargs: 75 | resize: 128 76 | val_iter: 10000 77 | verbose: true 78 | -------------------------------------------------------------------------------- /configs/abdom/sample/axis_2/train_config.yaml: -------------------------------------------------------------------------------- 1 | adam_kwargs: 2 | lr: 0.0001 3 | batch_sizes: 4 | 128: 64 5 | checkpoint_root: ./output/checkpoint/abdom/sample/axis_2 6 | dec: 7 | kwargs: 8 | inner_dims: 9 | - 128 10 | - 128 11 | - 128 12 | - 128 13 | - 128 14 | - 128 15 | - 128 16 | type: residual9 17 | early_stopping_min_delta: 0.002 18 | early_stopping_patience: 3 19 | enc: 20 | kwargs: 21 | inner_dims: 22 | - 128 23 | - 128 24 | - 128 25 | - 128 26 | - 128 27 | - 128 28 | - 128 29 | type: residual9 30 | finetune_from: null 31 | image_dim: 1 32 | image_rec_loss: 33 | loss_kwargs: 34 | normalize_to_vgg_input: false 35 | path_to_vgg19_weights: ./output/vgg_weights/simclr_exp_1.tar 36 | use_smooth_pg: false 37 | weights_per_resolution: 38 | 128: 39 | feature_weights: 40 | r42: 1 41 | img_weight: 0 42 | loss_type: relative_perceptual_L1 43 | image_sample_iter: 1000 44 | initial_image_res: 128 45 | iters_per_res: {} 46 | latent_dim: 128 47 | latent_res: 1 48 | log_iter: 10 49 | log_root: ./output/logs/abdom/sample/axis_2 50 | max_image_res: 128 51 | model_type: dpa 52 | num_workers: 16 53 | random_seed: 9289 54 | stab_iter: 300000 55 | train_dataset: 56 | dataset_kwargs: 57 | cache: false 58 | fold: 0 59 | folds_path: ./folds/abdom/train_folds_10.csv 60 | image_root: ./data/preprocessed/abdom_train/2d_axis_2 61 | split: train 62 | dataset_type: numpy2d 63 | transform_kwargs: 64 | resize: 128 65 | trns_iter: 0 66 | val_dataset: 67 | dataset_kwargs: 68 | cache: false 69 | fold: 0 70 | folds_path: ./folds/abdom/train_folds_10.csv 71 | image_root: ./data/preprocessed/abdom_train/2d_axis_2 72 | split: val 73 | dataset_type: numpy2d 74 | transform_kwargs: 75 | resize: 128 76 | val_iter: 10000 77 | verbose: true 78 | -------------------------------------------------------------------------------- /configs/brain/pixel/axis_0/train_config.yaml: -------------------------------------------------------------------------------- 1 | adam_kwargs: 2 | lr: 0.0001 3 | batch_sizes: 4 | 128: 64 5 | checkpoint_root: ./output/checkpoint/brain/pixel/axis_0 6 | dec: 7 | kwargs: 8 | inner_dims: 9 | - 128 10 | - 128 11 | - 128 12 | - 128 13 | - 128 14 | - 128 15 | - 128 16 | 
type: residual9 17 | early_stopping_min_delta: 0.002 18 | early_stopping_patience: 3 19 | enc: 20 | kwargs: 21 | inner_dims: 22 | - 128 23 | - 128 24 | - 128 25 | - 128 26 | - 128 27 | - 128 28 | - 128 29 | type: residual9 30 | finetune_from: null 31 | image_dim: 1 32 | image_rec_loss: 33 | loss_kwargs: 34 | normalize_to_vgg_input: false 35 | path_to_vgg19_weights: ./output/vgg_weights/simclr_exp_1.tar 36 | use_smooth_pg: false 37 | weights_per_resolution: 38 | 128: 39 | feature_weights: 40 | r32: 1 41 | img_weight: 0 42 | loss_type: relative_perceptual_L1 43 | image_sample_iter: 1000 44 | initial_image_res: 128 45 | iters_per_res: {} 46 | latent_dim: 128 47 | latent_res: 1 48 | log_iter: 10 49 | log_root: ./output/logs/brain/pixel/axis_0 50 | max_image_res: 128 51 | model_type: dpa 52 | num_workers: 16 53 | random_seed: 5317 54 | stab_iter: 300000 55 | train_dataset: 56 | dataset_kwargs: 57 | cache: false 58 | fold: 0 59 | folds_path: ./folds/brain/train_folds_10.csv 60 | image_root: ./data/preprocessed/brain_train/2d_axis_0 61 | split: train 62 | dataset_type: numpy2d 63 | transform_kwargs: 64 | resize: 128 65 | trns_iter: 0 66 | val_dataset: 67 | dataset_kwargs: 68 | cache: false 69 | fold: 0 70 | folds_path: ./folds/brain/train_folds_10.csv 71 | image_root: ./data/preprocessed/brain_train/2d_axis_0 72 | split: val 73 | dataset_type: numpy2d 74 | transform_kwargs: 75 | resize: 128 76 | val_iter: 10000 77 | verbose: true 78 | -------------------------------------------------------------------------------- /configs/brain/pixel/axis_1/train_config.yaml: -------------------------------------------------------------------------------- 1 | adam_kwargs: 2 | lr: 0.0001 3 | batch_sizes: 4 | 128: 64 5 | checkpoint_root: ./output/checkpoint/brain/pixel/axis_1 6 | dec: 7 | kwargs: 8 | inner_dims: 9 | - 128 10 | - 128 11 | - 128 12 | - 128 13 | - 128 14 | - 128 15 | - 128 16 | type: residual9 17 | early_stopping_min_delta: 0.002 18 | early_stopping_patience: 3 19 | enc: 20 | kwargs: 21 | inner_dims: 22 | - 128 23 | - 128 24 | - 128 25 | - 128 26 | - 128 27 | - 128 28 | - 128 29 | type: residual9 30 | finetune_from: null 31 | image_dim: 1 32 | image_rec_loss: 33 | loss_kwargs: 34 | normalize_to_vgg_input: false 35 | path_to_vgg19_weights: ./output/vgg_weights/simclr_exp_1.tar 36 | use_smooth_pg: false 37 | weights_per_resolution: 38 | 128: 39 | feature_weights: 40 | r32: 1 41 | img_weight: 0 42 | loss_type: relative_perceptual_L1 43 | image_sample_iter: 1000 44 | initial_image_res: 128 45 | iters_per_res: {} 46 | latent_dim: 128 47 | latent_res: 1 48 | log_iter: 10 49 | log_root: ./output/logs/brain/pixel/axis_1 50 | max_image_res: 128 51 | model_type: dpa 52 | num_workers: 16 53 | random_seed: 5317 54 | stab_iter: 300000 55 | train_dataset: 56 | dataset_kwargs: 57 | cache: false 58 | fold: 0 59 | folds_path: ./folds/brain/train_folds_10.csv 60 | image_root: ./data/preprocessed/brain_train/2d_axis_1 61 | split: train 62 | dataset_type: numpy2d 63 | transform_kwargs: 64 | resize: 128 65 | trns_iter: 0 66 | val_dataset: 67 | dataset_kwargs: 68 | cache: false 69 | fold: 0 70 | folds_path: ./folds/brain/train_folds_10.csv 71 | image_root: ./data/preprocessed/brain_train/2d_axis_1 72 | split: val 73 | dataset_type: numpy2d 74 | transform_kwargs: 75 | resize: 128 76 | val_iter: 10000 77 | verbose: true 78 | -------------------------------------------------------------------------------- /configs/brain/pixel/axis_2/train_config.yaml: 
-------------------------------------------------------------------------------- 1 | adam_kwargs: 2 | lr: 0.0001 3 | batch_sizes: 4 | 128: 64 5 | checkpoint_root: ./output/checkpoint/brain/pixel/axis_2 6 | dec: 7 | kwargs: 8 | inner_dims: 9 | - 128 10 | - 128 11 | - 128 12 | - 128 13 | - 128 14 | - 128 15 | - 128 16 | type: residual9 17 | early_stopping_min_delta: 0.002 18 | early_stopping_patience: 3 19 | enc: 20 | kwargs: 21 | inner_dims: 22 | - 128 23 | - 128 24 | - 128 25 | - 128 26 | - 128 27 | - 128 28 | - 128 29 | type: residual9 30 | finetune_from: null 31 | image_dim: 1 32 | image_rec_loss: 33 | loss_kwargs: 34 | normalize_to_vgg_input: false 35 | path_to_vgg19_weights: ./output/vgg_weights/simclr_exp_1.tar 36 | use_smooth_pg: false 37 | weights_per_resolution: 38 | 128: 39 | feature_weights: 40 | r32: 1 41 | img_weight: 0 42 | loss_type: relative_perceptual_L1 43 | image_sample_iter: 1000 44 | initial_image_res: 128 45 | iters_per_res: {} 46 | latent_dim: 128 47 | latent_res: 1 48 | log_iter: 10 49 | log_root: ./output/logs/brain/pixel/axis_2 50 | max_image_res: 128 51 | model_type: dpa 52 | num_workers: 16 53 | random_seed: 5317 54 | stab_iter: 300000 55 | train_dataset: 56 | dataset_kwargs: 57 | cache: false 58 | fold: 0 59 | folds_path: ./folds/brain/train_folds_10.csv 60 | image_root: ./data/preprocessed/brain_train/2d_axis_2 61 | split: train 62 | dataset_type: numpy2d 63 | transform_kwargs: 64 | resize: 128 65 | trns_iter: 0 66 | val_dataset: 67 | dataset_kwargs: 68 | cache: false 69 | fold: 0 70 | folds_path: ./folds/brain/train_folds_10.csv 71 | image_root: ./data/preprocessed/brain_train/2d_axis_2 72 | split: val 73 | dataset_type: numpy2d 74 | transform_kwargs: 75 | resize: 128 76 | val_iter: 10000 77 | verbose: true 78 | -------------------------------------------------------------------------------- /configs/brain/sample/axis_0/train_config.yaml: -------------------------------------------------------------------------------- 1 | adam_kwargs: 2 | lr: 0.0001 3 | batch_sizes: 4 | 128: 64 5 | checkpoint_root: ./output/checkpoint/brain/sample/axis_0 6 | dec: 7 | kwargs: 8 | inner_dims: 9 | - 128 10 | - 128 11 | - 128 12 | - 128 13 | - 128 14 | - 128 15 | - 128 16 | type: residual9 17 | early_stopping_min_delta: 0.002 18 | early_stopping_patience: 3 19 | enc: 20 | kwargs: 21 | inner_dims: 22 | - 128 23 | - 128 24 | - 128 25 | - 128 26 | - 128 27 | - 128 28 | - 128 29 | type: residual9 30 | finetune_from: null 31 | image_dim: 1 32 | image_rec_loss: 33 | loss_kwargs: 34 | normalize_to_vgg_input: false 35 | path_to_vgg19_weights: ./output/vgg_weights/simclr_exp_1.tar 36 | use_smooth_pg: false 37 | weights_per_resolution: 38 | 128: 39 | feature_weights: 40 | r42: 1 41 | img_weight: 0 42 | loss_type: relative_perceptual_L1 43 | image_sample_iter: 1000 44 | initial_image_res: 128 45 | iters_per_res: {} 46 | latent_dim: 128 47 | latent_res: 1 48 | log_iter: 10 49 | log_root: ./output/logs/brain/sample/axis_0 50 | max_image_res: 128 51 | model_type: dpa 52 | num_workers: 16 53 | random_seed: 9289 54 | stab_iter: 300000 55 | train_dataset: 56 | dataset_kwargs: 57 | cache: false 58 | fold: 0 59 | folds_path: ./folds/brain/train_folds_10.csv 60 | image_root: ./data/preprocessed/brain_train/2d_axis_0 61 | split: train 62 | dataset_type: numpy2d 63 | transform_kwargs: 64 | resize: 128 65 | trns_iter: 0 66 | val_dataset: 67 | dataset_kwargs: 68 | cache: false 69 | fold: 0 70 | folds_path: ./folds/brain/train_folds_10.csv 71 | image_root: ./data/preprocessed/brain_train/2d_axis_0 72 | 
split: val 73 | dataset_type: numpy2d 74 | transform_kwargs: 75 | resize: 128 76 | val_iter: 10000 77 | verbose: true 78 | -------------------------------------------------------------------------------- /configs/brain/sample/axis_1/train_config.yaml: -------------------------------------------------------------------------------- 1 | adam_kwargs: 2 | lr: 0.0002 3 | batch_sizes: 4 | 128: 64 5 | checkpoint_root: ./output/checkpoint/brain/sample/axis_1 6 | dec: 7 | kwargs: 8 | inner_dims: 9 | - 64 10 | - 64 11 | - 64 12 | - 64 13 | - 64 14 | - 64 15 | - 64 16 | type: residual9 17 | early_stopping_min_delta: 0.002 18 | early_stopping_patience: 3 19 | enc: 20 | kwargs: 21 | inner_dims: 22 | - 64 23 | - 64 24 | - 64 25 | - 64 26 | - 64 27 | - 64 28 | - 64 29 | type: residual9 30 | finetune_from: null 31 | image_dim: 1 32 | image_rec_loss: 33 | loss_kwargs: 34 | normalize_to_vgg_input: false 35 | path_to_vgg19_weights: ./output/vgg_weights/simclr_exp_1.tar 36 | use_smooth_pg: false 37 | weights_per_resolution: 38 | 128: 39 | feature_weights: 40 | r32: 1 41 | img_weight: 0 42 | loss_type: relative_perceptual_L1 43 | image_sample_iter: 1000 44 | initial_image_res: 128 45 | iters_per_res: {} 46 | latent_dim: 64 47 | latent_res: 1 48 | log_iter: 10 49 | log_root: ./output/logs/brain/sample/axis_1 50 | max_image_res: 128 51 | model_type: dpa 52 | num_workers: 16 53 | random_seed: 9775 54 | stab_iter: 300000 55 | train_dataset: 56 | dataset_kwargs: 57 | cache: false 58 | fold: 0 59 | folds_path: ./folds/brain/train_folds_10.csv 60 | image_root: ./data/preprocessed/brain_train/2d_axis_1 61 | split: train 62 | dataset_type: numpy2d 63 | transform_kwargs: 64 | resize: 128 65 | trns_iter: 0 66 | val_dataset: 67 | dataset_kwargs: 68 | cache: false 69 | fold: 0 70 | folds_path: ./folds/brain/train_folds_10.csv 71 | image_root: ./data/preprocessed/brain_train/2d_axis_1 72 | split: val 73 | dataset_type: numpy2d 74 | transform_kwargs: 75 | resize: 128 76 | val_iter: 10000 77 | verbose: true 78 | -------------------------------------------------------------------------------- /configs/brain/sample/axis_2/train_config.yaml: -------------------------------------------------------------------------------- 1 | adam_kwargs: 2 | lr: 0.0001 3 | batch_sizes: 4 | 128: 64 5 | checkpoint_root: ./output/checkpoint/brain/sample/axis_2 6 | dec: 7 | kwargs: 8 | inner_dims: 9 | - 128 10 | - 128 11 | - 128 12 | - 128 13 | - 128 14 | - 128 15 | - 128 16 | type: residual9 17 | early_stopping_min_delta: 0.002 18 | early_stopping_patience: 3 19 | enc: 20 | kwargs: 21 | inner_dims: 22 | - 128 23 | - 128 24 | - 128 25 | - 128 26 | - 128 27 | - 128 28 | - 128 29 | type: residual9 30 | finetune_from: null 31 | image_dim: 1 32 | image_rec_loss: 33 | loss_kwargs: 34 | normalize_to_vgg_input: false 35 | path_to_vgg19_weights: ./output/vgg_weights/simclr_exp_1.tar 36 | use_smooth_pg: false 37 | weights_per_resolution: 38 | 128: 39 | feature_weights: 40 | r42: 1 41 | img_weight: 0 42 | loss_type: relative_perceptual_L1 43 | image_sample_iter: 1000 44 | initial_image_res: 128 45 | iters_per_res: {} 46 | latent_dim: 128 47 | latent_res: 1 48 | log_iter: 10 49 | log_root: ./output/logs/brain/sample/axis_2 50 | max_image_res: 128 51 | model_type: dpa 52 | num_workers: 16 53 | random_seed: 9289 54 | stab_iter: 300000 55 | train_dataset: 56 | dataset_kwargs: 57 | cache: false 58 | fold: 0 59 | folds_path: ./folds/brain/train_folds_10.csv 60 | image_root: ./data/preprocessed/brain_train/2d_axis_2 61 | split: train 62 | dataset_type: numpy2d 63 | 
transform_kwargs: 64 | resize: 128 65 | trns_iter: 0 66 | val_dataset: 67 | dataset_kwargs: 68 | cache: false 69 | fold: 0 70 | folds_path: ./folds/brain/train_folds_10.csv 71 | image_root: ./data/preprocessed/brain_train/2d_axis_2 72 | split: val 73 | dataset_type: numpy2d 74 | transform_kwargs: 75 | resize: 128 76 | val_iter: 10000 77 | verbose: true 78 | -------------------------------------------------------------------------------- /configs/inference_3d_example.yaml: -------------------------------------------------------------------------------- 1 | ##=========================== CONFIG FOR INFERENCE =========================== 2 | 3 | model_type: dpa 4 | 5 | verbose: True 6 | save_inference: False 7 | 8 | 9 | apply_along_axis: 0 10 | score_reduction: max 11 | do_not_process_small_area: True 12 | delete_zero_area: False 13 | resize_3d_for_evaluation: 128 14 | 15 | 16 | test_dataset: 17 | dataset_type: nifti3d 18 | dataset_kwargs: 19 | image_root: ./data/preprocessed/brain_train/3d_test 20 | mask_root: data/preprocessed/brain_train/3d_test_masks 21 | folds_path: ./folds/brain/train_folds_10.csv 22 | fold: 0 23 | split: val 24 | transform_kwargs: 25 | resize: 64 26 | 27 | test_image_rec_loss: 28 | loss_type: relative_perceptual_L1 29 | loss_kwargs: 30 | mode_3d: True 31 | path_to_vgg19_weights: ./output/vgg_weights/simclr_exp_1.tar 32 | normalize_to_vgg_input: False 33 | 64: 34 | img_weight: 0 35 | feature_weights: 36 | r42: 1 37 | use_smooth_pg: False 38 | 39 | 40 | 41 | results_root: ./output/results/example 42 | test_model_path: ./output/checkpoint/example/anomaly_detection.tar 43 | -------------------------------------------------------------------------------- /configs/train_example.yaml: -------------------------------------------------------------------------------- 1 | #=========================== CONFIG FOR TRAINING =========================== 2 | model_type: dpa 3 | 4 | verbose: True 5 | random_seed: 4343 6 | finetune_from: 7 | 8 | checkpoint_root: ./output/checkpoint/example 9 | log_root: ./output/logs/example 10 | 11 | #--------------------- Hyperparameters of training ------------------------------ 12 | 13 | max_image_res: 64 14 | initial_image_res: 64 15 | image_dim: 1 16 | 17 | latent_res: 1 18 | latent_dim: 64 19 | 20 | trns_iter: 0 21 | stab_iter: 300000 22 | iters_per_res: {} 23 | val_iter: 10000 24 | log_iter: 10 25 | image_sample_iter: 1000 26 | early_stopping_patience: 3 27 | early_stopping_min_delta: 0.002 28 | 29 | #--------------------- Hyperparameters of optimizers --------------------------- 30 | 31 | adam_kwargs: 32 | lr: 0.0005 33 | 34 | num_workers: 16 35 | batch_sizes: 36 | 64: 64 37 | 38 | #--------------------- Hyperparameters of dataset ------------------------------ 39 | 40 | train_dataset: 41 | dataset_type: numpy2d 42 | dataset_kwargs: 43 | image_root: ./data/preprocessed/brain_train/2d_axis_2 44 | folds_path: ./folds/brain/train_folds_10.csv 45 | fold: 0 46 | split: train 47 | transform_kwargs: 48 | resize: 64 49 | 50 | val_dataset: 51 | dataset_type: numpy2d 52 | dataset_kwargs: 53 | image_root: ./data/preprocessed/brain_train/2d_axis_2 54 | folds_path: ./folds/brain/train_folds_10.csv 55 | fold: 0 56 | split: val 57 | transform_kwargs: 58 | resize: 64 59 | #--------------------- Hyperparameters of models ------------------------------ 60 | 61 | enc: 62 | type: residual9 63 | kwargs: 64 | inner_dims: [64, 64, 64, 64, 64] 65 | 66 | dec: 67 | type: residual9 68 | kwargs: 69 | inner_dims: [64, 64, 64, 64, 64] 70 | 71 | 72 | #--------------------- 
Hyperparameters of loss function --------------------------- 73 | 74 | image_rec_loss: 75 | loss_type: relative_perceptual_L1 76 | loss_kwargs: 77 | path_to_vgg19_weights: ./output/vgg_weights/simclr_exp_1.tar 78 | normalize_to_vgg_input: False 79 | weights_per_resolution: 80 | 64: 81 | img_weight: 0 82 | feature_weights: 83 | r42: 1 84 | use_smooth_pg: False 85 | 86 | -------------------------------------------------------------------------------- /folds/abdom/train_folds_10.csv: -------------------------------------------------------------------------------- 1 | filename,test_fold 2 | 00359.nii.gz,0 3 | 00184.nii.gz,0 4 | 00466.nii.gz,0 5 | 00262.nii.gz,0 6 | 00282.nii.gz,0 7 | 00012.nii.gz,0 8 | 00250.nii.gz,0 9 | 00155.nii.gz,0 10 | 00267.nii.gz,0 11 | 00285.nii.gz,0 12 | 00079.nii.gz,0 13 | 00498.nii.gz,0 14 | 00196.nii.gz,0 15 | 00499.nii.gz,0 16 | 00452.nii.gz,0 17 | 00138.nii.gz,0 18 | 00292.nii.gz,0 19 | 00407.nii.gz,0 20 | 00529.nii.gz,0 21 | 00231.nii.gz,0 22 | 00459.nii.gz,0 23 | 00489.nii.gz,0 24 | 00241.nii.gz,0 25 | 00515.nii.gz,0 26 | 00537.nii.gz,0 27 | 00229.nii.gz,0 28 | 00081.nii.gz,0 29 | 00110.nii.gz,0 30 | 00210.nii.gz,0 31 | 00042.nii.gz,0 32 | 00413.nii.gz,0 33 | 00180.nii.gz,0 34 | 00287.nii.gz,0 35 | 00172.nii.gz,0 36 | 00355.nii.gz,0 37 | 00435.nii.gz,0 38 | 00072.nii.gz,0 39 | 00297.nii.gz,0 40 | 00484.nii.gz,0 41 | 00028.nii.gz,0 42 | 00211.nii.gz,0 43 | 00043.nii.gz,0 44 | 00468.nii.gz,0 45 | 00067.nii.gz,0 46 | 00166.nii.gz,0 47 | 00027.nii.gz,0 48 | 00154.nii.gz,0 49 | 00098.nii.gz,0 50 | 00406.nii.gz,0 51 | 00408.nii.gz,0 52 | 00087.nii.gz,0 53 | 00046.nii.gz,0 54 | 00344.nii.gz,0 55 | 00088.nii.gz,0 56 | 00153.nii.gz,0 57 | 00226.nii.gz,1 58 | 00399.nii.gz,1 59 | 00201.nii.gz,1 60 | 00467.nii.gz,1 61 | 00280.nii.gz,1 62 | 00463.nii.gz,1 63 | 00217.nii.gz,1 64 | 00272.nii.gz,1 65 | 00320.nii.gz,1 66 | 00050.nii.gz,1 67 | 00048.nii.gz,1 68 | 00239.nii.gz,1 69 | 00450.nii.gz,1 70 | 00278.nii.gz,1 71 | 00372.nii.gz,1 72 | 00288.nii.gz,1 73 | 00540.nii.gz,1 74 | 00204.nii.gz,1 75 | 00129.nii.gz,1 76 | 00530.nii.gz,1 77 | 00364.nii.gz,1 78 | 00156.nii.gz,1 79 | 00176.nii.gz,1 80 | 00362.nii.gz,1 81 | 00536.nii.gz,1 82 | 00377.nii.gz,1 83 | 00437.nii.gz,1 84 | 00009.nii.gz,1 85 | 00244.nii.gz,1 86 | 00040.nii.gz,1 87 | 00073.nii.gz,1 88 | 00432.nii.gz,1 89 | 00108.nii.gz,1 90 | 00062.nii.gz,1 91 | 00394.nii.gz,1 92 | 00071.nii.gz,1 93 | 00021.nii.gz,1 94 | 00532.nii.gz,1 95 | 00275.nii.gz,1 96 | 00318.nii.gz,1 97 | 00101.nii.gz,1 98 | 00165.nii.gz,1 99 | 00099.nii.gz,1 100 | 00378.nii.gz,1 101 | 00497.nii.gz,1 102 | 00175.nii.gz,1 103 | 00066.nii.gz,1 104 | 00519.nii.gz,1 105 | 00253.nii.gz,1 106 | 00506.nii.gz,1 107 | 00336.nii.gz,1 108 | 00049.nii.gz,1 109 | 00323.nii.gz,1 110 | 00171.nii.gz,1 111 | 00192.nii.gz,1 112 | 00446.nii.gz,2 113 | 00386.nii.gz,2 114 | 00541.nii.gz,2 115 | 00205.nii.gz,2 116 | 00051.nii.gz,2 117 | 00091.nii.gz,2 118 | 00036.nii.gz,2 119 | 00434.nii.gz,2 120 | 00026.nii.gz,2 121 | 00007.nii.gz,2 122 | 00546.nii.gz,2 123 | 00006.nii.gz,2 124 | 00395.nii.gz,2 125 | 00495.nii.gz,2 126 | 00096.nii.gz,2 127 | 00151.nii.gz,2 128 | 00159.nii.gz,2 129 | 00461.nii.gz,2 130 | 00430.nii.gz,2 131 | 00384.nii.gz,2 132 | 00182.nii.gz,2 133 | 00215.nii.gz,2 134 | 00296.nii.gz,2 135 | 00374.nii.gz,2 136 | 00351.nii.gz,2 137 | 00465.nii.gz,2 138 | 00332.nii.gz,2 139 | 00487.nii.gz,2 140 | 00199.nii.gz,2 141 | 00366.nii.gz,2 142 | 00471.nii.gz,2 143 | 00120.nii.gz,2 144 | 00284.nii.gz,2 145 | 00291.nii.gz,2 146 | 00077.nii.gz,2 147 | 00227.nii.gz,2 148 
| 00207.nii.gz,2 149 | 00055.nii.gz,2 150 | 00118.nii.gz,2 151 | 00428.nii.gz,2 152 | 00133.nii.gz,2 153 | 00130.nii.gz,2 154 | 00141.nii.gz,2 155 | 00163.nii.gz,2 156 | 00371.nii.gz,2 157 | 00259.nii.gz,2 158 | 00500.nii.gz,2 159 | 00514.nii.gz,2 160 | 00150.nii.gz,2 161 | 00260.nii.gz,2 162 | 00508.nii.gz,2 163 | 00202.nii.gz,2 164 | 00533.nii.gz,2 165 | 00486.nii.gz,2 166 | 00083.nii.gz,2 167 | 00493.nii.gz,3 168 | 00334.nii.gz,3 169 | 00208.nii.gz,3 170 | 00517.nii.gz,3 171 | 00102.nii.gz,3 172 | 00401.nii.gz,3 173 | 00419.nii.gz,3 174 | 00254.nii.gz,3 175 | 00145.nii.gz,3 176 | 00340.nii.gz,3 177 | 00103.nii.gz,3 178 | 00095.nii.gz,3 179 | 00427.nii.gz,3 180 | 00396.nii.gz,3 181 | 00501.nii.gz,3 182 | 00339.nii.gz,3 183 | 00060.nii.gz,3 184 | 00423.nii.gz,3 185 | 00510.nii.gz,3 186 | 00193.nii.gz,3 187 | 00352.nii.gz,3 188 | 00295.nii.gz,3 189 | 00389.nii.gz,3 190 | 00132.nii.gz,3 191 | 00252.nii.gz,3 192 | 00354.nii.gz,3 193 | 00521.nii.gz,3 194 | 00312.nii.gz,3 195 | 00251.nii.gz,3 196 | 00538.nii.gz,3 197 | 00475.nii.gz,3 198 | 00346.nii.gz,3 199 | 00416.nii.gz,3 200 | 00003.nii.gz,3 201 | 00169.nii.gz,3 202 | 00174.nii.gz,3 203 | 00431.nii.gz,3 204 | 00256.nii.gz,3 205 | 00478.nii.gz,3 206 | 00524.nii.gz,3 207 | 00139.nii.gz,3 208 | 00522.nii.gz,3 209 | 00261.nii.gz,3 210 | 00299.nii.gz,3 211 | 00473.nii.gz,3 212 | 00439.nii.gz,3 213 | 00418.nii.gz,3 214 | 00322.nii.gz,3 215 | 00476.nii.gz,3 216 | 00424.nii.gz,3 217 | 00442.nii.gz,3 218 | 00455.nii.gz,3 219 | 00188.nii.gz,3 220 | 00135.nii.gz,3 221 | 00136.nii.gz,3 222 | 00329.nii.gz,4 223 | 00410.nii.gz,4 224 | 00445.nii.gz,4 225 | 00392.nii.gz,4 226 | 00330.nii.gz,4 227 | 00543.nii.gz,4 228 | 00064.nii.gz,4 229 | 00031.nii.gz,4 230 | 00013.nii.gz,4 231 | 00368.nii.gz,4 232 | 00194.nii.gz,4 233 | 00022.nii.gz,4 234 | 00304.nii.gz,4 235 | 00097.nii.gz,4 236 | 00422.nii.gz,4 237 | 00181.nii.gz,4 238 | 00245.nii.gz,4 239 | 00238.nii.gz,4 240 | 00041.nii.gz,4 241 | 00509.nii.gz,4 242 | 00218.nii.gz,4 243 | 00228.nii.gz,4 244 | 00303.nii.gz,4 245 | 00341.nii.gz,4 246 | 00185.nii.gz,4 247 | 00528.nii.gz,4 248 | 00314.nii.gz,4 249 | 00178.nii.gz,4 250 | 00235.nii.gz,4 251 | 00115.nii.gz,4 252 | 00482.nii.gz,4 253 | 00273.nii.gz,4 254 | 00382.nii.gz,4 255 | 00010.nii.gz,4 256 | 00544.nii.gz,4 257 | 00179.nii.gz,4 258 | 00507.nii.gz,4 259 | 00436.nii.gz,4 260 | 00376.nii.gz,4 261 | 00481.nii.gz,4 262 | 00113.nii.gz,4 263 | 00059.nii.gz,4 264 | 00549.nii.gz,4 265 | 00313.nii.gz,4 266 | 00448.nii.gz,4 267 | 00233.nii.gz,4 268 | 00137.nii.gz,4 269 | 00417.nii.gz,4 270 | 00363.nii.gz,4 271 | 00123.nii.gz,4 272 | 00456.nii.gz,4 273 | 00324.nii.gz,4 274 | 00469.nii.gz,4 275 | 00526.nii.gz,4 276 | 00548.nii.gz,4 277 | 00411.nii.gz,5 278 | 00063.nii.gz,5 279 | 00168.nii.gz,5 280 | 00234.nii.gz,5 281 | 00035.nii.gz,5 282 | 00089.nii.gz,5 283 | 00379.nii.gz,5 284 | 00247.nii.gz,5 285 | 00126.nii.gz,5 286 | 00223.nii.gz,5 287 | 00269.nii.gz,5 288 | 00520.nii.gz,5 289 | 00076.nii.gz,5 290 | 00547.nii.gz,5 291 | 00183.nii.gz,5 292 | 00104.nii.gz,5 293 | 00298.nii.gz,5 294 | 00370.nii.gz,5 295 | 00460.nii.gz,5 296 | 00429.nii.gz,5 297 | 00316.nii.gz,5 298 | 00305.nii.gz,5 299 | 00144.nii.gz,5 300 | 00425.nii.gz,5 301 | 00328.nii.gz,5 302 | 00358.nii.gz,5 303 | 00246.nii.gz,5 304 | 00477.nii.gz,5 305 | 00414.nii.gz,5 306 | 00398.nii.gz,5 307 | 00034.nii.gz,5 308 | 00075.nii.gz,5 309 | 00513.nii.gz,5 310 | 00070.nii.gz,5 311 | 00527.nii.gz,5 312 | 00383.nii.gz,5 313 | 00337.nii.gz,5 314 | 00124.nii.gz,5 315 | 00402.nii.gz,5 316 | 00404.nii.gz,5 317 | 
00387.nii.gz,5 318 | 00356.nii.gz,5 319 | 00240.nii.gz,5 320 | 00367.nii.gz,5 321 | 00056.nii.gz,5 322 | 00257.nii.gz,5 323 | 00357.nii.gz,5 324 | 00315.nii.gz,5 325 | 00140.nii.gz,5 326 | 00044.nii.gz,5 327 | 00518.nii.gz,5 328 | 00014.nii.gz,5 329 | 00258.nii.gz,5 330 | 00375.nii.gz,5 331 | 00310.nii.gz,5 332 | 00380.nii.gz,6 333 | 00220.nii.gz,6 334 | 00221.nii.gz,6 335 | 00111.nii.gz,6 336 | 00255.nii.gz,6 337 | 00345.nii.gz,6 338 | 00061.nii.gz,6 339 | 00321.nii.gz,6 340 | 00447.nii.gz,6 341 | 00100.nii.gz,6 342 | 00112.nii.gz,6 343 | 00214.nii.gz,6 344 | 00173.nii.gz,6 345 | 00106.nii.gz,6 346 | 00415.nii.gz,6 347 | 00030.nii.gz,6 348 | 00114.nii.gz,6 349 | 00306.nii.gz,6 350 | 00017.nii.gz,6 351 | 00131.nii.gz,6 352 | 00058.nii.gz,6 353 | 00074.nii.gz,6 354 | 00023.nii.gz,6 355 | 00170.nii.gz,6 356 | 00047.nii.gz,6 357 | 00279.nii.gz,6 358 | 00309.nii.gz,6 359 | 00198.nii.gz,6 360 | 00117.nii.gz,6 361 | 00347.nii.gz,6 362 | 00197.nii.gz,6 363 | 00230.nii.gz,6 364 | 00004.nii.gz,6 365 | 00449.nii.gz,6 366 | 00209.nii.gz,6 367 | 00018.nii.gz,6 368 | 00426.nii.gz,6 369 | 00485.nii.gz,6 370 | 00539.nii.gz,6 371 | 00162.nii.gz,6 372 | 00236.nii.gz,6 373 | 00005.nii.gz,6 374 | 00029.nii.gz,6 375 | 00403.nii.gz,6 376 | 00479.nii.gz,6 377 | 00496.nii.gz,6 378 | 00457.nii.gz,6 379 | 00289.nii.gz,6 380 | 00085.nii.gz,6 381 | 00381.nii.gz,6 382 | 00195.nii.gz,6 383 | 00491.nii.gz,6 384 | 00000.nii.gz,6 385 | 00523.nii.gz,6 386 | 00342.nii.gz,6 387 | 00078.nii.gz,7 388 | 00092.nii.gz,7 389 | 00109.nii.gz,7 390 | 00444.nii.gz,7 391 | 00293.nii.gz,7 392 | 00454.nii.gz,7 393 | 00307.nii.gz,7 394 | 00002.nii.gz,7 395 | 00190.nii.gz,7 396 | 00405.nii.gz,7 397 | 00542.nii.gz,7 398 | 00326.nii.gz,7 399 | 00525.nii.gz,7 400 | 00161.nii.gz,7 401 | 00369.nii.gz,7 402 | 00393.nii.gz,7 403 | 00054.nii.gz,7 404 | 00443.nii.gz,7 405 | 00020.nii.gz,7 406 | 00164.nii.gz,7 407 | 00090.nii.gz,7 408 | 00333.nii.gz,7 409 | 00001.nii.gz,7 410 | 00266.nii.gz,7 411 | 00409.nii.gz,7 412 | 00335.nii.gz,7 413 | 00361.nii.gz,7 414 | 00281.nii.gz,7 415 | 00286.nii.gz,7 416 | 00325.nii.gz,7 417 | 00271.nii.gz,7 418 | 00122.nii.gz,7 419 | 00249.nii.gz,7 420 | 00483.nii.gz,7 421 | 00011.nii.gz,7 422 | 00242.nii.gz,7 423 | 00264.nii.gz,7 424 | 00490.nii.gz,7 425 | 00148.nii.gz,7 426 | 00225.nii.gz,7 427 | 00277.nii.gz,7 428 | 00511.nii.gz,7 429 | 00503.nii.gz,7 430 | 00203.nii.gz,7 431 | 00213.nii.gz,7 432 | 00157.nii.gz,7 433 | 00349.nii.gz,7 434 | 00343.nii.gz,7 435 | 00338.nii.gz,7 436 | 00237.nii.gz,7 437 | 00390.nii.gz,7 438 | 00084.nii.gz,7 439 | 00331.nii.gz,7 440 | 00301.nii.gz,7 441 | 00472.nii.gz,7 442 | 00037.nii.gz,8 443 | 00385.nii.gz,8 444 | 00276.nii.gz,8 445 | 00400.nii.gz,8 446 | 00069.nii.gz,8 447 | 00212.nii.gz,8 448 | 00504.nii.gz,8 449 | 00319.nii.gz,8 450 | 00189.nii.gz,8 451 | 00243.nii.gz,8 452 | 00019.nii.gz,8 453 | 00147.nii.gz,8 454 | 00453.nii.gz,8 455 | 00474.nii.gz,8 456 | 00348.nii.gz,8 457 | 00134.nii.gz,8 458 | 00121.nii.gz,8 459 | 00433.nii.gz,8 460 | 00311.nii.gz,8 461 | 00308.nii.gz,8 462 | 00360.nii.gz,8 463 | 00458.nii.gz,8 464 | 00353.nii.gz,8 465 | 00146.nii.gz,8 466 | 00057.nii.gz,8 467 | 00158.nii.gz,8 468 | 00535.nii.gz,8 469 | 00263.nii.gz,8 470 | 00052.nii.gz,8 471 | 00317.nii.gz,8 472 | 00327.nii.gz,8 473 | 00302.nii.gz,8 474 | 00502.nii.gz,8 475 | 00143.nii.gz,8 476 | 00512.nii.gz,8 477 | 00177.nii.gz,8 478 | 00206.nii.gz,8 479 | 00451.nii.gz,8 480 | 00038.nii.gz,8 481 | 00438.nii.gz,8 482 | 00186.nii.gz,8 483 | 00492.nii.gz,8 484 | 00024.nii.gz,8 485 | 00187.nii.gz,8 486 | 
00219.nii.gz,8 487 | 00545.nii.gz,8 488 | 00274.nii.gz,8 489 | 00534.nii.gz,8 490 | 00420.nii.gz,8 491 | 00397.nii.gz,8 492 | 00516.nii.gz,8 493 | 00116.nii.gz,8 494 | 00350.nii.gz,8 495 | 00290.nii.gz,8 496 | 00016.nii.gz,8 497 | 00248.nii.gz,9 498 | 00142.nii.gz,9 499 | 00373.nii.gz,9 500 | 00270.nii.gz,9 501 | 00105.nii.gz,9 502 | 00494.nii.gz,9 503 | 00039.nii.gz,9 504 | 00107.nii.gz,9 505 | 00265.nii.gz,9 506 | 00167.nii.gz,9 507 | 00149.nii.gz,9 508 | 00480.nii.gz,9 509 | 00300.nii.gz,9 510 | 00053.nii.gz,9 511 | 00505.nii.gz,9 512 | 00294.nii.gz,9 513 | 00222.nii.gz,9 514 | 00216.nii.gz,9 515 | 00093.nii.gz,9 516 | 00531.nii.gz,9 517 | 00152.nii.gz,9 518 | 00086.nii.gz,9 519 | 00191.nii.gz,9 520 | 00119.nii.gz,9 521 | 00391.nii.gz,9 522 | 00488.nii.gz,9 523 | 00082.nii.gz,9 524 | 00283.nii.gz,9 525 | 00441.nii.gz,9 526 | 00200.nii.gz,9 527 | 00268.nii.gz,9 528 | 00224.nii.gz,9 529 | 00025.nii.gz,9 530 | 00160.nii.gz,9 531 | 00094.nii.gz,9 532 | 00068.nii.gz,9 533 | 00032.nii.gz,9 534 | 00365.nii.gz,9 535 | 00412.nii.gz,9 536 | 00065.nii.gz,9 537 | 00128.nii.gz,9 538 | 00080.nii.gz,9 539 | 00232.nii.gz,9 540 | 00470.nii.gz,9 541 | 00015.nii.gz,9 542 | 00127.nii.gz,9 543 | 00045.nii.gz,9 544 | 00008.nii.gz,9 545 | 00464.nii.gz,9 546 | 00033.nii.gz,9 547 | 00421.nii.gz,9 548 | 00440.nii.gz,9 549 | 00388.nii.gz,9 550 | 00462.nii.gz,9 551 | 00125.nii.gz,9 552 | -------------------------------------------------------------------------------- /folds/brain/train_folds_10.csv: -------------------------------------------------------------------------------- 1 | filename,test_fold 2 | 00525.nii.gz,0 3 | 00409.nii.gz,0 4 | 00370.nii.gz,0 5 | 00483.nii.gz,0 6 | 00075.nii.gz,0 7 | 00785.nii.gz,0 8 | 00716.nii.gz,0 9 | 00405.nii.gz,0 10 | 00495.nii.gz,0 11 | 00173.nii.gz,0 12 | 00456.nii.gz,0 13 | 00201.nii.gz,0 14 | 00732.nii.gz,0 15 | 00164.nii.gz,0 16 | 00367.nii.gz,0 17 | 00596.nii.gz,0 18 | 00349.nii.gz,0 19 | 00295.nii.gz,0 20 | 00501.nii.gz,0 21 | 00620.nii.gz,0 22 | 00630.nii.gz,0 23 | 00652.nii.gz,0 24 | 00622.nii.gz,0 25 | 00343.nii.gz,0 26 | 00494.nii.gz,0 27 | 00725.nii.gz,0 28 | 00257.nii.gz,0 29 | 00014.nii.gz,0 30 | 00266.nii.gz,0 31 | 00210.nii.gz,0 32 | 00512.nii.gz,0 33 | 00478.nii.gz,0 34 | 00360.nii.gz,0 35 | 00475.nii.gz,0 36 | 00498.nii.gz,0 37 | 00443.nii.gz,0 38 | 00203.nii.gz,0 39 | 00246.nii.gz,0 40 | 00627.nii.gz,0 41 | 00484.nii.gz,0 42 | 00127.nii.gz,0 43 | 00592.nii.gz,0 44 | 00703.nii.gz,0 45 | 00194.nii.gz,0 46 | 00232.nii.gz,0 47 | 00122.nii.gz,0 48 | 00748.nii.gz,0 49 | 00434.nii.gz,0 50 | 00719.nii.gz,0 51 | 00593.nii.gz,0 52 | 00148.nii.gz,0 53 | 00776.nii.gz,0 54 | 00466.nii.gz,0 55 | 00220.nii.gz,0 56 | 00090.nii.gz,0 57 | 00196.nii.gz,0 58 | 00705.nii.gz,0 59 | 00214.nii.gz,0 60 | 00219.nii.gz,0 61 | 00225.nii.gz,0 62 | 00290.nii.gz,0 63 | 00706.nii.gz,0 64 | 00095.nii.gz,0 65 | 00407.nii.gz,0 66 | 00519.nii.gz,0 67 | 00533.nii.gz,0 68 | 00268.nii.gz,0 69 | 00013.nii.gz,0 70 | 00604.nii.gz,0 71 | 00079.nii.gz,0 72 | 00430.nii.gz,0 73 | 00740.nii.gz,0 74 | 00683.nii.gz,0 75 | 00797.nii.gz,0 76 | 00474.nii.gz,0 77 | 00395.nii.gz,0 78 | 00578.nii.gz,0 79 | 00235.nii.gz,0 80 | 00548.nii.gz,0 81 | 00195.nii.gz,0 82 | 00377.nii.gz,1 83 | 00124.nii.gz,1 84 | 00674.nii.gz,1 85 | 00726.nii.gz,1 86 | 00539.nii.gz,1 87 | 00068.nii.gz,1 88 | 00115.nii.gz,1 89 | 00231.nii.gz,1 90 | 00555.nii.gz,1 91 | 00066.nii.gz,1 92 | 00156.nii.gz,1 93 | 00752.nii.gz,1 94 | 00505.nii.gz,1 95 | 00690.nii.gz,1 96 | 00150.nii.gz,1 97 | 00680.nii.gz,1 98 | 00249.nii.gz,1 99 | 
00385.nii.gz,1 100 | 00477.nii.gz,1 101 | 00769.nii.gz,1 102 | 00248.nii.gz,1 103 | 00582.nii.gz,1 104 | 00282.nii.gz,1 105 | 00442.nii.gz,1 106 | 00324.nii.gz,1 107 | 00460.nii.gz,1 108 | 00297.nii.gz,1 109 | 00162.nii.gz,1 110 | 00209.nii.gz,1 111 | 00663.nii.gz,1 112 | 00401.nii.gz,1 113 | 00746.nii.gz,1 114 | 00581.nii.gz,1 115 | 00433.nii.gz,1 116 | 00584.nii.gz,1 117 | 00422.nii.gz,1 118 | 00404.nii.gz,1 119 | 00480.nii.gz,1 120 | 00350.nii.gz,1 121 | 00572.nii.gz,1 122 | 00774.nii.gz,1 123 | 00672.nii.gz,1 124 | 00568.nii.gz,1 125 | 00019.nii.gz,1 126 | 00166.nii.gz,1 127 | 00784.nii.gz,1 128 | 00561.nii.gz,1 129 | 00330.nii.gz,1 130 | 00576.nii.gz,1 131 | 00742.nii.gz,1 132 | 00727.nii.gz,1 133 | 00728.nii.gz,1 134 | 00355.nii.gz,1 135 | 00097.nii.gz,1 136 | 00088.nii.gz,1 137 | 00648.nii.gz,1 138 | 00492.nii.gz,1 139 | 00694.nii.gz,1 140 | 00424.nii.gz,1 141 | 00559.nii.gz,1 142 | 00585.nii.gz,1 143 | 00400.nii.gz,1 144 | 00348.nii.gz,1 145 | 00790.nii.gz,1 146 | 00469.nii.gz,1 147 | 00394.nii.gz,1 148 | 00675.nii.gz,1 149 | 00237.nii.gz,1 150 | 00263.nii.gz,1 151 | 00047.nii.gz,1 152 | 00119.nii.gz,1 153 | 00025.nii.gz,1 154 | 00723.nii.gz,1 155 | 00224.nii.gz,1 156 | 00756.nii.gz,1 157 | 00398.nii.gz,1 158 | 00397.nii.gz,1 159 | 00289.nii.gz,1 160 | 00758.nii.gz,1 161 | 00590.nii.gz,1 162 | 00034.nii.gz,2 163 | 00380.nii.gz,2 164 | 00129.nii.gz,2 165 | 00256.nii.gz,2 166 | 00339.nii.gz,2 167 | 00388.nii.gz,2 168 | 00292.nii.gz,2 169 | 00254.nii.gz,2 170 | 00346.nii.gz,2 171 | 00272.nii.gz,2 172 | 00700.nii.gz,2 173 | 00082.nii.gz,2 174 | 00042.nii.gz,2 175 | 00185.nii.gz,2 176 | 00761.nii.gz,2 177 | 00729.nii.gz,2 178 | 00359.nii.gz,2 179 | 00775.nii.gz,2 180 | 00789.nii.gz,2 181 | 00447.nii.gz,2 182 | 00488.nii.gz,2 183 | 00234.nii.gz,2 184 | 00736.nii.gz,2 185 | 00107.nii.gz,2 186 | 00391.nii.gz,2 187 | 00216.nii.gz,2 188 | 00538.nii.gz,2 189 | 00165.nii.gz,2 190 | 00624.nii.gz,2 191 | 00562.nii.gz,2 192 | 00587.nii.gz,2 193 | 00011.nii.gz,2 194 | 00634.nii.gz,2 195 | 00523.nii.gz,2 196 | 00678.nii.gz,2 197 | 00792.nii.gz,2 198 | 00294.nii.gz,2 199 | 00528.nii.gz,2 200 | 00566.nii.gz,2 201 | 00491.nii.gz,2 202 | 00595.nii.gz,2 203 | 00613.nii.gz,2 204 | 00650.nii.gz,2 205 | 00038.nii.gz,2 206 | 00453.nii.gz,2 207 | 00141.nii.gz,2 208 | 00103.nii.gz,2 209 | 00689.nii.gz,2 210 | 00325.nii.gz,2 211 | 00486.nii.gz,2 212 | 00709.nii.gz,2 213 | 00159.nii.gz,2 214 | 00720.nii.gz,2 215 | 00429.nii.gz,2 216 | 00646.nii.gz,2 217 | 00336.nii.gz,2 218 | 00192.nii.gz,2 219 | 00332.nii.gz,2 220 | 00731.nii.gz,2 221 | 00524.nii.gz,2 222 | 00554.nii.gz,2 223 | 00450.nii.gz,2 224 | 00531.nii.gz,2 225 | 00086.nii.gz,2 226 | 00054.nii.gz,2 227 | 00009.nii.gz,2 228 | 00399.nii.gz,2 229 | 00131.nii.gz,2 230 | 00177.nii.gz,2 231 | 00625.nii.gz,2 232 | 00362.nii.gz,2 233 | 00044.nii.gz,2 234 | 00240.nii.gz,2 235 | 00656.nii.gz,2 236 | 00637.nii.gz,2 237 | 00773.nii.gz,2 238 | 00344.nii.gz,2 239 | 00205.nii.gz,2 240 | 00280.nii.gz,2 241 | 00633.nii.gz,2 242 | 00271.nii.gz,3 243 | 00770.nii.gz,3 244 | 00245.nii.gz,3 245 | 00226.nii.gz,3 246 | 00540.nii.gz,3 247 | 00363.nii.gz,3 248 | 00657.nii.gz,3 249 | 00472.nii.gz,3 250 | 00002.nii.gz,3 251 | 00105.nii.gz,3 252 | 00264.nii.gz,3 253 | 00635.nii.gz,3 254 | 00111.nii.gz,3 255 | 00421.nii.gz,3 256 | 00765.nii.gz,3 257 | 00190.nii.gz,3 258 | 00277.nii.gz,3 259 | 00714.nii.gz,3 260 | 00402.nii.gz,3 261 | 00428.nii.gz,3 262 | 00221.nii.gz,3 263 | 00198.nii.gz,3 264 | 00108.nii.gz,3 265 | 00117.nii.gz,3 266 | 00552.nii.gz,3 267 | 00778.nii.gz,3 268 | 
00747.nii.gz,3 269 | 00713.nii.gz,3 270 | 00629.nii.gz,3 271 | 00621.nii.gz,3 272 | 00303.nii.gz,3 273 | 00182.nii.gz,3 274 | 00467.nii.gz,3 275 | 00087.nii.gz,3 276 | 00518.nii.gz,3 277 | 00684.nii.gz,3 278 | 00176.nii.gz,3 279 | 00041.nii.gz,3 280 | 00128.nii.gz,3 281 | 00660.nii.gz,3 282 | 00372.nii.gz,3 283 | 00589.nii.gz,3 284 | 00661.nii.gz,3 285 | 00293.nii.gz,3 286 | 00520.nii.gz,3 287 | 00020.nii.gz,3 288 | 00365.nii.gz,3 289 | 00503.nii.gz,3 290 | 00760.nii.gz,3 291 | 00027.nii.gz,3 292 | 00766.nii.gz,3 293 | 00081.nii.gz,3 294 | 00615.nii.gz,3 295 | 00611.nii.gz,3 296 | 00782.nii.gz,3 297 | 00569.nii.gz,3 298 | 00051.nii.gz,3 299 | 00414.nii.gz,3 300 | 00015.nii.gz,3 301 | 00583.nii.gz,3 302 | 00084.nii.gz,3 303 | 00375.nii.gz,3 304 | 00361.nii.gz,3 305 | 00698.nii.gz,3 306 | 00114.nii.gz,3 307 | 00515.nii.gz,3 308 | 00701.nii.gz,3 309 | 00788.nii.gz,3 310 | 00040.nii.gz,3 311 | 00659.nii.gz,3 312 | 00651.nii.gz,3 313 | 00499.nii.gz,3 314 | 00338.nii.gz,3 315 | 00267.nii.gz,3 316 | 00437.nii.gz,3 317 | 00168.nii.gz,3 318 | 00387.nii.gz,3 319 | 00616.nii.gz,3 320 | 00069.nii.gz,3 321 | 00354.nii.gz,3 322 | 00073.nii.gz,4 323 | 00006.nii.gz,4 324 | 00435.nii.gz,4 325 | 00710.nii.gz,4 326 | 00279.nii.gz,4 327 | 00152.nii.gz,4 328 | 00022.nii.gz,4 329 | 00606.nii.gz,4 330 | 00335.nii.gz,4 331 | 00250.nii.gz,4 332 | 00623.nii.gz,4 333 | 00116.nii.gz,4 334 | 00163.nii.gz,4 335 | 00543.nii.gz,4 336 | 00017.nii.gz,4 337 | 00724.nii.gz,4 338 | 00575.nii.gz,4 339 | 00390.nii.gz,4 340 | 00340.nii.gz,4 341 | 00579.nii.gz,4 342 | 00308.nii.gz,4 343 | 00600.nii.gz,4 344 | 00379.nii.gz,4 345 | 00642.nii.gz,4 346 | 00058.nii.gz,4 347 | 00130.nii.gz,4 348 | 00236.nii.gz,4 349 | 00695.nii.gz,4 350 | 00199.nii.gz,4 351 | 00285.nii.gz,4 352 | 00461.nii.gz,4 353 | 00735.nii.gz,4 354 | 00376.nii.gz,4 355 | 00485.nii.gz,4 356 | 00060.nii.gz,4 357 | 00463.nii.gz,4 358 | 00655.nii.gz,4 359 | 00298.nii.gz,4 360 | 00715.nii.gz,4 361 | 00665.nii.gz,4 362 | 00089.nii.gz,4 363 | 00764.nii.gz,4 364 | 00452.nii.gz,4 365 | 00529.nii.gz,4 366 | 00187.nii.gz,4 367 | 00601.nii.gz,4 368 | 00053.nii.gz,4 369 | 00064.nii.gz,4 370 | 00473.nii.gz,4 371 | 00547.nii.gz,4 372 | 00067.nii.gz,4 373 | 00351.nii.gz,4 374 | 00197.nii.gz,4 375 | 00476.nii.gz,4 376 | 00455.nii.gz,4 377 | 00018.nii.gz,4 378 | 00448.nii.gz,4 379 | 00003.nii.gz,4 380 | 00482.nii.gz,4 381 | 00172.nii.gz,4 382 | 00667.nii.gz,4 383 | 00241.nii.gz,4 384 | 00799.nii.gz,4 385 | 00413.nii.gz,4 386 | 00035.nii.gz,4 387 | 00212.nii.gz,4 388 | 00318.nii.gz,4 389 | 00481.nii.gz,4 390 | 00384.nii.gz,4 391 | 00327.nii.gz,4 392 | 00083.nii.gz,4 393 | 00526.nii.gz,4 394 | 00744.nii.gz,4 395 | 00110.nii.gz,4 396 | 00419.nii.gz,4 397 | 00489.nii.gz,4 398 | 00302.nii.gz,4 399 | 00753.nii.gz,4 400 | 00233.nii.gz,4 401 | 00028.nii.gz,4 402 | 00309.nii.gz,5 403 | 00181.nii.gz,5 404 | 00304.nii.gz,5 405 | 00567.nii.gz,5 406 | 00570.nii.gz,5 407 | 00446.nii.gz,5 408 | 00679.nii.gz,5 409 | 00640.nii.gz,5 410 | 00383.nii.gz,5 411 | 00291.nii.gz,5 412 | 00535.nii.gz,5 413 | 00440.nii.gz,5 414 | 00099.nii.gz,5 415 | 00316.nii.gz,5 416 | 00458.nii.gz,5 417 | 00707.nii.gz,5 418 | 00145.nii.gz,5 419 | 00137.nii.gz,5 420 | 00132.nii.gz,5 421 | 00341.nii.gz,5 422 | 00509.nii.gz,5 423 | 00071.nii.gz,5 424 | 00217.nii.gz,5 425 | 00668.nii.gz,5 426 | 00416.nii.gz,5 427 | 00410.nii.gz,5 428 | 00681.nii.gz,5 429 | 00743.nii.gz,5 430 | 00502.nii.gz,5 431 | 00253.nii.gz,5 432 | 00750.nii.gz,5 433 | 00278.nii.gz,5 434 | 00762.nii.gz,5 435 | 00252.nii.gz,5 436 | 00200.nii.gz,5 437 | 
00364.nii.gz,5 438 | 00763.nii.gz,5 439 | 00001.nii.gz,5 440 | 00024.nii.gz,5 441 | 00702.nii.gz,5 442 | 00513.nii.gz,5 443 | 00353.nii.gz,5 444 | 00757.nii.gz,5 445 | 00692.nii.gz,5 446 | 00312.nii.gz,5 447 | 00223.nii.gz,5 448 | 00144.nii.gz,5 449 | 00449.nii.gz,5 450 | 00188.nii.gz,5 451 | 00149.nii.gz,5 452 | 00112.nii.gz,5 453 | 00096.nii.gz,5 454 | 00260.nii.gz,5 455 | 00737.nii.gz,5 456 | 00262.nii.gz,5 457 | 00445.nii.gz,5 458 | 00155.nii.gz,5 459 | 00508.nii.gz,5 460 | 00696.nii.gz,5 461 | 00500.nii.gz,5 462 | 00033.nii.gz,5 463 | 00259.nii.gz,5 464 | 00189.nii.gz,5 465 | 00670.nii.gz,5 466 | 00012.nii.gz,5 467 | 00541.nii.gz,5 468 | 00276.nii.gz,5 469 | 00331.nii.gz,5 470 | 00662.nii.gz,5 471 | 00251.nii.gz,5 472 | 00039.nii.gz,5 473 | 00157.nii.gz,5 474 | 00738.nii.gz,5 475 | 00471.nii.gz,5 476 | 00158.nii.gz,5 477 | 00313.nii.gz,5 478 | 00311.nii.gz,5 479 | 00146.nii.gz,5 480 | 00118.nii.gz,5 481 | 00636.nii.gz,5 482 | 00793.nii.gz,6 483 | 00161.nii.gz,6 484 | 00686.nii.gz,6 485 | 00284.nii.gz,6 486 | 00658.nii.gz,6 487 | 00536.nii.gz,6 488 | 00444.nii.gz,6 489 | 00322.nii.gz,6 490 | 00261.nii.gz,6 491 | 00506.nii.gz,6 492 | 00273.nii.gz,6 493 | 00632.nii.gz,6 494 | 00396.nii.gz,6 495 | 00673.nii.gz,6 496 | 00244.nii.gz,6 497 | 00711.nii.gz,6 498 | 00542.nii.gz,6 499 | 00573.nii.gz,6 500 | 00516.nii.gz,6 501 | 00685.nii.gz,6 502 | 00052.nii.gz,6 503 | 00043.nii.gz,6 504 | 00565.nii.gz,6 505 | 00296.nii.gz,6 506 | 00125.nii.gz,6 507 | 00057.nii.gz,6 508 | 00591.nii.gz,6 509 | 00618.nii.gz,6 510 | 00671.nii.gz,6 511 | 00204.nii.gz,6 512 | 00174.nii.gz,6 513 | 00202.nii.gz,6 514 | 00717.nii.gz,6 515 | 00669.nii.gz,6 516 | 00545.nii.gz,6 517 | 00608.nii.gz,6 518 | 00143.nii.gz,6 519 | 00749.nii.gz,6 520 | 00745.nii.gz,6 521 | 00687.nii.gz,6 522 | 00008.nii.gz,6 523 | 00571.nii.gz,6 524 | 00175.nii.gz,6 525 | 00287.nii.gz,6 526 | 00427.nii.gz,6 527 | 00093.nii.gz,6 528 | 00321.nii.gz,6 529 | 00016.nii.gz,6 530 | 00734.nii.gz,6 531 | 00454.nii.gz,6 532 | 00791.nii.gz,6 533 | 00586.nii.gz,6 534 | 00222.nii.gz,6 535 | 00281.nii.gz,6 536 | 00612.nii.gz,6 537 | 00007.nii.gz,6 538 | 00425.nii.gz,6 539 | 00557.nii.gz,6 540 | 00242.nii.gz,6 541 | 00393.nii.gz,6 542 | 00045.nii.gz,6 543 | 00641.nii.gz,6 544 | 00080.nii.gz,6 545 | 00739.nii.gz,6 546 | 00708.nii.gz,6 547 | 00269.nii.gz,6 548 | 00056.nii.gz,6 549 | 00074.nii.gz,6 550 | 00005.nii.gz,6 551 | 00465.nii.gz,6 552 | 00213.nii.gz,6 553 | 00323.nii.gz,6 554 | 00479.nii.gz,6 555 | 00644.nii.gz,6 556 | 00230.nii.gz,6 557 | 00049.nii.gz,6 558 | 00560.nii.gz,6 559 | 00497.nii.gz,6 560 | 00299.nii.gz,6 561 | 00218.nii.gz,6 562 | 00704.nii.gz,7 563 | 00676.nii.gz,7 564 | 00010.nii.gz,7 565 | 00030.nii.gz,7 566 | 00382.nii.gz,7 567 | 00654.nii.gz,7 568 | 00464.nii.gz,7 569 | 00153.nii.gz,7 570 | 00411.nii.gz,7 571 | 00306.nii.gz,7 572 | 00186.nii.gz,7 573 | 00730.nii.gz,7 574 | 00767.nii.gz,7 575 | 00631.nii.gz,7 576 | 00301.nii.gz,7 577 | 00438.nii.gz,7 578 | 00755.nii.gz,7 579 | 00274.nii.gz,7 580 | 00741.nii.gz,7 581 | 00597.nii.gz,7 582 | 00378.nii.gz,7 583 | 00171.nii.gz,7 584 | 00754.nii.gz,7 585 | 00286.nii.gz,7 586 | 00184.nii.gz,7 587 | 00530.nii.gz,7 588 | 00551.nii.gz,7 589 | 00546.nii.gz,7 590 | 00638.nii.gz,7 591 | 00381.nii.gz,7 592 | 00420.nii.gz,7 593 | 00493.nii.gz,7 594 | 00123.nii.gz,7 595 | 00426.nii.gz,7 596 | 00556.nii.gz,7 597 | 00721.nii.gz,7 598 | 00768.nii.gz,7 599 | 00507.nii.gz,7 600 | 00603.nii.gz,7 601 | 00134.nii.gz,7 602 | 00070.nii.gz,7 603 | 00154.nii.gz,7 604 | 00310.nii.gz,7 605 | 00504.nii.gz,7 606 | 
00063.nii.gz,7 607 | 00602.nii.gz,7 608 | 00078.nii.gz,7 609 | 00610.nii.gz,7 610 | 00647.nii.gz,7 611 | 00055.nii.gz,7 612 | 00183.nii.gz,7 613 | 00574.nii.gz,7 614 | 00532.nii.gz,7 615 | 00796.nii.gz,7 616 | 00208.nii.gz,7 617 | 00677.nii.gz,7 618 | 00697.nii.gz,7 619 | 00521.nii.gz,7 620 | 00238.nii.gz,7 621 | 00135.nii.gz,7 622 | 00300.nii.gz,7 623 | 00147.nii.gz,7 624 | 00794.nii.gz,7 625 | 00337.nii.gz,7 626 | 00783.nii.gz,7 627 | 00228.nii.gz,7 628 | 00180.nii.gz,7 629 | 00408.nii.gz,7 630 | 00490.nii.gz,7 631 | 00691.nii.gz,7 632 | 00345.nii.gz,7 633 | 00072.nii.gz,7 634 | 00577.nii.gz,7 635 | 00102.nii.gz,7 636 | 00094.nii.gz,7 637 | 00451.nii.gz,7 638 | 00614.nii.gz,7 639 | 00511.nii.gz,7 640 | 00307.nii.gz,7 641 | 00366.nii.gz,7 642 | 00106.nii.gz,8 643 | 00160.nii.gz,8 644 | 00138.nii.gz,8 645 | 00320.nii.gz,8 646 | 00215.nii.gz,8 647 | 00534.nii.gz,8 648 | 00077.nii.gz,8 649 | 00026.nii.gz,8 650 | 00334.nii.gz,8 651 | 00347.nii.gz,8 652 | 00436.nii.gz,8 653 | 00121.nii.gz,8 654 | 00139.nii.gz,8 655 | 00772.nii.gz,8 656 | 00798.nii.gz,8 657 | 00085.nii.gz,8 658 | 00718.nii.gz,8 659 | 00305.nii.gz,8 660 | 00771.nii.gz,8 661 | 00046.nii.gz,8 662 | 00239.nii.gz,8 663 | 00207.nii.gz,8 664 | 00091.nii.gz,8 665 | 00619.nii.gz,8 666 | 00598.nii.gz,8 667 | 00023.nii.gz,8 668 | 00356.nii.gz,8 669 | 00142.nii.gz,8 670 | 00626.nii.gz,8 671 | 00317.nii.gz,8 672 | 00076.nii.gz,8 673 | 00329.nii.gz,8 674 | 00787.nii.gz,8 675 | 00688.nii.gz,8 676 | 00558.nii.gz,8 677 | 00065.nii.gz,8 678 | 00487.nii.gz,8 679 | 00151.nii.gz,8 680 | 00522.nii.gz,8 681 | 00373.nii.gz,8 682 | 00470.nii.gz,8 683 | 00417.nii.gz,8 684 | 00733.nii.gz,8 685 | 00439.nii.gz,8 686 | 00206.nii.gz,8 687 | 00564.nii.gz,8 688 | 00431.nii.gz,8 689 | 00342.nii.gz,8 690 | 00229.nii.gz,8 691 | 00457.nii.gz,8 692 | 00031.nii.gz,8 693 | 00371.nii.gz,8 694 | 00423.nii.gz,8 695 | 00029.nii.gz,8 696 | 00599.nii.gz,8 697 | 00441.nii.gz,8 698 | 00514.nii.gz,8 699 | 00462.nii.gz,8 700 | 00609.nii.gz,8 701 | 00100.nii.gz,8 702 | 00319.nii.gz,8 703 | 00369.nii.gz,8 704 | 00605.nii.gz,8 705 | 00255.nii.gz,8 706 | 00537.nii.gz,8 707 | 00432.nii.gz,8 708 | 00283.nii.gz,8 709 | 00004.nii.gz,8 710 | 00759.nii.gz,8 711 | 00389.nii.gz,8 712 | 00170.nii.gz,8 713 | 00140.nii.gz,8 714 | 00021.nii.gz,8 715 | 00258.nii.gz,8 716 | 00265.nii.gz,8 717 | 00048.nii.gz,8 718 | 00643.nii.gz,8 719 | 00333.nii.gz,8 720 | 00169.nii.gz,8 721 | 00392.nii.gz,8 722 | 00510.nii.gz,9 723 | 00061.nii.gz,9 724 | 00326.nii.gz,9 725 | 00527.nii.gz,9 726 | 00781.nii.gz,9 727 | 00000.nii.gz,9 728 | 00549.nii.gz,9 729 | 00275.nii.gz,9 730 | 00639.nii.gz,9 731 | 00386.nii.gz,9 732 | 00468.nii.gz,9 733 | 00193.nii.gz,9 734 | 00682.nii.gz,9 735 | 00104.nii.gz,9 736 | 00693.nii.gz,9 737 | 00243.nii.gz,9 738 | 00588.nii.gz,9 739 | 00777.nii.gz,9 740 | 00664.nii.gz,9 741 | 00795.nii.gz,9 742 | 00649.nii.gz,9 743 | 00270.nii.gz,9 744 | 00178.nii.gz,9 745 | 00036.nii.gz,9 746 | 00227.nii.gz,9 747 | 00406.nii.gz,9 748 | 00167.nii.gz,9 749 | 00120.nii.gz,9 750 | 00092.nii.gz,9 751 | 00699.nii.gz,9 752 | 00607.nii.gz,9 753 | 00101.nii.gz,9 754 | 00050.nii.gz,9 755 | 00403.nii.gz,9 756 | 00368.nii.gz,9 757 | 00594.nii.gz,9 758 | 00357.nii.gz,9 759 | 00779.nii.gz,9 760 | 00098.nii.gz,9 761 | 00617.nii.gz,9 762 | 00418.nii.gz,9 763 | 00653.nii.gz,9 764 | 00412.nii.gz,9 765 | 00113.nii.gz,9 766 | 00059.nii.gz,9 767 | 00517.nii.gz,9 768 | 00109.nii.gz,9 769 | 00580.nii.gz,9 770 | 00126.nii.gz,9 771 | 00666.nii.gz,9 772 | 00415.nii.gz,9 773 | 00780.nii.gz,9 774 | 00314.nii.gz,9 775 | 
00062.nii.gz,9 776 | 00550.nii.gz,9 777 | 00211.nii.gz,9 778 | 00191.nii.gz,9 779 | 00751.nii.gz,9 780 | 00136.nii.gz,9 781 | 00352.nii.gz,9 782 | 00712.nii.gz,9 783 | 00247.nii.gz,9 784 | 00374.nii.gz,9 785 | 00179.nii.gz,9 786 | 00315.nii.gz,9 787 | 00722.nii.gz,9 788 | 00645.nii.gz,9 789 | 00037.nii.gz,9 790 | 00133.nii.gz,9 791 | 00628.nii.gz,9 792 | 00544.nii.gz,9 793 | 00563.nii.gz,9 794 | 00459.nii.gz,9 795 | 00032.nii.gz,9 796 | 00328.nii.gz,9 797 | 00496.nii.gz,9 798 | 00288.nii.gz,9 799 | 00553.nii.gz,9 800 | 00358.nii.gz,9 801 | 00786.nii.gz,9 802 | -------------------------------------------------------------------------------- /mood/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ninatu/mood_challenge/c5b8d5ea0bd5fa4be512d099ab5f271b3060e33f/mood/__init__.py -------------------------------------------------------------------------------- /mood/dpa/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ninatu/mood_challenge/c5b8d5ea0bd5fa4be512d099ab5f271b3060e33f/mood/dpa/__init__.py -------------------------------------------------------------------------------- /mood/dpa/data_generators.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | 3 | from torch.utils.data.dataloader import DataLoader 4 | from torch import nn 5 | 6 | from mood.dpa.layers import EqualLayer, ConcatLayer, FadeinLayer 7 | from mood.dpa.pg_networks import ProgGrowStageType 8 | 9 | 10 | class AbstractProgGrowGenerator(ABC): 11 | @abstractmethod 12 | def set_stage_resolution(self, stage, resolution, batch_size): 13 | pass 14 | 15 | @abstractmethod 16 | def set_progress(self, progress): 17 | pass 18 | 19 | @abstractmethod 20 | def __next__(self): 21 | pass 22 | 23 | 24 | class ProgGrowImageGenerator(AbstractProgGrowGenerator): 25 | def __init__(self, dataset, max_resolution, batch_size, num_workers=8, inf=False, mode_3d=False): 26 | super(ProgGrowImageGenerator, self).__init__() 27 | 28 | self._dataset = dataset 29 | self._max_resolution = max_resolution 30 | self._inf = inf 31 | self._batch_size = batch_size 32 | self._mode_3d = mode_3d 33 | 34 | self.num_workers = num_workers 35 | 36 | self._data_loader = DataLoader(self._dataset, batch_size=batch_size, shuffle=True, drop_last=True, 37 | num_workers=self.num_workers, pin_memory=False) 38 | self._image_gen = iter(self._data_loader) 39 | 40 | self._resolution = max_resolution 41 | self._stage = ProgGrowStageType.stab 42 | self._mix_res_module = MixResolution(self._stage, self._resolution, self._max_resolution, mode_3d=self._mode_3d) 43 | 44 | def set_stage_resolution(self, stage, resolution, batch_size): 45 | self._stage = stage 46 | self._resolution = resolution 47 | 48 | # create new iterator if it is necessary 49 | if self._batch_size != batch_size: 50 | self._batch_size = batch_size 51 | 52 | self._data_loader = DataLoader(self._dataset, batch_size=batch_size, shuffle=True, drop_last=True, 53 | num_workers=self.num_workers, pin_memory=False) 54 | self._image_gen = iter(self._data_loader) 55 | 56 | self._mix_res_module = MixResolution(self._stage, self._resolution, self._max_resolution, mode_3d=self._mode_3d) 57 | 58 | def set_progress(self, progress): 59 | self._mix_res_module.set_progress(progress) 60 | 61 | def __iter__(self): 62 | self._image_gen = iter(self._data_loader) 63 | return self 64 | 65 | def __len__(self): 66 | 
return len(self._data_loader) 67 | 68 | def __next__(self): 69 | images = next(self._image_gen, None) 70 | if images is None: 71 | if self._inf: 72 | # del self._image_gen 73 | self._image_gen = iter(self._data_loader) 74 | images = next(self._image_gen, None) 75 | else: 76 | raise StopIteration() 77 | 78 | return self._mix_res_module(images).cuda() 79 | 80 | 81 | 82 | class MixResolution(nn.Module): 83 | def __init__(self, stage, resolution, max_resolution, mode_3d=False): 84 | super(MixResolution, self).__init__() 85 | self._stage = stage 86 | 87 | mode = 'trilinear' if mode_3d else 'bilinear' 88 | 89 | if resolution == max_resolution: 90 | high_res = EqualLayer() 91 | else: 92 | scale_factor = int(max_resolution / resolution) 93 | high_res = nn.Sequential( 94 | nn.Upsample(scale_factor=1/scale_factor, mode=mode), 95 | ) 96 | 97 | if stage == ProgGrowStageType.stab: 98 | self.mix_res_model = high_res 99 | elif stage == ProgGrowStageType.trns: 100 | self.mix_res_model = nn.Sequential() 101 | 102 | scale_factor = int(max_resolution / (resolution / 2)) 103 | low_res = nn.Sequential( 104 | nn.Upsample(scale_factor=1/scale_factor, mode=mode), 105 | nn.Upsample(scale_factor=2, mode='nearest') 106 | ) 107 | 108 | self.mix_res_model.add_module('concat', ConcatLayer(low_res, high_res)) 109 | self.mix_res_model.add_module('fadein', FadeinLayer()) 110 | else: 111 | raise NotImplementedError 112 | 113 | def set_progress(self, progress): 114 | if self._stage == ProgGrowStageType.trns: 115 | self.mix_res_model.fadein.set_progress(progress) 116 | 117 | def forward(self, x): 118 | return self.mix_res_model(x) 119 | -------------------------------------------------------------------------------- /mood/dpa/evaluate.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import yaml 3 | import os 4 | import torch 5 | from tqdm import tqdm 6 | from torch.utils.data import DataLoader 7 | from sklearn.metrics import roc_auc_score, average_precision_score 8 | import numpy as np 9 | import pandas as pd 10 | import nibabel as nib 11 | 12 | from mood.dpa.train import DeepPerceptualAutoencoder 13 | from mood.utils.datasets import DatasetType, DATASETS 14 | from mood.utils.transforms import TRANSFORMS 15 | from mood.dpa.rec_losses import ReconstructionLossType 16 | from mood.dpa.pg_rec_losses import PG_RECONSTRUCTION_LOSSES 17 | 18 | 19 | def main(config): 20 | verbose = config['verbose'] 21 | batch_size = config['test_batch_size'] 22 | results_root = config['results_root'] 23 | model_path = config['test_model_path'] 24 | save_inference = config['save_inference'] 25 | test_resize = config.get('test_resize') 26 | test_image_rec_loss = config.get('test_image_rec_loss') 27 | score_reduction = config['score_reduction'] 28 | 29 | os.makedirs(results_root, exist_ok=True) 30 | 31 | if verbose: 32 | print(yaml.dump(config, default_flow_style=False)) 33 | 34 | loaded_model = DeepPerceptualAutoencoder.load_anomaly_detection_model(torch.load(model_path)) 35 | if test_image_rec_loss is not None: 36 | enc, dec, image_rec_loss, (stage, resolution, progress, n_iter, mix_res_module) = loaded_model 37 | loss_type = config['test_image_rec_loss']['loss_type'] 38 | loss_kwargs = config['test_image_rec_loss']['loss_kwargs'] 39 | image_rec_loss = PG_RECONSTRUCTION_LOSSES[ReconstructionLossType[loss_type]]( 40 | max_resolution=resolution, **loss_kwargs) 41 | image_rec_loss.set_stage_resolution(stage, resolution) 42 | image_rec_loss.cuda() 43 | loaded_model = enc, dec, image_rec_loss, 
(stage, resolution, progress, n_iter, mix_res_module) 44 | 45 | dataset_type = DatasetType(config['test_dataset']['dataset_type']) 46 | dataset_kwargs = config['test_dataset']['dataset_kwargs'] 47 | transform_kwargs = config['test_dataset']['transform_kwargs'] 48 | 49 | transform = TRANSFORMS[dataset_type](**transform_kwargs) 50 | mask_transform = TRANSFORMS[dataset_type](**transform_kwargs, normalize=False) 51 | 52 | dataset = DATASETS[dataset_type]( 53 | transform=transform, 54 | mask_transform=mask_transform, 55 | return_image_name=True, 56 | **dataset_kwargs 57 | ) 58 | 59 | data_loader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False, num_workers=8) 60 | 61 | if verbose: 62 | data_loader = tqdm(data_loader) 63 | 64 | results_root = config['results_root'] 65 | 66 | sample_true = [] 67 | pixel_true = [] 68 | 69 | sample_pred = [] 70 | pixel_pred = [] 71 | 72 | sample_output_dir = os.path.join(results_root, 'sample') 73 | pixel_output_dir = os.path.join(results_root, 'pixel') 74 | for data in data_loader: 75 | if len(data) == 3: 76 | images, masks, names = data 77 | elif len(data) == 4: 78 | images, masks, affine, names = data 79 | else: 80 | raise NotImplementedError() 81 | 82 | images = images.cuda() 83 | masks = masks.numpy() 84 | 85 | pixel_true.append((masks > 0).flatten()) 86 | sample_true.append(masks.sum(axis=tuple(range(1, len(masks.shape)))) > 0) 87 | 88 | pred = DeepPerceptualAutoencoder.predict_anomaly_score(loaded_model, images, reduction='pixelwise') 89 | pred = pred.detach().cpu().numpy() 90 | assert pred.shape[1] == 1 91 | pred = pred.squeeze(1) 92 | 93 | pixel_pred.append(pred.flatten()) 94 | if save_inference: 95 | save_pixel_score(pixel_output_dir, pred, data) 96 | 97 | if score_reduction == 'mean': 98 | anomaly_score = [x.mean() for x in pred] 99 | elif score_reduction == 'max': 100 | anomaly_score = [x.max() for x in pred] 101 | else: 102 | raise NotImplementedError() 103 | 104 | sample_pred.append(anomaly_score) 105 | save_sample_score(sample_output_dir, anomaly_score, names) 106 | 107 | sample_true = np.concatenate(sample_true).astype(np.bool) 108 | sample_pred = np.concatenate(sample_pred) 109 | pixel_true = np.concatenate(pixel_true).astype(np.bool) 110 | pixel_pred = np.concatenate(pixel_pred) 111 | 112 | ap_sample = average_precision_score(sample_true, sample_pred) 113 | print("AP sample", ap_sample) 114 | auc_sample = roc_auc_score(sample_true, sample_pred) 115 | print("AUC sample", auc_sample) 116 | 117 | ap_pixel = average_precision_score(pixel_true, pixel_pred) 118 | print("AP pixel", ap_pixel) 119 | auc_pixel = roc_auc_score(pixel_true, pixel_pred) 120 | print("AUC pixel", auc_pixel) 121 | 122 | scores = pd.DataFrame( 123 | [[ap_sample, ap_pixel], 124 | [ap_pixel, auc_pixel]], 125 | columns=['AP', 'ROC AUC'], index=['sample', 'pixel'] 126 | ) 127 | 128 | print(scores) 129 | 130 | scores.to_csv(os.path.join(results_root, 'scores.csv')) 131 | 132 | 133 | def save_sample_score(output_dir, scores, image_names): 134 | os.makedirs(output_dir, exist_ok=True) 135 | for score, image_name in zip(scores, image_names): 136 | with open(os.path.join(output_dir, f"{image_name}.txt"), "w") as write_file: 137 | write_file.write(str(score)) 138 | 139 | 140 | def save_pixel_score(output_dir, scores, data): 141 | print(scores.shape) 142 | os.makedirs(output_dir, exist_ok=True) 143 | if len(scores.shape) == 3: 144 | images, masks, names = data 145 | for score, image_name in zip(scores, names): 146 | np.save(os.path.join(output_dir, image_name), score) 147 | 
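# 4D scores are one 3D volume per sample: each is saved as NIfTI with its affine so the
# score map stays aligned with the original scan (3D scores above are plain .npy arrays).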
elif len(scores.shape) == 4: 148 | images, masks, affines, names = data 149 | for score, affine, image_name in zip(scores, affines, names): 150 | print(image_name) 151 | img = nib.Nifti1Image(score, affine=affine) 152 | nib.save(img, os.path.join(output_dir, image_name)) 153 | else: 154 | raise NotImplementedError() 155 | 156 | 157 | if __name__ == '__main__': 158 | parser = argparse.ArgumentParser() 159 | parser.add_argument('configs', type=str, nargs='*', help='Path to eval config') 160 | 161 | args = parser.parse_args() 162 | 163 | for config_path in args.configs: 164 | with open(config_path, 'r') as stream: 165 | config = yaml.load(stream, Loader=yaml.FullLoader) 166 | 167 | main(config) 168 | -------------------------------------------------------------------------------- /mood/dpa/feature_extractor.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import torch.nn.functional as F 3 | import torch 4 | import numpy as np 5 | import random 6 | 7 | from collections import OrderedDict 8 | 9 | from mood.dpa.layers import EqualLayer 10 | from torchvision.models import vgg19 11 | 12 | # TODO: FIX IT! this hack is for testing large 3d scans (in order to fit in 12 Gb GPU memory) 13 | MAX_BATCH = 128 14 | 15 | 16 | class PretrainedVGG19FeatureExtractor(nn.Module): 17 | def __init__(self, pad_type='zero', path_to_vgg19_weights=None, pretrained=False): 18 | super(PretrainedVGG19FeatureExtractor, self).__init__() 19 | self.pad_type = pad_type 20 | 21 | if pad_type == 'reflect': 22 | self.pad = nn.ReflectionPad2d(1) 23 | padding = 0 24 | elif pad_type == 'zero': 25 | self.pad = EqualLayer() 26 | padding = 1 27 | elif pad_type == 'replication': 28 | self.pad = nn.ReplicationPad2d(1) 29 | padding = 0 30 | else: 31 | raise NotImplementedError 32 | 33 | # vgg modules 34 | 35 | self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=padding) 36 | self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=padding) 37 | self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=padding) 38 | self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=padding) 39 | self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=padding) 40 | self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=padding) 41 | self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=padding) 42 | self.conv3_4 = nn.Conv2d(256, 256, kernel_size=3, padding=padding) 43 | self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=padding) 44 | self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, padding=padding) 45 | self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, padding=padding) 46 | self.conv4_4 = nn.Conv2d(512, 512, kernel_size=3, padding=padding) 47 | self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=padding) 48 | self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, padding=padding) 49 | self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, padding=padding) 50 | self.conv5_4 = nn.Conv2d(512, 512, kernel_size=3, padding=padding) 51 | 52 | self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2) 53 | self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2) 54 | self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2) 55 | self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2) 56 | self.pool5 = nn.MaxPool2d(kernel_size=2, stride=2) 57 | 58 | for param in self.parameters(): 59 | param.requires_grad = False 60 | 61 | if path_to_vgg19_weights is not None: 62 | # print(f"Using features from {path_to_vgg19_weights} in Perceptual Loss") 63 | pretrained_state_dict = torch.load(path_to_vgg19_weights) 64 
| elif pretrained: 65 | # print("Using pretrained features in Perceptual Loss") 66 | pretrained_state_dict = vgg19(pretrained=True).features.state_dict() 67 | else: 68 | # print("Using random features in Perceptual Loss") 69 | torch.manual_seed(432) 70 | torch.cuda.manual_seed(3294820948304) 71 | np.random.seed(55254354) 72 | random.seed(543354) 73 | pretrained_state_dict = vgg19(pretrained=False).features.state_dict() 74 | 75 | state_dict = OrderedDict() 76 | for (new_name, _), (old_name, value) in zip(self.state_dict().items(), pretrained_state_dict.items()): 77 | state_dict[new_name] = value 78 | 79 | self.load_state_dict(state_dict) 80 | 81 | def forward(self, x, out_keys): 82 | if len(x) < MAX_BATCH: 83 | return self._forward(x, out_keys) 84 | else: 85 | results = None 86 | for i in range(0, len(x), MAX_BATCH): 87 | y = self._forward(x[i: i + MAX_BATCH], out_keys) 88 | if results is None: 89 | results = y 90 | else: 91 | for j in range(len(results)): 92 | results[j] = torch.cat((results[j], y[j]), dim=0) 93 | return results 94 | 95 | def _forward(self, x, out_keys): 96 | out = {} 97 | 98 | def finished(): 99 | return len(set(out_keys).difference(out.keys())) == 0 100 | 101 | out['c11'] = self.conv1_1(self.pad(x)) if not(finished()) else None 102 | out['r11'] = F.relu(out['c11']) if not(finished()) else None 103 | out['c12'] = self.conv1_2(self.pad(out['r11'])) if not(finished()) else None 104 | out['r12'] = F.relu(out['c12']) if not(finished()) else None 105 | out['p1'] = self.pool1(out['r12']) if not(finished()) else None 106 | 107 | out['c21'] = self.conv2_1(self.pad(out['p1'])) if not(finished()) else None 108 | out['r21'] = F.relu(out['c21']) if not(finished()) else None 109 | out['c22'] = self.conv2_2(self.pad(out['r21'])) if not(finished()) else None 110 | out['r22'] = F.relu(out['c22']) if not(finished()) else None 111 | out['p2'] = self.pool2(out['r22']) if not(finished()) else None 112 | 113 | out['c31'] = self.conv3_1(self.pad(out['p2'])) if not(finished()) else None 114 | out['r31'] = F.relu(out['c31']) if not(finished()) else None 115 | out['c32'] = self.conv3_2(self.pad(out['r31'])) if not(finished()) else None 116 | out['r32'] = F.relu(out['c32']) if not(finished()) else None 117 | out['c33'] = self.conv3_3(self.pad(out['r32'])) if not(finished()) else None 118 | out['r33'] = F.relu(out['c33']) if not(finished()) else None 119 | out['c34'] = self.conv3_4(self.pad(out['r33'])) if not(finished()) else None 120 | out['r34'] = F.relu(out['c34']) if not(finished()) else None 121 | out['p3'] = self.pool3(out['r34']) if not(finished()) else None 122 | 123 | out['c41'] = self.conv4_1(self.pad(out['p3'])) if not(finished()) else None 124 | out['r41'] = F.relu(out['c41']) if not(finished()) else None 125 | out['c42'] = self.conv4_2(self.pad(out['r41'])) if not(finished()) else None 126 | out['r42'] = F.relu(out['c42']) if not(finished()) else None 127 | out['c43'] = self.conv4_3(self.pad(out['r42'])) if not(finished()) else None 128 | out['r43'] = F.relu(out['c43']) if not(finished()) else None 129 | out['c44'] = self.conv4_4(self.pad(out['r43'])) if not(finished()) else None 130 | out['r44'] = F.relu(out['c44']) if not(finished()) else None 131 | out['p4'] = self.pool4(out['r44']) if not(finished()) else None 132 | 133 | out['c51'] = self.conv5_1(self.pad(out['p4'])) if not(finished()) else None 134 | out['r51'] = F.relu(out['c51']) if not(finished()) else None 135 | out['c52'] = self.conv5_2(self.pad(out['r51'])) if not(finished()) else None 136 | out['r52'] = F.relu(out['c52']) 
if not(finished()) else None 137 | out['c53'] = self.conv5_3(self.pad(out['r52'])) if not(finished()) else None 138 | out['r53'] = F.relu(out['c53']) if not(finished()) else None 139 | out['c54'] = self.conv5_4(self.pad(out['r53'])) if not(finished()) else None 140 | out['r54'] = F.relu(out['c54']) if not(finished()) else None 141 | out['p5'] = self.pool5(out['r54']) if not(finished()) else None 142 | return [out[key] for key in out_keys] 143 | -------------------------------------------------------------------------------- /mood/dpa/inference_evaluate_3d.py: -------------------------------------------------------------------------------- 1 | import nibabel as nib 2 | 3 | import argparse 4 | import yaml 5 | import os 6 | import torch 7 | import tqdm 8 | from sklearn.metrics import roc_auc_score, average_precision_score 9 | import numpy as np 10 | import pandas as pd 11 | import PIL.Image 12 | import monai 13 | 14 | from mood.dpa.evaluate import save_sample_score 15 | from mood.dpa.train import DeepPerceptualAutoencoder 16 | from mood.utils.datasets import DatasetType, DATASETS 17 | from mood.utils.transforms import TRANSFORMS 18 | from mood.dpa.rec_losses import ReconstructionLossType 19 | from mood.dpa.pg_rec_losses import PG_RECONSTRUCTION_LOSSES 20 | 21 | 22 | def main(config): 23 | verbose = config['verbose'] 24 | results_root = config['results_root'] 25 | model_path = config['test_model_path'] 26 | save_inference = config['save_inference'] 27 | 28 | along_axis = config['apply_along_axis'] 29 | test_image_rec_loss = config.get('test_image_rec_loss') 30 | score_reduction = config['score_reduction'] 31 | delete_zero_area = config['delete_zero_area'] 32 | do_not_process_small_area = config['do_not_process_small_area'] 33 | resize_3d_for_evaluation = config.get('resize_3d_for_evaluation') 34 | 35 | os.makedirs(results_root, exist_ok=True) 36 | 37 | if verbose: 38 | print(yaml.dump(config, default_flow_style=False)) 39 | 40 | enc, dec, image_rec_loss, (stage, resolution, progress, n_iter, mix_res_module) = \ 41 | DeepPerceptualAutoencoder.load_anomaly_detection_model(torch.load(model_path)) 42 | 43 | if test_image_rec_loss is not None: 44 | loss_type = config['test_image_rec_loss']['loss_type'] 45 | loss_kwargs = config['test_image_rec_loss']['loss_kwargs'] 46 | image_rec_loss = PG_RECONSTRUCTION_LOSSES[ReconstructionLossType[loss_type]]( 47 | max_resolution=resolution, **loss_kwargs) 48 | image_rec_loss.set_stage_resolution(stage, resolution) 49 | image_rec_loss.cuda() 50 | 51 | dataset_type = DatasetType(config['test_dataset']['dataset_type']) 52 | dataset_kwargs = config['test_dataset']['dataset_kwargs'] 53 | transform_kwargs = config['test_dataset']['transform_kwargs'] 54 | 55 | image_transform = TRANSFORMS[DatasetType.numpy2d](**transform_kwargs) 56 | 57 | dataset = DATASETS[dataset_type]( 58 | return_image_name=True, 59 | **dataset_kwargs 60 | ) 61 | 62 | results_root = config['results_root'] 63 | 64 | sample_true = [] 65 | pixel_true = [] 66 | 67 | sample_pred = [] 68 | pixel_pred = [] 69 | 70 | sample_output_dir = os.path.join(results_root, 'sample') 71 | pixel_output_dir = os.path.join(results_root, 'pixel') 72 | 73 | for image, mask, affine, name in tqdm.tqdm(dataset): 74 | tr_image = [] 75 | 76 | zeros_before = 0 77 | zeros_after = 0 78 | find_not_zeros = False 79 | for i in range(image.shape[along_axis]): 80 | slc = [slice(None)] * len(image.shape) 81 | slc[along_axis] = i 82 | 83 | if do_not_process_small_area: 84 | pil_image = image[slc].astype(np.float32) 85 | if 
pil_image.sum() > 4000: 86 | find_not_zeros = True 87 | pil_image = PIL.Image.fromarray(pil_image, mode='F') 88 | tr_image.append(image_transform(pil_image)) 89 | else: 90 | if find_not_zeros: 91 | zeros_after += 1 92 | else: 93 | zeros_before += 1 94 | else: 95 | pil_image = image[slc].astype(np.float32) 96 | pil_image = PIL.Image.fromarray(pil_image, mode='F') 97 | tr_image.append(image_transform(pil_image)) 98 | 99 | tr_image = torch.stack(tr_image, dim=0).cuda() 100 | 101 | with torch.no_grad(): 102 | # TODO: FIX IT. this hack is for testing large 3d scans (in order to fit in 12 Gb GPU memory) 103 | MAX_BATCH = 128 104 | if len(tr_image) < MAX_BATCH: 105 | rec_image = dec(enc(tr_image)).detach() 106 | else: 107 | rec_image = None 108 | for i in range(0, len(tr_image), MAX_BATCH): 109 | y = dec(enc(tr_image[i: i + MAX_BATCH])).detach() 110 | if rec_image is None: 111 | rec_image = y 112 | else: 113 | rec_image = torch.cat((rec_image, y), dim=0) 114 | # END todo. simple solution: rec_image = dec(enc(tr_image)).detach() 115 | 116 | tr_image = tr_image.squeeze(1).unsqueeze(0).unsqueeze(0) 117 | rec_image = rec_image.squeeze(1).unsqueeze(0).unsqueeze(0) 118 | 119 | image_rec_loss.set_reduction('pixelwise') 120 | pred = image_rec_loss(tr_image, rec_image) 121 | pred = pred.squeeze(0).squeeze(0).unsqueeze(1) 122 | 123 | assert pred.size(1) == 1 124 | pred = pred[:, 0] 125 | 126 | if do_not_process_small_area: 127 | slice_shape = pred.shape[1:] 128 | pred = torch.cat(( 129 | torch.zeros((zeros_before, *slice_shape)).cuda(), 130 | pred, 131 | torch.zeros((zeros_after, *slice_shape)).cuda() 132 | )) 133 | 134 | if along_axis == 0: 135 | pass 136 | elif along_axis == 1: 137 | pred = pred.permute(1, 0, 2) 138 | elif along_axis == 2: 139 | pred = pred.permute(1, 2, 0) 140 | else: 141 | raise NotImplementedError() 142 | pred = pred.unsqueeze(0).unsqueeze(0).detach().cpu() 143 | 144 | pred = torch.nn.functional.interpolate(pred, mode='trilinear', size=image.shape) 145 | pred = pred.squeeze(0).squeeze(0) 146 | pred = pred.detach().numpy() 147 | 148 | if delete_zero_area: 149 | pred = pred * (image > 0) 150 | 151 | if save_inference: 152 | save_3d_pixel_score(pixel_output_dir, pred, affine, name) 153 | 154 | if score_reduction == 'mean': 155 | anomaly_score = pred.mean() 156 | elif score_reduction == 'max': 157 | anomaly_score = pred.max() 158 | else: 159 | raise NotImplementedError() 160 | save_sample_score(sample_output_dir, [anomaly_score], [name]) 161 | 162 | if resize_3d_for_evaluation is not None: 163 | transform = monai.transforms.Resize( 164 | (resize_3d_for_evaluation, resize_3d_for_evaluation, resize_3d_for_evaluation), 165 | mode='trilinear') 166 | mask = transform(mask[None]).squeeze(0) 167 | pred = transform(pred[None]).squeeze(0) 168 | 169 | sample_true.append(mask.sum() > 0) 170 | sample_pred.append(anomaly_score) 171 | pixel_true.append(mask.flatten() > 0.5) 172 | pixel_pred.append(pred.flatten()) 173 | 174 | sample_true = np.array(sample_true).astype(np.bool) 175 | sample_pred = np.array(sample_pred) 176 | pixel_true = np.concatenate(pixel_true).astype(np.bool) 177 | pixel_pred = np.concatenate(pixel_pred) 178 | 179 | ap_sample = average_precision_score(sample_true, sample_pred) 180 | print("AP sample", ap_sample) 181 | auc_sample = roc_auc_score(sample_true, sample_pred) 182 | print("AUC sample", auc_sample) 183 | 184 | ap_pixel = average_precision_score(pixel_true, pixel_pred) 185 | print("AP pixel", ap_pixel) 186 | auc_pixel = roc_auc_score(pixel_true, pixel_pred) 187 | print("AUC 
pixel", auc_pixel) 188 | 189 | scores = pd.DataFrame( 190 | [[ap_sample, auc_sample], 191 | [ap_pixel, auc_pixel]], 192 | columns=['AP', 'ROC AUC'], index=['sample', 'pixel'] 193 | ) 194 | 195 | print(scores) 196 | 197 | scores.to_csv(os.path.join(results_root, 'scores.csv')) 198 | 199 | 200 | def save_3d_pixel_score(output_dir, score, affine, image_name): 201 | os.makedirs(output_dir, exist_ok=True) 202 | final_nimg = nib.Nifti1Image(score, affine=affine) 203 | nib.save(final_nimg, os.path.join(output_dir, image_name)) 204 | 205 | 206 | if __name__ == '__main__': 207 | parser = argparse.ArgumentParser() 208 | parser.add_argument('configs', type=str, nargs='*', help='Path to eval config') 209 | 210 | args = parser.parse_args() 211 | 212 | for config_path in args.configs: 213 | with open(config_path, 'r') as stream: 214 | config = yaml.load(stream, Loader=yaml.FullLoader) 215 | 216 | main(config) 217 | -------------------------------------------------------------------------------- /mood/dpa/layers.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | from torch import nn, nn as nn 4 | 5 | 6 | def get_norm_layer(type, **kwargs): 7 | mode_3d = kwargs.get('mode_3d', False) 8 | if mode_3d == False: 9 | if type == 'none': 10 | return [] 11 | elif type == 'bn': 12 | return [nn.BatchNorm2d(kwargs['num_features'])] 13 | elif type == 'in': 14 | return [nn.InstanceNorm2d(kwargs['num_features'])] 15 | else: 16 | raise NotImplementedError("Unknown type: {}".format(type)) 17 | else: 18 | if type == 'none': 19 | return [] 20 | elif type == 'bn': 21 | return [nn.BatchNorm3d(kwargs['num_features'])] 22 | elif type == 'in': 23 | return [nn.InstanceNorm3d(kwargs['num_features'])] 24 | else: 25 | raise NotImplementedError("Unknown type: {}".format(type)) 26 | 27 | 28 | def get_act_layer(type, **kwargs): 29 | if type == 'relu': 30 | return [nn.ReLU()] 31 | elif type == 'leaky_relu': 32 | return [nn.LeakyReLU(kwargs.get('negative_slope', 0.2), inplace=False)] 33 | elif type == 'tanh': 34 | return [nn.Tanh()] 35 | elif type == 'sigmoid': 36 | return [nn.Sigmoid()] 37 | elif type == 'linear': 38 | return [] 39 | else: 40 | raise NotImplementedError("Unknown type: {}".format(type)) 41 | 42 | 43 | def get_pool_layer(type, **kwargs): 44 | mode_3d = kwargs.get('mode_3d', False) 45 | if mode_3d == False: 46 | if type == 'avg': 47 | return [nn.AvgPool2d(kwargs.get('kernel_size', 2), kwargs.get('stride', 2))] 48 | elif type == 'max': 49 | return [nn.MaxPool2d(kwargs.get('kernel_size', 2), kwargs.get('stride', 2))] 50 | else: 51 | raise NotImplementedError("Unknown type: {}".format(type)) 52 | else: 53 | if type == 'avg': 54 | return [nn.AvgPool3d(kwargs.get('kernel_size', 2), kwargs.get('stride', 2))] 55 | elif type == 'max': 56 | return [nn.MaxPool3d(kwargs.get('kernel_size', 2), kwargs.get('stride', 2))] 57 | else: 58 | raise NotImplementedError("Unknown type: {}".format(type)) 59 | 60 | 61 | class Noise(nn.Module): 62 | def __init__(self, incoming): 63 | super(Noise, self).__init__() 64 | assert isinstance(incoming, nn.Conv2d) 65 | 66 | self.scale = nn.Parameter(torch.zeros(incoming.out_channels), requires_grad=True) 67 | 68 | self.bias = None 69 | if incoming.bias is not None: 70 | self.bias = incoming.bias 71 | incoming.bias = None 72 | 73 | def forward(self, x): 74 | noise = x.new_tensor(torch.rand(x.shape[0], 1, x.shape[2], x.shape[3])) 75 | x = x + noise * self.scale.reshape(1, -1, 1, 1) 76 | if self.bias is not None: 77 | x += 
self.bias.view(1, self.bias.size()[0], 1, 1) 78 | return x 79 | 80 | def __repr__(self): 81 | return self.__class__.__name__ + '()' 82 | 83 | 84 | class ConvBlock(nn.Module): 85 | def __init__(self, in_channels, out_channels, kernel_size, stride, padding, pad_type='zero', norm='none', 86 | act='linear', mode_3d=False): 87 | super(ConvBlock, self).__init__() 88 | leaky_relu_param = 0.2 89 | layers = [] 90 | 91 | if pad_type == 'reflect': 92 | layers.append(nn.ReflectionPad2d(padding)) 93 | padding = 0 94 | elif pad_type == 'zero': 95 | pass 96 | else: 97 | raise NotImplementedError 98 | 99 | conv_func = nn.Conv2d if mode_3d == False else nn.Conv3d 100 | conv = conv_func(in_channels, out_channels, kernel_size, stride, padding) 101 | nn.init.xavier_uniform_(conv.weight, gain=nn.init.calculate_gain(act, param=leaky_relu_param)) 102 | layers.append(conv) 103 | 104 | layers += get_norm_layer(norm, num_features=out_channels, mode_3d=mode_3d) 105 | layers += get_act_layer(act, negative_slope=leaky_relu_param) 106 | 107 | self.model = nn.Sequential(*layers) 108 | 109 | def forward(self, inputs): 110 | return self.model(inputs) 111 | 112 | 113 | class ResBlock(nn.Module): 114 | def __init__(self, input_dim, output_dim, norm='bn', act='relu', pad_type='zero', lambd=1, mode_3d=False): 115 | super(ResBlock, self).__init__() 116 | self.lambd = lambd 117 | 118 | model = [] 119 | model += [ConvBlock(input_dim, output_dim, 3, 1, 1, norm=norm, act=act, pad_type=pad_type,mode_3d=mode_3d)] 120 | model += [ConvBlock(output_dim, output_dim, 3, 1, 1, norm='none', act='linear', pad_type=pad_type, mode_3d=mode_3d)] 121 | self.model = nn.Sequential(*model) 122 | 123 | if input_dim == output_dim: 124 | self.skipcon = nn.Sequential() 125 | else: 126 | self.skipcon = ConvBlock(input_dim, output_dim, 1, 1, 0, norm='none', act='linear', 127 | pad_type=pad_type, mode_3d=mode_3d) 128 | 129 | def forward(self, x): 130 | return self.skipcon(x) + self.lambd * self.model(x) 131 | 132 | 133 | class PreActResnetBlock(nn.Module): 134 | def __init__(self, input_dim, output_dim, norm='bn', act='relu', pad_type='zero', lambd=1, mode_3d=False): 135 | super(PreActResnetBlock, self).__init__() 136 | self.lambd = lambd 137 | 138 | model = [] 139 | model += get_norm_layer(norm, num_features=input_dim, mode_3d=mode_3d) 140 | model += get_act_layer(act) 141 | model += [ 142 | ConvBlock(input_dim, output_dim, 3, 1, 1, norm=norm, act=act, pad_type=pad_type, mode_3d=mode_3d), 143 | ConvBlock(output_dim, output_dim, 3, 1, 1, norm='none', act='linear', pad_type=pad_type, mode_3d=mode_3d) 144 | ] 145 | self.model = nn.Sequential(*model) 146 | 147 | if input_dim == output_dim: 148 | self.skipcon = nn.Sequential() 149 | else: 150 | self.skipcon = ConvBlock(input_dim, output_dim, 1, 1, 0, norm='none', act='linear', 151 | pad_type=pad_type, mode_3d=mode_3d) 152 | 153 | def forward(self, x): 154 | return self.skipcon(x) + self.lambd * self.model(x) 155 | 156 | 157 | class PreActResnetBlockUp(nn.Module): 158 | def __init__(self, input_dim, output_dim, norm='bn', act='relu', pad_type='zero', upsample_mode='nearest', mode_3d=False): 159 | super(PreActResnetBlockUp, self).__init__() 160 | 161 | model = [] 162 | model += get_norm_layer(norm, num_features=input_dim, mode_3d=mode_3d) 163 | model += get_act_layer(act) 164 | model += [ 165 | nn.Upsample(scale_factor=2, mode=upsample_mode), 166 | ConvBlock(input_dim, output_dim, 3, 1, 1, norm=norm, act=act, pad_type=pad_type, mode_3d=mode_3d), 167 | ConvBlock(output_dim, output_dim, 3, 1, 1, norm='none', 
act='linear', pad_type=pad_type, mode_3d=mode_3d) 168 | ] 169 | self.model = nn.Sequential(*model) 170 | 171 | skipcon = [nn.Upsample(scale_factor=2, mode='nearest')] 172 | if input_dim != output_dim: 173 | skipcon += [ConvBlock(input_dim, output_dim, 1, 1, 0, norm='none', act='linear', 174 | pad_type=pad_type, mode_3d=mode_3d)] 175 | self.skipcon = nn.Sequential(*skipcon) 176 | 177 | def forward(self, x): 178 | return self.skipcon(x) + self.model(x) 179 | 180 | 181 | class PreActResnetBlockDown(nn.Module): 182 | def __init__(self, input_dim, output_dim, norm='bn', act='relu', pad_type='zero', pool='avg', mode_3d=False): 183 | super(PreActResnetBlockDown, self).__init__() 184 | 185 | model = [] 186 | model += get_norm_layer(norm, num_features=input_dim, mode_3d=mode_3d) 187 | model += get_act_layer(act) 188 | model += get_pool_layer(pool, kernel_size=2, stride=2, mode_3d=mode_3d) 189 | model += [ 190 | ConvBlock(input_dim, output_dim, 3, 1, 1, norm=norm, act=act, pad_type=pad_type, mode_3d=mode_3d), 191 | ConvBlock(output_dim, output_dim, 3, 1, 1, norm='none', act='linear', pad_type=pad_type, mode_3d=mode_3d), 192 | ] 193 | self.model = nn.Sequential(*model) 194 | 195 | skipcon = get_pool_layer(pool, kernel_size=2, stride=2, mode_3d=mode_3d) 196 | if input_dim != output_dim: 197 | skipcon += [ConvBlock(input_dim, output_dim, 1, 1, 0, norm='none', act='linear', 198 | pad_type=pad_type, mode_3d=mode_3d)] 199 | self.skipcon = nn.Sequential(*skipcon) 200 | 201 | def forward(self, x): 202 | return self.skipcon(x) + self.model(x) 203 | 204 | 205 | class EqualLayer(nn.Module): 206 | def forward(self, x): 207 | return x 208 | 209 | 210 | class ConcatLayer(nn.Module): 211 | def __init__(self, layer1, layer2): 212 | super(ConcatLayer, self).__init__() 213 | self.layer1 = layer1 214 | self.layer2 = layer2 215 | 216 | def forward(self, x): 217 | y = [self.layer1(x), self.layer2(x)] 218 | return y 219 | 220 | 221 | class FadeinLayer(nn.Module): 222 | def __init__(self, ): 223 | super(FadeinLayer, self).__init__() 224 | self._alpha = torch.nn.Parameter(torch.Tensor([0.0]), requires_grad=False) 225 | 226 | def set_progress(self, alpha): 227 | self._alpha.data[0] = np.clip(alpha, 0, 1.0) 228 | 229 | def get_progress(self): 230 | return self._alpha.data.cpu().item() 231 | 232 | def forward(self, x): 233 | return torch.add(x[0].mul(1.0 - self._alpha), x[1].mul(self._alpha)) 234 | 235 | def __repr__(self): 236 | return self.__class__.__name__ + '(get_alpha = {:.2f})'.format(self._alpha.data[0]) -------------------------------------------------------------------------------- /mood/dpa/main.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import yaml 3 | import time 4 | 5 | from mood.dpa import train, evaluate, inference_evaluate_3d 6 | 7 | if __name__ == '__main__': 8 | start_time = time.time() 9 | parser = argparse.ArgumentParser() 10 | parser.add_argument('action', type=str, choices=['train', 'evaluate', 'inference_evaluate_3d']) 11 | parser.add_argument('config', type=str, help='Path to config') 12 | 13 | args = parser.parse_args() 14 | 15 | action = args.action 16 | config_path = args.config 17 | 18 | with open(args.config[0], 'r') as stream: 19 | config = yaml.load(stream, Loader=yaml.FullLoader) 20 | 21 | if action == 'train': 22 | train.main(config) 23 | elif action == 'evaluate': 24 | evaluate.main(config) 25 | elif action == 'inference_evaluate_3d': 26 | inference_evaluate_3d.main(config) 27 | else: 28 | raise NotImplementedError() 29 | 30 | 
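# With a single positional `config` argument, the `open(args.config[0], ...)` call above
# presumably intends `open(config_path, ...)` (indexing a string yields its first character).
# Illustrative invocation, using config paths that ship with this repository:
#   python -m mood.dpa.main train configs/train_example.yaml
#   python -m mood.dpa.main inference_evaluate_3d configs/inference_3d_example.yaml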
print(f"Finished. Took: {(time.time() - start_time) / 60:.02f}m") 31 | -------------------------------------------------------------------------------- /mood/dpa/optimizer.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | from torch.optim import Adam 3 | 4 | 5 | class Optimizer(nn.Module): 6 | def __init__(self, enc_params, dec_params, image_rec_loss, adam_kwargs): 7 | super(Optimizer, self).__init__() 8 | 9 | self.adam_kwargs = adam_kwargs 10 | self.image_rec_loss = image_rec_loss 11 | self.opt = None 12 | 13 | self.set_new_params(enc_params, dec_params) 14 | 15 | def set_new_params(self, enc_params, dec_params, image_rec_loss=None): 16 | 17 | def preprocess(params): 18 | params = [p for p in params if p.requires_grad] 19 | return params 20 | 21 | params = preprocess(enc_params) + preprocess(dec_params) 22 | self.opt = Adam(params, **self.adam_kwargs) 23 | 24 | if image_rec_loss is not None: 25 | self.image_rec_loss = image_rec_loss 26 | 27 | def compute_loss(self, real_x, rec_x, update_parameters=True): 28 | loss = self.image_rec_loss(real_x, rec_x) 29 | 30 | if update_parameters: 31 | if loss.item() > 1e9: 32 | raise ValueError("Too large value of loss function (>10^9)!") 33 | 34 | self.opt.zero_grad() 35 | loss.backward() 36 | self.opt.step() 37 | 38 | loss_info = {"image_rec_loss": loss.item()} 39 | return loss_info 40 | -------------------------------------------------------------------------------- /mood/dpa/pg_decoders.py: -------------------------------------------------------------------------------- 1 | import math 2 | from abc import ABC, abstractmethod 3 | from collections import OrderedDict 4 | 5 | from torch import nn 6 | 7 | from mood.dpa.pg_networks import ProgGrowNetworks, ProgGrowStageType, STABNetwork, TRNSNetwork, \ 8 | NetworkType 9 | from mood.dpa.layers import ConvBlock, get_act_layer, PreActResnetBlock, PreActResnetBlockUp, ConcatLayer, \ 10 | FadeinLayer 11 | 12 | 13 | class AbstractDecoderNetworks(ProgGrowNetworks, ABC): 14 | """ 15 | The architecture of network is taken from 16 | 17 | Karras, Tero, et al. 18 | "Progressive growing of gans for improved quality, stability, and variation." 19 | arXiv preprint arXiv:1710.10196 (2017). 20 | """ 21 | 22 | def __init__(self, input_res, max_output_res, input_dim, output_dim, inner_dims, normalize_latents=False, 23 | norm='none', pad_type='zero', upsample_mode='nearest', mode_3d=False): 24 | """ 25 | API allows create Decoder which takes tensor as input. 26 | For example, input tensor may be 16x4x4 (input_res = 4, input_dim=16). 27 | But input_res must be degree of 2(except for 2): 1, 4, 8, 16, 32, ... 28 | 29 | inner_dims -- list of depths of convolution layers. 
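A minimal construction sketch (the dimensions below are illustrative, not taken from the training configs): Resnet9DecoderNetworks(input_res=1, max_output_res=64, input_dim=128, output_dim=1, inner_dims=[256, 128, 64, 32, 16]) registers a 'stab' decoder for the base 4x4 resolution and 'trns'/'stab' pairs for 8, 16, 32 and 64.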
30 | 31 | """ 32 | super().__init__() 33 | 34 | self.input_res = input_res 35 | self.max_output_res = max_output_res 36 | self.input_dim = input_dim 37 | self.output_dim = output_dim 38 | self.inner_dims = inner_dims 39 | 40 | self.norm = norm 41 | self.pad_type = pad_type 42 | self.upsample_mode = upsample_mode 43 | self.mode_3d = mode_3d 44 | 45 | self._create_networks() 46 | 47 | def _create_networks(self): 48 | res_blocks = OrderedDict() 49 | 50 | # next resolution 51 | resolution = self._get_first_block_resolution() 52 | res_blocks['res_{}'.format(resolution)] = self._get_first_block() 53 | prev_postprocess = self._get_rgb_block(resolution) 54 | 55 | stab_model = nn.Sequential(res_blocks) 56 | stab_model.add_module('postprocess_res_{}'.format(resolution), prev_postprocess) 57 | self.set_net(ProgGrowStageType.stab, resolution, STABNetwork(stab_model)) 58 | 59 | resolution *= 2 60 | while resolution <= self.max_output_res: 61 | # trns dpa 62 | trns_model = nn.Sequential(res_blocks) 63 | 64 | low_resl = nn.Sequential(OrderedDict([ 65 | ('postprocess_res_{}'.format(resolution // 2), prev_postprocess), 66 | ('from_res_{}_to_res_{}'.format(resolution // 2, resolution), 67 | nn.Upsample(scale_factor=2, mode=self.upsample_mode)) 68 | ])) 69 | 70 | new_stage_block = self._get_intermediate_block(resolution) 71 | new_post_process = self._get_rgb_block(resolution) 72 | 73 | high_resl = nn.Sequential() 74 | high_resl.add_module('res_{}'.format(resolution), new_stage_block) 75 | high_resl.add_module('postprocess_res_{}'.format(resolution), new_post_process) 76 | 77 | trns_model.add_module('concat', ConcatLayer(low_resl, high_resl)) 78 | trns_model.add_module('fadein', FadeinLayer()) 79 | self.set_net(ProgGrowStageType.trns, resolution, TRNSNetwork(trns_model)) 80 | 81 | # stab dpa 82 | res_blocks['res_{}'.format(resolution)] = new_stage_block 83 | prev_postprocess = new_post_process 84 | 85 | stab_model = nn.Sequential(res_blocks) 86 | stab_model.add_module('postprocess_res_{}'.format(resolution), prev_postprocess) 87 | self.set_net(ProgGrowStageType.stab, resolution, STABNetwork(stab_model)) 88 | resolution *= 2 89 | 90 | def _get_num_filters(self, resolution): 91 | stage = int(math.log(resolution, 2)) 92 | init_stage = int(math.log(self._get_first_block_resolution(), 2)) 93 | 94 | return self.inner_dims[stage - init_stage] 95 | 96 | def _get_first_block_resolution(self): 97 | if self.input_res == 1: 98 | return 4 99 | else: 100 | return self.input_res 101 | 102 | @abstractmethod 103 | def _get_first_block(self): 104 | pass 105 | 106 | @abstractmethod 107 | def _get_intermediate_block(self, resolution): 108 | pass 109 | 110 | @abstractmethod 111 | def _get_rgb_block(self, resolution): 112 | pass 113 | 114 | 115 | class Resnet9DecoderNetworks(AbstractDecoderNetworks): 116 | @staticmethod 117 | def _init_layers(model): 118 | for m in model.modules(): 119 | if isinstance(m, (nn.Conv2d, nn.Conv3d)): 120 | nn.init.kaiming_normal_(m.weight, mode='fan_out', 121 | nonlinearity='leaky_relu', a=0.2) 122 | elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm3d, nn.GroupNorm)): 123 | nn.init.constant_(m.weight, 1) 124 | nn.init.constant_(m.bias, 0) 125 | return model 126 | 127 | def _get_block(self, block, prev_nf, nf): 128 | return block(prev_nf, nf, norm=self.norm, act='leaky_relu', pad_type=self.pad_type, mode_3d=self.mode_3d) 129 | 130 | def _get_first_block(self): 131 | resolution = self._get_first_block_resolution() 132 | nf = self._get_num_filters(resolution) 133 | 134 | if self.input_res == 1: 135 | 
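# With kernel_size=4, stride=1, padding=3, a 1x1 latent expands to the 4x4 base
# resolution: 1 + 2*3 = 7 after padding, then 7 - 4 + 1 = 4.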
layers = get_act_layer('leaky_relu') \ 136 | + [ConvBlock(self.input_dim, nf, kernel_size=4, stride=1, padding=3, norm='none', 137 | act='linear', pad_type='zero', mode_3d=self.mode_3d)] 138 | else: 139 | layers = [self._get_block(PreActResnetBlock, self.input_dim, nf)] 140 | 141 | return self._init_layers(nn.Sequential(*layers)) 142 | 143 | def _get_intermediate_block(self, resolution): 144 | prev_nf = self._get_num_filters(resolution / 2) 145 | nf = self._get_num_filters(resolution) 146 | layers = [ 147 | self._get_block(PreActResnetBlockUp, prev_nf, nf), 148 | ] 149 | return self._init_layers(nn.Sequential(*layers)) 150 | 151 | def _get_rgb_block(self, resolution): 152 | nf = self._get_num_filters(resolution) 153 | layers = get_act_layer('leaky_relu') 154 | layers += [ConvBlock(nf, self.output_dim, kernel_size=3, stride=1, padding=1, norm='none', 155 | act='linear', pad_type=self.pad_type, mode_3d=self.mode_3d)] 156 | 157 | return self._init_layers(nn.Sequential(*layers)) 158 | 159 | 160 | class Resnet18DecoderNetworks(Resnet9DecoderNetworks): 161 | def _get_intermediate_block(self, resolution): 162 | prev_nf = self._get_num_filters(resolution / 2) 163 | nf = self._get_num_filters(resolution) 164 | layers = [ 165 | self._get_block(PreActResnetBlockUp, prev_nf, nf), 166 | self._get_block(PreActResnetBlock, nf, nf), 167 | ] 168 | return self._init_layers(nn.Sequential(*layers)) 169 | 170 | 171 | DECODER_NETWORKS = { 172 | NetworkType.residual9: Resnet9DecoderNetworks, 173 | NetworkType.residual18: Resnet18DecoderNetworks, 174 | } -------------------------------------------------------------------------------- /mood/dpa/pg_encoders.py: -------------------------------------------------------------------------------- 1 | import math 2 | from abc import ABC, abstractmethod 3 | from collections import OrderedDict 4 | 5 | from torch import nn 6 | 7 | from mood.dpa.pg_networks import ProgGrowNetworks, ProgGrowStageType, \ 8 | STABNetwork, TRNSNetwork, NetworkType 9 | from mood.dpa.layers import PreActResnetBlockDown, ConvBlock, PreActResnetBlock, get_act_layer, \ 10 | get_pool_layer, ConcatLayer, FadeinLayer 11 | 12 | 13 | class AbstractEncoderNetworks(ProgGrowNetworks, ABC): 14 | def __init__(self, max_input_res, output_res, input_dim, output_dim, inner_dims, 15 | norm='none', pad_type='zero', pool='avg', mode_3d=False): 16 | """ 17 | API allows create Encoder which output is tensor. 18 | For example, output tensor may be 16x4x4 (output_res = 4, output_dim=16). 19 | But output_res must be degree of 2(except for 2): 1, 4, 8, 16, 32, ... 20 | 21 | inner_dims -- list of depths of inner convolution layers. 
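For example (illustrative values), Resnet9EncoderNetworks(max_input_res=64, output_res=1, input_dim=1, output_dim=128, inner_dims=[256, 128, 64, 32, 16]) mirrors the decoder sketch above: a 64x64 single-channel input is reduced to a 128-dimensional latent.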
22 | 23 | """ 24 | super().__init__() 25 | 26 | self.max_input_res = max_input_res 27 | self.output_res = output_res 28 | 29 | self.input_dim = input_dim 30 | self.output_dim = output_dim 31 | self.inner_dims = inner_dims 32 | 33 | self.norm = norm 34 | self.pad_type = pad_type 35 | self.pool = pool 36 | self.mode_3d = mode_3d 37 | 38 | self._create_networks() 39 | 40 | def _create_networks(self): 41 | res_blocks = [] 42 | 43 | # next resolution 44 | resolution = self._get_last_block_resolution() 45 | res_blocks += [('res_{}'.format(resolution), self._get_last_block())] 46 | prev_preprocess = self._get_rgb_block(resolution) 47 | 48 | layers = OrderedDict([('preprocess_res_{}'.format(resolution), prev_preprocess)] + res_blocks) 49 | stab_model = nn.Sequential(layers) 50 | self.set_net(ProgGrowStageType.stab, resolution, STABNetwork(stab_model)) 51 | 52 | resolution *= 2 53 | while resolution <= self.max_input_res: 54 | # trns network 55 | 56 | low_res = nn.Sequential(OrderedDict([ 57 | ('from_res_{}_to_res_{}'.format(resolution, resolution // 2), 58 | get_pool_layer(self.pool, kernel_size=2, stride=2, mode_3d=self.mode_3d)[0]), 59 | ('preprocess_res_{}'.format(resolution // 2), prev_preprocess) 60 | ])) 61 | 62 | new_preprocess = self._get_rgb_block(resolution) 63 | new_res_block = self._get_intermediate_block(resolution) 64 | 65 | high_res = nn.Sequential() 66 | high_res.add_module('preprocess_res_{}'.format(resolution), new_preprocess) 67 | high_res.add_module('res_{}'.format(resolution), new_res_block) 68 | 69 | layers = [ 70 | ('concat', ConcatLayer(low_res, high_res)), 71 | ('fadein', FadeinLayer()) 72 | ] + res_blocks 73 | 74 | trns_model = nn.Sequential(OrderedDict(layers)) 75 | self.set_net(ProgGrowStageType.trns, resolution, TRNSNetwork(trns_model)) 76 | 77 | # stab network 78 | prev_preprocess = new_preprocess 79 | res_blocks.insert(0, ('res_{}'.format(resolution), new_res_block)) 80 | 81 | layers = [('preprocess_res_{}'.format(resolution), prev_preprocess)] + res_blocks 82 | stab_model = nn.Sequential(OrderedDict(layers)) 83 | self.set_net(ProgGrowStageType.stab, resolution, STABNetwork(stab_model)) 84 | 85 | resolution *= 2 86 | 87 | def _get_last_block_resolution(self): 88 | if self.output_res == 1: 89 | return 4 90 | else: 91 | return self.output_res 92 | 93 | def _get_num_filters(self, resolution): 94 | stage = int(math.log(resolution, 2)) 95 | last_stage = int(math.log(self._get_last_block_resolution(), 2)) 96 | 97 | return self.inner_dims[-(stage - last_stage) - 1] 98 | 99 | @abstractmethod 100 | def _get_rgb_block(self, resolution): 101 | pass 102 | 103 | @abstractmethod 104 | def _get_last_block(self): 105 | pass 106 | 107 | @abstractmethod 108 | def _get_intermediate_block(self, resolution): 109 | pass 110 | 111 | 112 | class Resnet9EncoderNetworks(AbstractEncoderNetworks): 113 | @staticmethod 114 | def _init_layers(model): 115 | for m in model.modules(): 116 | if isinstance(m, (nn.Conv2d, nn.Conv3d)): 117 | nn.init.kaiming_normal_(m.weight, mode='fan_out', 118 | nonlinearity='leaky_relu', a=0.2) 119 | elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm3d, nn.GroupNorm)): 120 | nn.init.constant_(m.weight, 1) 121 | nn.init.constant_(m.bias, 0) 122 | return model 123 | 124 | def _get_block(self, block, prev_nf, nf): 125 | return block(prev_nf, nf, norm=self.norm, act='leaky_relu', pad_type=self.pad_type, 126 | mode_3d=self.mode_3d) 127 | 128 | def _get_intermediate_block(self, resolution): 129 | prev_nf = self._get_num_filters(resolution) 130 | nf = 
self._get_num_filters(resolution // 2) 131 | layers = [ 132 | self._get_block(PreActResnetBlockDown, prev_nf, nf), 133 | ] 134 | return self._init_layers(nn.Sequential(*layers)) 135 | 136 | def _get_rgb_block(self, resolution): 137 | nf = self._get_num_filters(resolution) 138 | layers = [ 139 | ConvBlock(self.input_dim, nf, 140 | kernel_size=3, stride=1, padding=1, norm='none', act='linear', 141 | pad_type=self.pad_type, mode_3d=self.mode_3d), 142 | self._get_block(PreActResnetBlock, nf, nf), 143 | ] 144 | return self._init_layers(nn.Sequential(*layers)) 145 | 146 | def _get_last_block(self): 147 | layers = [] 148 | resolution = self._get_last_block_resolution() 149 | nf = self._get_num_filters(resolution) 150 | 151 | if self.output_res == 1: 152 | layers += get_act_layer('leaky_relu') 153 | layers += [ConvBlock(nf, self.output_dim, kernel_size=4, stride=1, padding=0, norm='none', 154 | pad_type=self.pad_type, act='linear', mode_3d=self.mode_3d)] 155 | else: 156 | layers += [self._get_block(PreActResnetBlock, nf, self.output_dim)] 157 | return self._init_layers(nn.Sequential(*layers)) 158 | 159 | 160 | class Resnet18EncoderNetworks(Resnet9EncoderNetworks): 161 | def _get_intermediate_block(self, resolution): 162 | prev_nf = self._get_num_filters(resolution) 163 | nf = self._get_num_filters(resolution // 2) 164 | layers = [ 165 | self._get_block(PreActResnetBlockDown, prev_nf, nf), 166 | self._get_block(PreActResnetBlock, nf, nf) 167 | ] 168 | return self._init_layers(nn.Sequential(*layers)) 169 | 170 | def _get_rgb_block(self, resolution): 171 | nf = self._get_num_filters(resolution) 172 | layers = [ 173 | ConvBlock(self.input_dim, nf, 174 | kernel_size=3, stride=1, padding=1, norm='none', act='linear', 175 | pad_type=self.pad_type, mode_3d=self.mode_3d), 176 | self._get_block(PreActResnetBlock, nf, nf), 177 | self._get_block(PreActResnetBlock, nf, nf), 178 | ] 179 | return self._init_layers(nn.Sequential(*layers)) 180 | 181 | 182 | ENCODER_NETWORKS = { 183 | NetworkType.residual9: Resnet9EncoderNetworks, 184 | NetworkType.residual18: Resnet18EncoderNetworks, 185 | } -------------------------------------------------------------------------------- /mood/dpa/pg_networks.py: -------------------------------------------------------------------------------- 1 | from collections import defaultdict 2 | from enum import Enum 3 | from abc import ABC, abstractmethod 4 | 5 | from torch import nn 6 | 7 | 8 | class ProgGrowStageType(Enum): 9 | trns = 'trns' # transition stage - doubling the resolution 10 | stab = 'stab' # stabilization stage - training at a fixed resolution 11 | 12 | 13 | class NetworkType(Enum): 14 | residual9 = 'residual9' 15 | residual18 = 'residual18' 16 | 17 | 18 | class AbstractNetwork(nn.Module, ABC): 19 | def __init__(self, model): 20 | super().__init__() 21 | self.model = model 22 | 23 | @abstractmethod 24 | def get_progress(self): 25 | pass 26 | 27 | @abstractmethod 28 | def set_progress(self, progress): 29 | pass 30 | 31 | def forward(self, x): 32 | return self.model(x) 33 | 34 | 35 | class STABNetwork(AbstractNetwork): 36 | def __init__(self, model): 37 | super().__init__(model) 38 | 39 | def get_progress(self): 40 | return 1 41 | 42 | def set_progress(self, progress): 43 | pass 44 | 45 | 46 | class TRNSNetwork(AbstractNetwork): 47 | def __init__(self, model): 48 | super().__init__(model) 49 | try: 50 | model.fadein 51 | except Exception: 52 | raise ValueError('Model must have a Fadein layer named "fadein"') 53 | 54 | def get_progress(self): 55 | return 
self.model.fadein.get_progress() 56 | 57 | def set_progress(self, progress): 58 | self.model.fadein.set_progress(progress) 59 | 60 | 61 | class ProgGrowNetworks(nn.Module): 62 | def __init__(self): 63 | super(ProgGrowNetworks, self).__init__() 64 | self._resolution_nets_dict = defaultdict(lambda: dict()) 65 | 66 | def get_net(self, stage, resolution): 67 | return self._resolution_nets_dict[resolution][stage] 68 | 69 | def set_net(self, stage, resolution, network): 70 | if stage == ProgGrowStageType.trns: 71 | assert isinstance(network, TRNSNetwork) 72 | elif stage == ProgGrowStageType.stab: 73 | assert isinstance(network, STABNetwork) 74 | else: 75 | raise ValueError("Stage: {} is not available".format(stage)) 76 | self._resolution_nets_dict[resolution][stage] = network 77 | -------------------------------------------------------------------------------- /mood/dpa/pg_rec_losses.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from abc import ABC, abstractmethod 3 | 4 | from mood.dpa.rec_losses import L2Loss, L1Loss, ReconstructionLossType, PerceptualLoss 5 | from mood.dpa.pg_networks import ProgGrowStageType 6 | 7 | 8 | class AbstractPGLoss(torch.nn.Module, ABC): 9 | def __init__(self, max_resolution, mode_3d=False): 10 | super().__init__() 11 | 12 | self._resolution = max_resolution 13 | self._stage = ProgGrowStageType.stab 14 | self._progress = 0 15 | self._mode_3d = mode_3d 16 | 17 | @abstractmethod 18 | def set_stage_resolution(self, stage, resolution): 19 | pass 20 | 21 | @abstractmethod 22 | def set_progress(self, progress): 23 | pass 24 | 25 | @abstractmethod 26 | def forward(self, x, y): 27 | pass 28 | 29 | @abstractmethod 30 | def set_reduction(self, reduction): 31 | pass 32 | 33 | 34 | class PGPerceptualLoss(AbstractPGLoss): 35 | def __init__(self, max_resolution, weights_per_resolution, reduction='mean', 36 | use_smooth_pg=False, 37 | use_L1_norm=False, use_relative_error=False, 38 | pad_type='zero', 39 | path_to_vgg19_weights=None, 40 | normalize_to_vgg_input=True, 41 | imagenet_pretrained=False, 42 | mode_3d=False, 43 | mode_3d_apply_along_axes=[0]): 44 | super(PGPerceptualLoss, self).__init__(max_resolution, mode_3d) 45 | 46 | self._max_resolution = max_resolution 47 | self._weights_per_resolution = weights_per_resolution 48 | self._use_smooth_pg = use_smooth_pg 49 | self._reduction = reduction 50 | self._loss = PerceptualLoss(reduction=reduction, 51 | use_L1_norm=use_L1_norm, 52 | use_relative_error=use_relative_error, 53 | pad_type=pad_type, 54 | path_to_vgg19_weights=path_to_vgg19_weights, 55 | imagenet_pretrained=imagenet_pretrained, 56 | normalize_to_vgg_input=normalize_to_vgg_input, 57 | mode_3d=mode_3d, 58 | mode_3d_apply_along_axes=mode_3d_apply_along_axes) 59 | 60 | self._resolution = self._max_resolution 61 | self._stage = ProgGrowStageType.stab 62 | self._progress = 0 63 | 64 | def set_stage_resolution(self, stage, resolution): 65 | self._stage = stage 66 | self._resolution = resolution 67 | self._progress = 0 68 | 69 | def set_progress(self, progress): 70 | self._progress = progress 71 | 72 | def set_reduction(self, reduction): 73 | self._reduction = reduction 74 | self._loss.reduction = reduction 75 | 76 | def forward(self, x, y): 77 | self._loss.set_new_weights(**self._weights_per_resolution[self._resolution]) 78 | loss = self._loss(x, y) 79 | 80 | if self._use_smooth_pg: 81 | if self._stage == ProgGrowStageType.trns and self._progress < 1: 82 | prev_res = int(self._resolution / 2) 83 | 
self._loss.set_new_weights(**self._weights_per_resolution[prev_res]) 84 | 85 | x = torch.nn.functional.upsample(x, scale_factor=0.5, mode='bilinear') 86 | y = torch.nn.functional.upsample(y, scale_factor=0.5, mode='bilinear') 87 | 88 | prev_loss = self._loss(x, y) 89 | loss = (1 - self._progress) * prev_loss + self._progress * loss 90 | 91 | return loss 92 | 93 | 94 | class PGRelativePerceptualL1Loss(PGPerceptualLoss): 95 | def __init__(self, max_resolution, weights_per_resolution, reduction='mean', 96 | use_smooth_pg=False, pad_type='zero', mode_3d=False, 97 | mode_3d_apply_along_axes=[0], 98 | path_to_vgg19_weights=None, 99 | normalize_to_vgg_input=True, 100 | imagenet_pretrained=False): 101 | super().__init__(max_resolution, weights_per_resolution, reduction=reduction, 102 | use_smooth_pg=use_smooth_pg, 103 | use_L1_norm=True, use_relative_error=True, 104 | path_to_vgg19_weights=path_to_vgg19_weights, 105 | imagenet_pretrained=imagenet_pretrained, 106 | normalize_to_vgg_input=normalize_to_vgg_input, 107 | pad_type=pad_type, mode_3d=mode_3d, 108 | mode_3d_apply_along_axes=mode_3d_apply_along_axes) 109 | 110 | 111 | class PGL2Loss(AbstractPGLoss): 112 | def __init__(self, max_resolution, reduction='mean', mode_3d=False): 113 | super().__init__(max_resolution) 114 | self._loss = L2Loss(reduction=reduction, mode_3d=mode_3d) 115 | 116 | def set_stage_resolution(self, stage, resolution): 117 | pass 118 | 119 | def set_progress(self, progress): 120 | pass 121 | 122 | def set_reduction(self, reduction): 123 | self._loss.set_reduction(reduction) 124 | 125 | def forward(self, x, y): 126 | return self._loss(x, y) 127 | 128 | 129 | class PGL1Loss(AbstractPGLoss): 130 | def __init__(self, max_resolution, reduction='mean', mode_3d=False): 131 | super().__init__(max_resolution) 132 | self._loss = L1Loss(reduction=reduction, mode_3d=mode_3d) 133 | 134 | def set_stage_resolution(self, stage, resolution): 135 | pass 136 | 137 | def set_progress(self, progress): 138 | pass 139 | 140 | def set_reduction(self, reduction): 141 | self._loss.set_reduction(reduction) 142 | 143 | def forward(self, x, y): 144 | return self._loss(x, y) 145 | 146 | 147 | PG_RECONSTRUCTION_LOSSES = { 148 | ReconstructionLossType.perceptual: PGPerceptualLoss, 149 | ReconstructionLossType.relative_perceptual_L1: PGRelativePerceptualL1Loss, 150 | ReconstructionLossType.l1: PGL1Loss, 151 | ReconstructionLossType.l2: PGL2Loss, 152 | } 153 | 154 | 155 | 156 | -------------------------------------------------------------------------------- /mood/dpa/rec_losses.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | from enum import Enum 3 | import torch 4 | from torch import nn 5 | from torch.nn import functional as F 6 | 7 | from mood.dpa.feature_extractor import PretrainedVGG19FeatureExtractor 8 | 9 | 10 | class L2Loss(nn.Module): 11 | def __init__(self, reduction='mean', mode_3d=False): 12 | super(L2Loss, self).__init__() 13 | assert reduction in ['none', 'sum', 'mean'] 14 | self._reduction = reduction 15 | self._mode_3d = mode_3d 16 | 17 | def set_reduction(self, reduction): 18 | assert reduction in ['none', 'sum', 'mean'] 19 | self._reduction = reduction 20 | 21 | def forward(self, x, y): 22 | loss = (x - y) * (x - y) 23 | 24 | if not self._mode_3d: 25 | loss = loss.sum(3).sum(2).sum(1) / (x.size(1) * x.size(2) * x.size(3)) 26 | else: 27 | loss = loss.sum(4).sum(3).sum(2).sum(1) / ( 28 | loss.size(1) * loss.size(2) * loss.size(3) * loss.size(4)) 29 | 30 | if 
self._reduction == 'none': 31 | return loss 32 | elif self._reduction == 'mean': 33 | return torch.mean(loss) 34 | else: 35 | return torch.sum(loss) 36 | 37 | 38 | class L1Loss(nn.Module): 39 | def __init__(self, reduction='none', mode_3d=False): 40 | super(L1Loss, self).__init__() 41 | assert reduction in ['none', 'sum', 'mean', 'pixelwise'] 42 | self._reduction = reduction 43 | self._mode_3d = mode_3d 44 | 45 | def set_reduction(self, reduction): 46 | assert reduction in ['none', 'sum', 'mean', 'pixelwise'] 47 | self._reduction = reduction 48 | 49 | def forward(self, x, y): 50 | loss = torch.abs(x - y) 51 | if self._reduction == 'pixelwise': 52 | return loss 53 | 54 | if not self._mode_3d: 55 | loss = loss.sum(3).sum(2).sum(1) / (loss.size(1) * loss.size(2) * loss.size(3)) 56 | else: 57 | loss = loss.sum(4).sum(3).sum(2).sum(1) / (loss.size(1) * loss.size(2) * loss.size(3) * loss.size(4)) 58 | 59 | if self._reduction == 'none': 60 | return loss 61 | elif self._reduction == 'mean': 62 | return torch.mean(loss) 63 | else: 64 | return torch.sum(loss) 65 | 66 | 67 | class PerceptualLoss(torch.nn.Module): 68 | def __init__(self, reduction='mean', img_weight=0, feature_weights=None, 69 | use_L1_norm=False, use_relative_error=False, 70 | pad_type='zero', 71 | path_to_vgg19_weights=None, 72 | imagenet_pretrained=False, 73 | normalize_to_vgg_input=True, 74 | mode_3d=False, mode_3d_apply_along_axes=[0]): 75 | super(PerceptualLoss, self).__init__() 76 | """ 77 | We assume that input is normalized with 0.5 mean and 0.5 std 78 | """ 79 | 80 | assert reduction in ['none', 'sum', 'mean', 'pixelwise'] 81 | 82 | self.vgg19_mean = torch.Tensor([0.485, 0.456, 0.406]) 83 | self.vgg19_std = torch.Tensor([0.229, 0.224, 0.225]) 84 | 85 | self.reduction = reduction 86 | 87 | self.use_L1_norm = use_L1_norm 88 | self.use_relative_error = use_relative_error 89 | self.mode_3d = mode_3d 90 | self.mode_3d_apply_along_axes = mode_3d_apply_along_axes 91 | self.normalize_to_vgg_input = normalize_to_vgg_input 92 | 93 | self.model = PretrainedVGG19FeatureExtractor( 94 | pad_type=pad_type, 95 | path_to_vgg19_weights=path_to_vgg19_weights, 96 | pretrained=imagenet_pretrained 97 | ) 98 | self.set_new_weights(img_weight, feature_weights) 99 | 100 | def set_reduction(self, reduction): 101 | self.reduction = reduction 102 | 103 | def forward(self, x, y): 104 | if not self.mode_3d: 105 | return self._forward(x, y) 106 | else: 107 | final_pred = None 108 | for axis in self.mode_3d_apply_along_axes: 109 | pred = self._forward(x, y, axis) 110 | if final_pred is None: 111 | final_pred = pred 112 | else: 113 | final_pred += pred 114 | return final_pred / len(self.mode_3d_apply_along_axes) 115 | 116 | def _forward(self, x, y, axis=None): 117 | if self.reduction == 'pixelwise': 118 | assert (len(self.feature_weights) + (self.img_weight != 0)) == 1 119 | 120 | layers = list(self.feature_weights.keys()) 121 | weights = list(self.feature_weights.values()) 122 | 123 | loss = None 124 | if self.img_weight != 0: 125 | loss = self.img_weight * self._loss(x, y) 126 | 127 | # preprocess 128 | if not self.mode_3d: 129 | x = self._preprocess(x) 130 | y = self._preprocess(y) 131 | 132 | f_x = self.model(x, layers) 133 | f_y = self.model(y, layers) 134 | else: 135 | if axis == 0: 136 | pass 137 | elif axis == 1: 138 | x = x.permute(0, 1, 3, 2, 4) 139 | y = y.permute(0, 1, 3, 2, 4) 140 | elif axis == 2: 141 | x = x.permute(0, 1, 4, 3, 2) 142 | y = y.permute(0, 1, 4, 3, 2) 143 | else: 144 | raise NotImplementedError() 145 | 146 | x_flat = 
x.reshape(-1, 1, *x.shape[3:]) 147 | y_flat = y.reshape(-1, 1, *y.shape[3:]) 148 | 149 | x_flat = self._preprocess(x_flat) 150 | y_flat = self._preprocess(y_flat) 151 | 152 | f_x = self.model(x_flat, layers) 153 | f_y = self.model(y_flat, layers) 154 | 155 | def convert(f_x, x_shape): 156 | f_x = [data.reshape((x_shape[0], x_shape[2], *data.shape[1:])) for data in f_x] 157 | f_x = [z.permute(0, 2, 1, 3, 4) for z in f_x] 158 | return f_x 159 | 160 | x_shape = x.shape 161 | f_x = convert(f_x, x_shape) 162 | f_y = convert(f_y, x_shape) 163 | 164 | # compute loss 165 | for i in range(len(f_x)): 166 | cur_loss = self._loss(f_x[i], f_y[i]) 167 | 168 | if loss is None: 169 | loss = weights[i] * cur_loss 170 | else: 171 | loss += weights[i] * cur_loss 172 | 173 | loss /= (self.img_weight + sum(weights)) 174 | 175 | if self.reduction == 'none': 176 | return loss 177 | elif self.reduction == 'mean': 178 | return loss.mean() 179 | elif self.reduction == 'sum': 180 | return loss.sum() 181 | elif self.reduction == 'pixelwise': 182 | loss = loss.unsqueeze(1) 183 | 184 | if self.mode_3d: 185 | mode='trilinear' 186 | else: 187 | mode = 'bilinear' 188 | loss = F.interpolate(loss, mode=mode, size=x.shape[2:]) 189 | 190 | if axis == 0: 191 | pass 192 | elif axis == 1: 193 | loss = loss.permute(0, 1, 3, 2, 4) 194 | elif axis == 2: 195 | loss = loss.permute(0, 1, 4, 3, 2) 196 | else: 197 | raise NotImplementedError() 198 | 199 | return loss 200 | else: 201 | raise NotImplementedError('Not implemented reduction: {:s}'.format(self.reduction)) 202 | 203 | def set_new_weights(self, img_weight=0, feature_weights=None): 204 | self.img_weight = img_weight 205 | if feature_weights is None: 206 | self.feature_weights = OrderedDict({}) 207 | else: 208 | self.feature_weights = OrderedDict(feature_weights) 209 | 210 | def _preprocess(self, x): 211 | assert len(x.shape) == 4 212 | 213 | if x.shape[1] != 3: 214 | x = x.expand(-1, 3, -1, -1) 215 | 216 | # denormalize 217 | vector = torch.Tensor([0.5, 0.5, 0.5]).reshape(1, 3, 1, 1).to(x.device) 218 | x = x * vector + vector 219 | 220 | # normalize 221 | if self.normalize_to_vgg_input: 222 | x = (x - self.vgg19_mean.reshape(1, 3, 1, 1).to(x.device)) / self.vgg19_std.reshape(1, 3, 1, 1).to(x.device) 223 | return x 224 | 225 | def _loss(self, x, y): 226 | if self.use_L1_norm: 227 | norm = lambda x: torch.abs(x) 228 | else: 229 | norm = lambda x: x * x 230 | 231 | diff = (x - y) 232 | if not self.use_relative_error: 233 | loss = norm(diff) 234 | else: 235 | if self.mode_3d: 236 | means = norm(x).mean(4).mean(3).mean(2).mean(1) 237 | means = means.detach() 238 | loss = norm(diff) / means.reshape((means.size(0), 1, 1, 1, 1)) 239 | else: 240 | means = norm(x).mean(3).mean(2).mean(1) 241 | means = means.detach() 242 | loss = norm(diff) / means.reshape((means.size(0), 1, 1, 1)) 243 | 244 | # perform reduction 245 | if self.reduction == 'pixelwise': 246 | return loss.mean(1) 247 | else: 248 | if self.mode_3d: 249 | return loss.mean(4).mean(3).mean(2).mean(1) 250 | else: 251 | return loss.mean(3).mean(2).mean(1) 252 | 253 | 254 | class ReconstructionLossType(Enum): 255 | perceptual = 'perceptual' 256 | relative_perceptual_L1 = 'relative_perceptual_L1' 257 | l1 = 'l1' 258 | l2 = 'l2' 259 | 260 | 261 | RECONSTRUCTION_LOSSES = { 262 | ReconstructionLossType.perceptual: PerceptualLoss, 263 | ReconstructionLossType.l1: L1Loss, 264 | ReconstructionLossType.l2: L2Loss 265 | } -------------------------------------------------------------------------------- /mood/dpa/train.py: 
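The trainer defined in this file is driven entirely by a single YAML config (real, tested examples ship with the repository under configs/). As a rough, hypothetical sketch only -- the key names are taken from the __init__ method that follows, while every value below is an illustrative placeholder rather than a verified setting -- such a config might look like:

model_type: dpa
verbose: true
random_seed: 42
finetune_from: null
mode_3d: false
checkpoint_root: ./checkpoints/brain_axis_2   # illustrative path
log_root: ./logs/brain_axis_2                 # illustrative path
num_workers: 4
max_image_res: 128
initial_image_res: 8
image_dim: 1
latent_res: 8
latent_dim: 128
trns_iter: 8000
stab_iter: 8000
log_iter: 100
val_iter: 1000
image_sample_iter: 1000
early_stopping_patience: 10
early_stopping_min_delta: 0.001
batch_sizes: {8: 64, 16: 64, 32: 32, 64: 32, 128: 16}   # one entry per resolution from initial_image_res to max_image_res
train_dataset:
  dataset_type: numpy2d
  dataset_kwargs: {image_root: ./data/preprocessed/brain_train/2d_axis_2, folds_path: ./folds/brain/train_folds_10.csv, fold: 0, split: train}
  transform_kwargs: {}
val_dataset:
  dataset_type: numpy2d
  dataset_kwargs: {image_root: ./data/preprocessed/brain_train/2d_axis_2, folds_path: ./folds/brain/train_folds_10.csv, fold: 0, split: val}
  transform_kwargs: {}
enc: {type: residual18, kwargs: {inner_dims: [16, 32, 64, 128, 256, 256]}}
dec: {type: residual18, kwargs: {inner_dims: [16, 32, 64, 128, 256, 256]}}
image_rec_loss:
  loss_type: relative_perceptual_L1
  loss_kwargs:
    weights_per_resolution:
      128: {img_weight: 0, feature_weights: {relu_2_2: 1.0}}   # one entry per training resolution; the layer name is a guess, actual names come from PretrainedVGG19FeatureExtractor
adam_kwargs: {lr: 0.001}

The enc/dec kwargs and the per-resolution loss weights simply mirror the constructor signatures of the encoder/decoder networks and PGPerceptualLoss defined above.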
-------------------------------------------------------------------------------- 1 | import os 2 | import argparse 3 | import tqdm 4 | import torch 5 | import numpy as np 6 | import yaml 7 | 8 | from mood.dpa.pg_networks import ProgGrowStageType, NetworkType 9 | from mood.dpa.pg_decoders import DECODER_NETWORKS 10 | from mood.dpa.pg_encoders import ENCODER_NETWORKS 11 | from mood.dpa.data_generators import ProgGrowImageGenerator, MixResolution 12 | from mood.utils.datasets import DatasetType, DATASETS 13 | from mood.utils.transforms import TRANSFORMS 14 | from mood.dpa.optimizer import Optimizer 15 | from mood.dpa.rec_losses import ReconstructionLossType 16 | from mood.dpa.pg_rec_losses import PG_RECONSTRUCTION_LOSSES 17 | from mood.utils.utils import save_yaml 18 | 19 | torch.backends.cudnn.deterministic = True 20 | 21 | 22 | class DeepPerceptualAutoencoder: 23 | def __init__(self, config): 24 | self.config = config 25 | self.verbose = config['verbose'] 26 | self.random_seed = config['random_seed'] 27 | self.finetune_from = config['finetune_from'] 28 | self.mode_3d = config.get('mode_3d', False) 29 | 30 | self.checkpoint_root = config['checkpoint_root'] 31 | self.log_root = config['log_root'] 32 | save_yaml(config, os.path.join(self.checkpoint_root, 'train_config.yaml')) 33 | save_yaml(config, os.path.join(self.log_root, 'train_config.yaml')) 34 | 35 | self.num_workers = config['num_workers'] 36 | self.max_image_res = config['max_image_res'] 37 | self.initial_image_res = config['initial_image_res'] 38 | self.image_dim = config['image_dim'] 39 | self.latent_res = config['latent_res'] 40 | self.latent_dim = config['latent_dim'] 41 | 42 | trns_iter = config['trns_iter'] 43 | stab_iter = config['stab_iter'] 44 | self.iters = { 45 | ProgGrowStageType.stab: stab_iter, 46 | ProgGrowStageType.trns: trns_iter 47 | } 48 | 49 | self.iters_per_res = { 50 | int(res): iters for res, iters in config.get('iters_per_res', {}).items() 51 | } 52 | # check iters_per_res 53 | error_msg = """" 54 | Incorrect 'iters_per_res' param. 
Correct example: 55 | iters_per_res: 56 | 64: 57 | trns: 1000 58 | stab: 2000 59 | """ 60 | for res, values in self.iters_per_res.items(): 61 | if not isinstance(values, dict): 62 | raise ValueError(error_msg) 63 | for stage, val in values.items(): 64 | if stage != 'stab' and stage != 'trns': 65 | raise ValueError(error_msg) 66 | 67 | self.log_iter = config['log_iter'] 68 | self.val_iter = config['val_iter'] 69 | self.early_stopping_patience = config['early_stopping_patience'] 70 | self.early_stopping_min_delta = config['early_stopping_min_delta'] 71 | self.image_sample_iter = config['image_sample_iter'] 72 | 73 | self.batch_sizes = {int(res): batch_size for res, batch_size in config['batch_sizes'].items()} 74 | # check batch sizes 75 | _res = self.initial_image_res 76 | while _res <= self.max_image_res: 77 | if _res not in self.batch_sizes.keys(): 78 | raise ValueError(f"The batch size isn't specified for the resolution {_res}") 79 | _res = int(_res * 2) 80 | 81 | "=========================================== initialize =======================================================" 82 | 83 | if self.verbose: 84 | print(yaml.dump(self.config, default_flow_style=False)) 85 | 86 | if self.random_seed is not None: 87 | torch.manual_seed(self.random_seed) 88 | np.random.seed(self.random_seed) 89 | 90 | self.stage = ProgGrowStageType.stab 91 | self.resolution = self.initial_image_res 92 | self.progress = 0 93 | self.n_iter = 0 94 | self.best_val_iter = 0 95 | self.best_val_loss = 10e6 96 | self.batch_size = int(self.batch_sizes[self.resolution]) 97 | 98 | from mood.utils.loggers import Logger 99 | os.makedirs(self.checkpoint_root, exist_ok=True) 100 | self.logger = Logger(self.log_root) 101 | 102 | "=========================================== create data dpa ================================================" 103 | 104 | dataset_type = config['train_dataset']['dataset_type'] 105 | dataset_kwargs = config['train_dataset']['dataset_kwargs'] 106 | transform_kwargs = config['train_dataset']['transform_kwargs'] 107 | 108 | transform = TRANSFORMS[DatasetType[dataset_type]](**transform_kwargs) 109 | dataset = DATASETS[DatasetType[dataset_type]]( 110 | transform=transform, 111 | **dataset_kwargs 112 | ) 113 | 114 | self.pg_image_gen = ProgGrowImageGenerator(dataset, self.max_image_res, 115 | batch_size=self.batch_size, 116 | num_workers=self.num_workers, inf=True, mode_3d=self.mode_3d) 117 | 118 | dataset_type = config['val_dataset']['dataset_type'] 119 | dataset_kwargs = config['val_dataset']['dataset_kwargs'] 120 | transform_kwargs = config['val_dataset']['transform_kwargs'] 121 | 122 | transform = TRANSFORMS[DatasetType[dataset_type]](**transform_kwargs) 123 | val_dataset = DATASETS[DatasetType[dataset_type]]( 124 | transform=transform, 125 | **dataset_kwargs 126 | ) 127 | self.val_pg_image_gen = ProgGrowImageGenerator(val_dataset, self.max_image_res, 128 | batch_size=self.batch_size, 129 | num_workers=self.num_workers, inf=False, mode_3d=self.mode_3d) 130 | 131 | "============================================= create networks ================================================" 132 | 133 | mtype = config['enc']['type'] 134 | kwargs = config['enc']['kwargs'] 135 | self.enc_pg_networks = ENCODER_NETWORKS[NetworkType[mtype]]( 136 | max_input_res=self.max_image_res, 137 | output_res=self.latent_res, 138 | input_dim=self.image_dim, 139 | output_dim=self.latent_dim, 140 | mode_3d=self.mode_3d, 141 | **kwargs 142 | ) 143 | 144 | mtype = config['dec']['type'] 145 | kwargs = config['dec']['kwargs'] 146 | 
self.dec_pg_networks = DECODER_NETWORKS[NetworkType[mtype]]( 147 | input_res=self.latent_res, 148 | max_output_res=self.max_image_res, 149 | input_dim=self.latent_dim, 150 | output_dim=self.image_dim, 151 | mode_3d=self.mode_3d, 152 | **kwargs 153 | ) 154 | 155 | if self.verbose: 156 | print("================================== MODELS ON LARGEST RESOLUTION ==================================") 157 | print("====================== Encoder =============================") 158 | print(self.enc_pg_networks.get_net(ProgGrowStageType.stab, self.max_image_res)) 159 | print("====================== Decoder ===========================") 160 | print(self.dec_pg_networks.get_net(ProgGrowStageType.stab, self.max_image_res)) 161 | 162 | self.dec = self.dec_pg_networks.get_net(self.stage, self.resolution).cuda() 163 | self.enc = self.enc_pg_networks.get_net(self.stage, self.resolution).cuda() 164 | 165 | "=========================================== create reconstruction losses =====================================" 166 | 167 | loss_type = config['image_rec_loss']['loss_type'] 168 | loss_kwargs = config['image_rec_loss']['loss_kwargs'] 169 | self.image_rec_loss = PG_RECONSTRUCTION_LOSSES[ReconstructionLossType[loss_type]]( 170 | max_resolution=self.max_image_res, mode_3d=self.mode_3d, **loss_kwargs) 171 | self.image_rec_loss.set_stage_resolution(self.stage, self.resolution) 172 | 173 | "=========================================== create optimizers ================================================" 174 | 175 | adam_kwargs = config['adam_kwargs'] 176 | self.optimizer = Optimizer( 177 | enc_params=self.enc.parameters(), 178 | dec_params=self.dec.parameters(), 179 | image_rec_loss=self.image_rec_loss, 180 | adam_kwargs=adam_kwargs, 181 | ).cuda() 182 | 183 | "=========================================== data for logging =================================================" 184 | 185 | self.pg_image_gen.set_stage_resolution(ProgGrowStageType.stab, self.max_image_res, batch_size=self.batch_size) 186 | self.display_x = next(self.pg_image_gen)[:4].cpu() 187 | self.pg_image_gen.set_stage_resolution(self.stage, self.resolution, self.batch_size) 188 | if self.val_pg_image_gen is not None: 189 | self.val_pg_image_gen.set_stage_resolution(self.stage, self.resolution, self.batch_size) 190 | self.mix_res_module = MixResolution(self.stage, self.resolution, self.max_image_res, mode_3d=self.mode_3d) 191 | 192 | "=========================================== initialization ===================================================" 193 | 194 | self.updating_models_during_training = \ 195 | [self.enc, self.dec, self.image_rec_loss, 196 | self.pg_image_gen, self.val_pg_image_gen, self.mix_res_module] 197 | 198 | if self.finetune_from is not None: 199 | self.load_state(torch.load(self.finetune_from)) 200 | else: 201 | self._init_stage() 202 | 203 | def _create_new_stage(self): 204 | del self.dec, self.enc, self.updating_models_during_training 205 | if self.stage == ProgGrowStageType.stab: 206 | self.stage = ProgGrowStageType.trns 207 | self.resolution *= 2 208 | else: 209 | self.stage = ProgGrowStageType.stab 210 | self.progress = 0 211 | self._init_stage() 212 | 213 | def _init_stage(self): 214 | self.batch_size = int(self.batch_sizes[self.resolution]) 215 | 216 | "=========================== update data generators and global params =========================================" 217 | 218 | self.pg_image_gen.set_stage_resolution(self.stage, resolution=self.resolution, batch_size=self.batch_size) 219 | 
self.val_pg_image_gen.set_stage_resolution(self.stage, resolution=self.resolution, batch_size=self.batch_size) 220 | self.mix_res_module = MixResolution(self.stage, self.resolution, self.max_image_res, mode_3d=self.mode_3d) 221 | 222 | "========================================== update networks ===================================================" 223 | 224 | self.enc = self.enc_pg_networks.get_net(self.stage, self.resolution).cuda() 225 | self.dec = self.dec_pg_networks.get_net(self.stage, self.resolution).cuda() 226 | self.image_rec_loss.set_stage_resolution(self.stage, self.resolution) 227 | 228 | self.optimizer.set_new_params( 229 | dec_params=self.dec.parameters(), 230 | enc_params=self.enc.parameters(), 231 | image_rec_loss=self.image_rec_loss) 232 | 233 | "========================================== other updates ===================================================" 234 | 235 | self.updating_models_during_training = \ 236 | [self.enc, self.dec, self.image_rec_loss, 237 | self.pg_image_gen, self.val_pg_image_gen, self.mix_res_module] 238 | 239 | total_iterations = self.iters[self.stage] 240 | total_iterations = self.iters_per_res\ 241 | .get(self.resolution, {self.stage.name: total_iterations})\ 242 | .get(self.stage.name, total_iterations) 243 | 244 | self.delta_progress = 1.0 / total_iterations 245 | self.tqdm_logger = tqdm.tqdm(total=total_iterations) 246 | 247 | if self.verbose: 248 | print("====================== Encoder =============================") 249 | print(self.enc) 250 | print("====================== Decoder ===========================") 251 | print(self.dec) 252 | 253 | def train(self): 254 | while not ( 255 | self.resolution == self.max_image_res and self.stage == ProgGrowStageType.stab and self.progress >= 1.0): 256 | 257 | "========================================== updating params ===============================================" 258 | 259 | self.n_iter += 1 260 | self.tqdm_logger.update(1) 261 | self.progress = np.clip(self.progress + self.delta_progress, 0, 1.0) 262 | 263 | if self.stage == ProgGrowStageType.trns: 264 | progress_resolution = self.resolution * self.progress + (self.resolution / 2) * ( 265 | 1 - self.progress) 266 | 267 | for model in self.updating_models_during_training: 268 | if model is not None: 269 | model.set_progress(self.progress) 270 | else: 271 | progress_resolution = self.resolution 272 | 273 | "=========================================== train step ===================================================" 274 | 275 | real_x = next(self.pg_image_gen).cuda() 276 | rec_x = self.dec(self.enc(real_x)) 277 | loss_info = self.optimizer.compute_loss(real_x, rec_x, update_parameters=True) 278 | 279 | del real_x, rec_x 280 | 281 | "============================================== logging ===================================================" 282 | 283 | if self.tqdm_logger.n % self.log_iter == 0: 284 | self.logger.add_scalars('train/losses', loss_info, self.n_iter) 285 | self.logger.add_scalar('train/resolution', progress_resolution, self.n_iter) 286 | 287 | if self.tqdm_logger.n % self.image_sample_iter == 0: 288 | self._save_image_sample() 289 | 290 | "============================================ checkpoint ==================================================" 291 | 292 | if self.tqdm_logger.n % self.val_iter == 0: 293 | val_loss = self._compute_val_loss() 294 | self.logger.add_scalar('val/total', val_loss, self.n_iter) 295 | 296 | # early stopping at the last stage 297 | if self.resolution == self.max_image_res and self.stage == ProgGrowStageType.stab: 298 | 
if val_loss < self.best_val_loss - self.early_stopping_min_delta: 299 | self.best_val_loss = val_loss 300 | self.best_val_iter = self.n_iter 301 | self._do_checkpoint() 302 | self._save_ad_model() 303 | else: 304 | if (self.n_iter - self.best_val_iter) / self.val_iter > self.early_stopping_patience: 305 | break 306 | 307 | "========================================== create new stage ==============================================" 308 | 309 | if abs(self.progress - 1.0) < 1e-6 and \ 310 | not(self.resolution == self.max_image_res and self.stage == ProgGrowStageType.stab): 311 | self._do_checkpoint() 312 | self._save_ad_model() 313 | self._create_new_stage() 314 | 315 | # if we did not check last iteration for performance, then check and save 316 | if self.tqdm_logger.n % self.val_iter != 0: 317 | val_loss = self._compute_val_loss() 318 | self.logger.add_scalar('val/total', val_loss, self.n_iter) 319 | if val_loss < self.best_val_loss: 320 | self.best_val_iter = self.n_iter 321 | self._do_checkpoint() 322 | self._save_ad_model() 323 | 324 | def get_state(self): 325 | return { 326 | 'config': self.config, 327 | 'stage': self.stage.name, 328 | 'resolution': self.resolution, 329 | 'progress': self.progress, 330 | 'n_iter': self.n_iter, 331 | 'best_val_loss': self.best_val_loss, 332 | 'best_val_iter': self.best_val_iter, 333 | 'display_x': self.display_x.data, 334 | 'optimizer': self.optimizer.state_dict(), 335 | 'dec': self.dec.state_dict(), 336 | 'enc': self.enc.state_dict(), 337 | 'mix_res_module': self.mix_res_module.state_dict(), 338 | } 339 | 340 | def load_state(self, state): 341 | self.stage = ProgGrowStageType[state['stage']] 342 | self.resolution = state['resolution'] 343 | self.progress = state['progress'] 344 | self.n_iter = state['n_iter'] 345 | self.best_val_loss = state['best_val_loss'] 346 | self.best_val_iter = state['best_val_iter'] 347 | self.display_x.data = state['display_x'] 348 | 349 | self._init_stage() 350 | 351 | self.dec.load_state_dict(state['dec']) 352 | self.enc.load_state_dict(state['enc']) 353 | 354 | self.mix_res_module.load_state_dict(state['mix_res_module']) 355 | 356 | self.tqdm_logger.update(int(len(self.tqdm_logger) * self.progress)) 357 | 358 | def get_state_anomaly_detection_model(self): 359 | return { 360 | 'niter': self.tqdm_logger.n, 361 | 'config': self.config, 362 | 'stage': self.stage.name, 363 | 'resolution': self.resolution, 364 | 'progress': self.progress, 365 | 'enc': self.enc.state_dict(), 366 | 'dec': self.dec.state_dict(), 367 | } 368 | 369 | @staticmethod 370 | def load_anomaly_detection_model(state_dict): 371 | config = state_dict['config'] 372 | stage = ProgGrowStageType[state_dict['stage']] 373 | resolution = state_dict['resolution'] 374 | progress = state_dict['progress'] 375 | 376 | mtype = config['enc']['type'] 377 | kwargs = config['enc']['kwargs'] 378 | mode_3d = config.get('mode_3d', False) 379 | 380 | enc_pg_networks = ENCODER_NETWORKS[NetworkType[mtype]]( 381 | max_input_res=config['max_image_res'], 382 | output_res=config['latent_res'], 383 | input_dim=config['image_dim'], 384 | output_dim=config['latent_dim'], 385 | mode_3d=mode_3d, 386 | **kwargs 387 | ) 388 | 389 | mtype = config['dec']['type'] 390 | kwargs = config['dec']['kwargs'] 391 | dec_pg_networks = DECODER_NETWORKS[NetworkType[mtype]]( 392 | input_res=config['latent_res'], 393 | max_output_res=config['max_image_res'], 394 | input_dim=config['latent_dim'], 395 | output_dim=config['image_dim'], 396 | mode_3d=mode_3d, 397 | **kwargs 398 | ) 399 | 400 | enc = 
enc_pg_networks.get_net(stage, resolution) 401 | dec = dec_pg_networks.get_net(stage, resolution) 402 | enc.load_state_dict(state_dict['enc']) 403 | dec.load_state_dict(state_dict['dec']) 404 | 405 | loss_type = config['image_rec_loss']['loss_type'] 406 | loss_kwargs = config['image_rec_loss']['loss_kwargs'] 407 | image_rec_loss = PG_RECONSTRUCTION_LOSSES[ReconstructionLossType[loss_type]]( 408 | config['max_image_res'], mode_3d=mode_3d, **loss_kwargs) 409 | image_rec_loss.set_stage_resolution(stage, resolution) 410 | image_rec_loss.set_reduction('none') 411 | 412 | mix_res_module = MixResolution(stage, resolution, config['max_image_res'], mode_3d=mode_3d) 413 | 414 | for model in [enc, dec, image_rec_loss, mix_res_module]: 415 | model.set_progress(progress) 416 | 417 | n_iter = state_dict['niter'] 418 | 419 | enc, dec, image_rec_loss = enc.cuda().eval(), dec.cuda().eval(), image_rec_loss.cuda().eval() 420 | return enc, dec, image_rec_loss, (stage, resolution, progress, n_iter, mix_res_module) 421 | 422 | @staticmethod 423 | def predict_anomaly_score(loaded_model, image, reduction): 424 | enc, dec, image_rec_loss, _ = loaded_model 425 | 426 | with torch.no_grad(): 427 | rec_image = dec(enc(image)).detach() 428 | image_rec_loss.set_reduction(reduction) 429 | pred = image_rec_loss(image, rec_image) 430 | 431 | return pred 432 | 433 | def _compute_val_loss(self): 434 | self.enc.eval() 435 | self.dec.eval() 436 | torch.set_grad_enabled(False) 437 | 438 | sum_loss = 0 439 | count = 0 440 | 441 | for real_x in self.val_pg_image_gen: 442 | real_x = real_x.cuda() 443 | rec_x = self.dec(self.enc(real_x)) 444 | 445 | loss_info = self.optimizer.compute_loss(real_x, rec_x, update_parameters=False) 446 | 447 | val_loss = sum(loss_info.values()) 448 | 449 | batch_size = real_x.shape[0] 450 | sum_loss += val_loss * batch_size 451 | count += batch_size 452 | 453 | val_avg_loss = sum_loss / count 454 | 455 | self.enc.train() 456 | self.dec.train() 457 | torch.set_grad_enabled(True) 458 | 459 | return val_avg_loss 460 | 461 | def _do_checkpoint(self): 462 | torch.save(self.get_state(), os.path.join(self.checkpoint_root, 'checkpoint.tar')) 463 | 464 | def _save_ad_model(self): 465 | if self.stage == ProgGrowStageType.stab: 466 | ad_model_path = 'anomaly_detection_res_{}_iter_{}.tar'.format(self.resolution, self.n_iter) 467 | torch.save(self.get_state_anomaly_detection_model(), os.path.join(self.checkpoint_root, ad_model_path)) 468 | 469 | torch.save(self.get_state_anomaly_detection_model(), os.path.join(self.checkpoint_root, 'anomaly_detection.tar')) 470 | 471 | def _save_image_sample(self): 472 | torch.set_grad_enabled(False) 473 | self.optimizer.eval() 474 | 475 | real_images = self.mix_res_module(self.display_x) 476 | images = torch.cat([ 477 | real_images, 478 | self.dec(self.enc(real_images.cuda())).cpu().detach() 479 | ], 0) 480 | 481 | # if 3d mode, get only central slice 482 | if self.mode_3d: 483 | images = images[:, :, int(images.shape[2] // 2)] 484 | name = 'res_{}_stage_{}_niter{}.png'.format(self.resolution, self.stage.name, 485 | self.tqdm_logger.n) 486 | 487 | self.logger.save_grid(images, grid_size=self.max_image_res * 4, name=name, nrow=min(4, len(self.display_x))) 488 | self.logger.save_grid(images, grid_size=self.max_image_res * 4, name='sample.png', nrow=min(4, len(self.display_x))) 489 | 490 | torch.set_grad_enabled(True) 491 | self.optimizer.train() 492 | 493 | 494 | def main(config): 495 | trainer = DeepPerceptualAutoencoder(config) 496 | trainer.train() 497 | 498 | 499 | if __name__ 
== '__main__': 500 | parser = argparse.ArgumentParser() 501 | parser.add_argument('config', type=str, help='Path to config') 502 | args = parser.parse_args() 503 | 504 | with open(args.config, 'r') as stream: 505 | config = yaml.load(stream, Loader=yaml.FullLoader) 506 | 507 | main(config) 508 | -------------------------------------------------------------------------------- /mood/main.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import yaml 3 | import time 4 | 5 | import mood.dpa.train 6 | import mood.dpa.evaluate 7 | import mood.dpa.inference_evaluate_3d 8 | 9 | 10 | if __name__ == '__main__': 11 | start_time = time.time() 12 | parser = argparse.ArgumentParser() 13 | parser.add_argument('action', type=str, choices=['train', 'evaluate', 'inference_evaluate_3d']) 14 | parser.add_argument('config', type=str, help='Path to config') 15 | 16 | args = parser.parse_args() 17 | 18 | action = args.action 19 | config_path = args.config 20 | 21 | with open(args.config, 'r') as stream: 22 | config = yaml.load(stream, Loader=yaml.FullLoader) 23 | 24 | model_type = config['model_type'] 25 | if model_type == 'dpa': 26 | if action == 'train': 27 | mood.dpa.train.main(config) 28 | elif action == 'evaluate': 29 | mood.dpa.evaluate.main(config) 30 | elif action == 'inference_evaluate_3d': 31 | mood.dpa.inference_evaluate_3d.main(config) 32 | else: 33 | raise NotImplementedError() 34 | else: 35 | raise NotImplementedError() 36 | 37 | print(f"Finished. Took: {(time.time() - start_time) / 60:.02f}m") 38 | -------------------------------------------------------------------------------- /mood/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ninatu/mood_challenge/c5b8d5ea0bd5fa4be512d099ab5f271b3060e33f/mood/utils/__init__.py -------------------------------------------------------------------------------- /mood/utils/datasets.py: -------------------------------------------------------------------------------- 1 | import os 2 | from torch.utils.data import Dataset 3 | import numpy as np 4 | from enum import Enum 5 | import PIL.Image 6 | import pandas as pd 7 | import nibabel as nib 8 | import tqdm 9 | 10 | 11 | class NumpyDatasetBase(Dataset): 12 | def __init__(self, image_root, folds_path, fold, split, filename_endwith, mask_root=None, transform=None, 13 | mask_transform=None, return_image_name=False): 14 | 15 | self.image_root = image_root 16 | self.mask_root = mask_root 17 | self.folds_path = folds_path 18 | self.fold = fold 19 | self.split = split 20 | self.transform = transform 21 | self.mask_transform = mask_transform 22 | self.return_image_name = return_image_name 23 | 24 | folds = pd.read_csv(folds_path) 25 | if split == 'train': 26 | folds = folds[folds.test_fold != self.fold] 27 | elif split == 'val': 28 | folds = folds[folds.test_fold == self.fold] 29 | elif split == 'all': 30 | folds = folds 31 | else: 32 | raise NotImplementedError() 33 | 34 | self.filenames = folds.filename 35 | self.filenames = [name.replace('.nii.gz', '') for name in self.filenames] 36 | 37 | image_names = os.listdir(self.image_root) 38 | image_names = [name for name in image_names if name.endswith(filename_endwith)] 39 | self.image_names = [] 40 | for name in image_names: 41 | basename = name.replace(filename_endwith, '') 42 | basename = basename.split('_')[0] 43 | if basename in self.filenames: 44 | self.image_names.append(name) 45 | 46 | def __len__(self): 47 | return 
len(self.image_names) 48 | 49 | 50 | class Numpy2DDataset(NumpyDatasetBase): 51 | def __init__(self, image_root, folds_path, fold, split, mask_root=None, transform=None, 52 | mask_transform=None, return_image_name=False, brain_size_path=None, filter_brain_size=None, 53 | cache=False, max_dataset_size=None, channels3=False): 54 | super().__init__(image_root, folds_path, fold, split, filename_endwith='.npy', 55 | mask_root=mask_root, transform=transform, mask_transform=mask_transform, 56 | return_image_name=return_image_name) 57 | 58 | self.brain_size_path = brain_size_path 59 | self.filter_brain_size = filter_brain_size 60 | self.max_dataset_size = max_dataset_size 61 | self.channels3 = channels3 62 | 63 | if self.filter_brain_size is not None: 64 | assert self.brain_size_path is not None 65 | 66 | brain_size = pd.read_csv(self.brain_size_path) 67 | unknown_image_names = list(set(self.image_names).difference(brain_size.filename)) 68 | 69 | brain_size = brain_size[brain_size.filename.isin(self.image_names)] 70 | filtered_image_names = list(brain_size[brain_size.brain_size >= self.filter_brain_size].filename) 71 | self.image_names = unknown_image_names + filtered_image_names 72 | 73 | if self.max_dataset_size is not None: 74 | np.random.shuffle(self.image_names) 75 | self.image_names = self.image_names[:max_dataset_size] 76 | 77 | self.cache_data = {} 78 | self.cache = False 79 | if cache: 80 | print("Caching ....") 81 | for i in tqdm.tqdm(range(len(self))): 82 | self.cache_data[i] = self[i] 83 | self.cache = True 84 | 85 | def __getitem__(self, idx): 86 | if self.cache: 87 | return self.cache_data[idx] 88 | 89 | image_name = self.image_names[idx] 90 | image = np.load(os.path.join(self.image_root, image_name)) 91 | if not self.channels3: 92 | image = PIL.Image.fromarray(image.astype(np.float32), mode='F') 93 | else: 94 | image = PIL.Image.fromarray((np.array(image) * 255).astype(np.uint8)) 95 | image = image.convert("RGB") 96 | 97 | if self.transform: 98 | image = self.transform(image) 99 | 100 | if self.mask_root is None: 101 | if self.return_image_name: 102 | return image, image_name 103 | else: 104 | return image 105 | 106 | mask = np.load(os.path.join(self.mask_root, image_name)) 107 | mask = PIL.Image.fromarray(mask.astype(np.float32), mode='F') 108 | 109 | if self.mask_transform: 110 | mask = self.mask_transform(mask) 111 | 112 | if self.return_image_name: 113 | return image, mask, image_name 114 | else: 115 | return image, mask 116 | 117 | 118 | class Nifti3DDataset(NumpyDatasetBase): 119 | def __init__(self, image_root, folds_path, fold, split, mask_root=None, transform=None, 120 | mask_transform=None, return_image_name=False): 121 | super().__init__(image_root, folds_path, fold, split, filename_endwith='.nii.gz', 122 | mask_root=mask_root, transform=transform, mask_transform=mask_transform, 123 | return_image_name=return_image_name) 124 | self.image_names = sorted(self.image_names) 125 | 126 | def __getitem__(self, idx): 127 | 128 | image_name = self.image_names[idx] 129 | nimage = nib.load(os.path.join(self.image_root, image_name)) 130 | 131 | image = nimage.get_data().astype(np.float32) 132 | affine = nimage.affine 133 | 134 | if self.transform: 135 | image = self.transform(image) 136 | 137 | if self.mask_root is None: 138 | if self.return_image_name: 139 | return image, image_name 140 | else: 141 | return image 142 | 143 | nmask = nib.load(os.path.join(self.mask_root, image_name)) 144 | mask = nmask.get_data() 145 | 146 | if self.mask_transform: 147 | mask = 
self.mask_transform(mask) 148 | 149 | if self.return_image_name: 150 | return image, mask, affine, image_name 151 | else: 152 | return image, mask 153 | 154 | 155 | class DatasetType(Enum): 156 | numpy2d = 'numpy2d' 157 | nifti3d = 'nifti3d' 158 | 159 | 160 | DATASETS = { 161 | DatasetType.numpy2d: Numpy2DDataset, 162 | DatasetType.nifti3d: Nifti3DDataset, 163 | } 164 | -------------------------------------------------------------------------------- /mood/utils/loggers.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | from PIL import Image 4 | 5 | from tensorboardX import SummaryWriter 6 | from torchvision.utils import make_grid 7 | 8 | 9 | def ensure_dir(dir_name): 10 | os.makedirs(dir_name, exist_ok=True) 11 | 12 | 13 | class Logger(object): 14 | def __init__(self, root): 15 | self.root = root 16 | self.grid_dir = os.path.join(self.root, 'images') 17 | self.tensorboard = os.path.join(self.root, 'tensorboard') 18 | 19 | ensure_dir(self.grid_dir) 20 | ensure_dir(self.tensorboard) 21 | 22 | self.writer = SummaryWriter(self.tensorboard) 23 | 24 | def add_scalar(self, name, value, iter): 25 | self.writer.add_scalar(name, value, iter) 26 | 27 | def add_scalars(self, main_name, tag_value_dict, iter): 28 | self.writer.add_scalars(main_name, tag_value_dict, iter) 29 | 30 | def add_histogram(self, name, values, iter): 31 | self.writer.add_histogram(name, values, iter) 32 | 33 | def save_grid(self, images, grid_size, name, nrow=4): 34 | n = images.size(0) 35 | grid = make_grid(images, nrow=nrow) 36 | grid = (grid * 0.5 + 0.5) * 255 37 | grid = grid.clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy() 38 | grid = Image.fromarray(grid) 39 | grid = grid.resize((grid_size * nrow, grid_size * int(n / nrow)), Image.NEAREST) 40 | grid.save(os.path.join(self.grid_dir, name)) 41 | 42 | def save_config(self, config_dict, name): 43 | with open(os.path.join(self.root, name), 'w') as f_out: 44 | json.dump(config_dict, f_out) 45 | -------------------------------------------------------------------------------- /mood/utils/preprocessing/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ninatu/mood_challenge/c5b8d5ea0bd5fa4be512d099ab5f271b3060e33f/mood/utils/preprocessing/__init__.py -------------------------------------------------------------------------------- /mood/utils/preprocessing/create_2d_val_dataset_with_synthetic_anomalies.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | from tqdm import tqdm 4 | import argparse 5 | import skimage.draw 6 | import pandas as pd 7 | 8 | from mood.utils.preprocessing.create_val_dataset_with_synthetic_anomalies \ 9 | import gen_coord, gen_radius, gen_alpha, get_center_and_radius, gen_color, gen_noise 10 | 11 | 12 | def draw_circle(image, mask, h, w, r): 13 | x_h, x_w = gen_coord(h, r), gen_coord(w, r) 14 | radius = gen_radius(r) 15 | 16 | c = gen_color() 17 | alpha = gen_alpha() 18 | 19 | rr, cc = skimage.draw.circle(x_w, x_h, radius, image.shape) 20 | image[rr, cc] = alpha * image[rr, cc] + (1 - alpha) * (c + gen_noise(c, len(rr))) 21 | np.clip(image, 0, 1, out=image) 22 | mask[rr, cc] = 1 23 | 24 | 25 | def draw_ellipse(image, mask, h, w, r): 26 | x_h, x_w = gen_coord(h, r), gen_coord(w, r) 27 | radius1 = gen_radius(r) 28 | radius2 = gen_radius(r) 29 | 30 | c = gen_color() 31 | alpha = gen_alpha() 32 | rr, cc = skimage.draw.ellipse(x_w, x_h, radius1, 
radius2, image.shape) 33 | image[rr, cc] = alpha * image[rr, cc] + (1 - alpha) * (c + gen_noise(c, len(rr))) 34 | np.clip(image, 0, 1, out=image) 35 | mask[rr, cc] = 1 36 | 37 | 38 | def create_anomaly_2d_dataset(input_folder, output_image_folder, output_mask_folder, folds_path, fold): 39 | zero_mask_path = None 40 | 41 | filename_endwith = '.npy' 42 | filenames = os.listdir(input_folder) 43 | # print(len(filenames)) 44 | filenames = [name for name in filenames if name.endswith(filename_endwith)] 45 | # print(len(filenames)) 46 | 47 | if folds_path is not None: 48 | folds = pd.read_csv(folds_path) 49 | folds = folds[folds.test_fold == int(fold)] 50 | fold_filenames = folds.filename 51 | fold_filenames = [name.replace('.nii.gz', '') for name in fold_filenames] 52 | # print(len(fold_filenames), fold_filenames[:10]) 53 | 54 | filtered_filenames = [] 55 | for name in filenames: 56 | basename = name.replace(filename_endwith, '') 57 | basename = basename.split('_')[0] 58 | # print(basename) 59 | if basename in fold_filenames: 60 | filtered_filenames.append(name) 61 | filenames = filtered_filenames 62 | 63 | for fname in tqdm(sorted(filenames)): 64 | base_name, ext = os.path.splitext(fname) 65 | path = os.path.join(input_folder, fname) 66 | image = np.load(path) 67 | image = np.array(image).astype(np.float32) 68 | mask = np.zeros_like(image).astype(np.uint8) 69 | 70 | # Add normal image 71 | os.link(path, os.path.join(output_image_folder, fname)) 72 | if zero_mask_path is None: 73 | zero_mask_path = os.path.join(output_mask_folder, fname) 74 | np.save(zero_mask_path, mask) 75 | else: 76 | os.link(zero_mask_path, os.path.join(output_mask_folder, fname)) 77 | 78 | # Create and add abnormal image 79 | brain_mask = image > 0 80 | if brain_mask.sum() < 4000: 81 | continue 82 | 83 | h, w, r = get_center_and_radius(brain_mask) 84 | draw_func = [draw_circle, draw_ellipse][np.random.randint(0, 2)] 85 | for _ in range(np.random.randint(1, 4)): 86 | draw_func(image, mask, h, w, r) 87 | 88 | outname = f'{base_name}_anom{ext}' 89 | np.save(os.path.join(output_image_folder, outname), image.astype(np.float16)) 90 | np.save(os.path.join(output_mask_folder, outname), mask) 91 | 92 | 93 | if __name__ == "__main__": 94 | parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) 95 | parser.add_argument("-i", "--input_dir", 96 | default='./data/preprocessed/brain_train/2d_axis_2', 97 | help='input_dir') 98 | parser.add_argument("-o", "--output_image_dir", 99 | default='./data/preprocessed/brain_train/2d_axis_2_test', 100 | help='output_image_dir') 101 | parser.add_argument("-m", "--output_mask_dir", 102 | default='./data/preprocessed/brain_train/2d_axis_2_test_masks/', 103 | help='output_mask_dir') 104 | parser.add_argument("-p", "--folds_path", required=False, type=str, default=None, 105 | help='Path to csv file with folds info. ' 106 | 'Use if you want to create a synthetic dataset only from one "test" fold of input dataset') 107 | parser.add_argument("-f", "--fold", required=False, type=str, default=None, 108 | help='# of fold. 
' 109 | 'Use if you want to create a synthetic dataset only from one "test" fold of input dataset') 110 | 111 | args = parser.parse_args() 112 | 113 | input_dir = args.input_dir 114 | output_image_dir = args.output_image_dir 115 | output_mask_dir = args.output_mask_dir 116 | folds_path = args.folds_path 117 | fold = args.fold 118 | 119 | os.makedirs(output_image_dir, exist_ok=True) 120 | os.makedirs(output_mask_dir, exist_ok=True) 121 | 122 | create_anomaly_2d_dataset(input_dir, output_image_dir, output_mask_dir, folds_path, fold) 123 | -------------------------------------------------------------------------------- /mood/utils/preprocessing/create_folds.py: -------------------------------------------------------------------------------- 1 | import os 2 | from sklearn.model_selection import KFold 3 | import pandas as pd 4 | import numpy as np 5 | import argparse 6 | 7 | 8 | def create_folds(input_folder: str, output_path: str, n_folds: int): 9 | files = os.listdir(input_folder) 10 | files = [filename for filename in files if os.path.splitext(filename)[1] == '.gz'] 11 | np.random.shuffle(files) 12 | files = np.array(files) 13 | 14 | df = pd.DataFrame(index=files, columns=['test_fold']) 15 | df.index.name = 'filename' 16 | 17 | for i_fold, (train_index, test_index) in enumerate(KFold(n_splits=n_folds).split(files)): 18 | test_patients = files[test_index] 19 | df.loc[test_patients] = i_fold 20 | 21 | df.to_csv(output_path) 22 | 23 | 24 | if __name__ == "__main__": 25 | parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) 26 | parser.add_argument("-i", "--input_dir", 27 | default='./data/original/brain_train/', 28 | help='input_dir') 29 | parser.add_argument("-o", "--output_path", 30 | default='./folds/brain/train_folds_10.csv', 31 | help='output_path') 32 | parser.add_argument("-n", "--n_folds", 33 | type=int, 34 | default=10, 35 | help='n_folds') 36 | 37 | args = parser.parse_args() 38 | 39 | input_dir = args.input_dir 40 | output_path = args.output_path 41 | n_folds = args.n_folds 42 | 43 | os.makedirs(os.path.dirname(output_path), exist_ok=True) 44 | 45 | create_folds(input_dir, output_path, n_folds) 46 | -------------------------------------------------------------------------------- /mood/utils/preprocessing/create_val_dataset_with_synthetic_anomalies.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | from tqdm import tqdm 4 | import argparse 5 | import nibabel as nib 6 | import pyellipsoid.drawing 7 | import pandas as pd 8 | 9 | 10 | def get_center_and_radius(mask): 11 | def get_center(row): 12 | return np.array(range(len(row)))[row > 0].sum() / (row > 0).sum() 13 | 14 | row = mask.sum(axis=0) 15 | h = get_center(row) 16 | h_r = (row > 0).sum() / 2 17 | 18 | row = mask.sum(axis=1) 19 | w = get_center(row) 20 | w_r = (row > 0).sum() / 2 21 | 22 | return h, w, (h_r + w_r) / 2 23 | 24 | 25 | def gen_coord(h, r): 26 | return np.random.normal(h, r / 3) 27 | 28 | 29 | def gen_radius(r): 30 | return max(5, np.random.normal(r / 16, r / 16)) 31 | 32 | 33 | def gen_color(): 34 | return np.random.rand() 35 | 36 | 37 | def gen_noise(color, size): 38 | return np.random.rand(size) * color / 10 39 | 40 | 41 | def gen_alpha(): 42 | return np.clip(np.random.exponential(scale=1 / 4), 0, 1) 43 | 44 | 45 | def get_3d_center_and_radius(mask): 46 | def get_center(row): 47 | return np.array(range(len(row)))[row > 0].sum() / (row > 0).sum() 48 | def get_radius(row): 49 | return (row > 0).sum() / 2 50 
| row = mask.sum(axis=2).sum(axis=1) 51 | h, h_r = get_center(row), get_radius(row) 52 | 53 | row = mask.sum(axis=2).sum(axis=0) 54 | w, w_r = get_center(row), get_radius(row) 55 | 56 | row = mask.sum(axis=1).sum(axis=0) 57 | c, c_r = get_center(row), get_radius(row) 58 | 59 | return h, w, c, (h_r + w_r + c_r) / 3 60 | 61 | 62 | def gen_angle(): 63 | return np.deg2rad(np.random.uniform(0, 90)) 64 | 65 | 66 | def draw_ellipsoid(image, mask, h, w, c, r): 67 | centers = gen_coord(h, r), gen_coord(w, r), gen_coord(c, r) 68 | radius = gen_radius(r), gen_radius(r), gen_radius(r) 69 | angles = gen_angle(), gen_angle(), gen_angle() 70 | 71 | c = gen_color() 72 | alpha = gen_alpha() 73 | ellipse = pyellipsoid.drawing.make_ellipsoid_image(image.shape, centers, radius, angles) > 0 74 | image_slice = image[ellipse] 75 | image[ellipse] = alpha * image_slice + (1 - alpha) * (c + gen_noise(c, len(image_slice))) 76 | np.clip(image, 0, 1, out=image) 77 | mask[ellipse] = 1 78 | 79 | 80 | def create_anomaly_3d_dataset(input_folder, output_image_folder, output_mask_folder, folds_path=None, fold=0): 81 | zero_mask_path = None 82 | 83 | EXT = ".nii.gz" 84 | filenames = os.listdir(input_folder) 85 | filenames = [name for name in filenames if name.endswith(EXT)] 86 | 87 | if folds_path is not None: 88 | folds = pd.read_csv(folds_path) 89 | folds = folds[folds.test_fold == int(fold)] 90 | fold_filenames = folds.filename 91 | fold_filenames = [name.replace('.nii.gz', '') for name in fold_filenames] 92 | 93 | filtered_filenames = [] 94 | for name in filenames: 95 | basename = name.replace(EXT, '') 96 | basename = basename.split('_')[0] 97 | if basename in fold_filenames: 98 | filtered_filenames.append(name) 99 | filenames = filtered_filenames 100 | 101 | for fname in tqdm(sorted(filenames)): 102 | if not fname.endswith(EXT): 103 | continue 104 | base_name = fname.replace(EXT, '') 105 | 106 | path = os.path.join(input_folder, fname) 107 | nimage = nib.load(path) 108 | image = nimage.get_fdata() 109 | affine = nimage.affine 110 | image = image.astype(np.float32) 111 | mask = np.zeros_like(image).astype(np.uint8) 112 | 113 | # Add normal image 114 | os.link(path, os.path.join(output_image_folder, fname)) 115 | if zero_mask_path is None: 116 | zero_mask_path = os.path.join(output_mask_folder, fname) 117 | nib.save(nib.Nifti1Image(mask, affine=affine), zero_mask_path) 118 | else: 119 | os.link(zero_mask_path, os.path.join(output_mask_folder, fname)) 120 | 121 | # Create and add abnormal image 122 | brain_mask = image > 0 123 | h, w, c, r = get_3d_center_and_radius(brain_mask) 124 | for _ in range(np.random.randint(1, 4)): 125 | draw_ellipsoid(image, mask, h, w, c, r) 126 | 127 | outname = f'{base_name}_anom{EXT}' 128 | nib.save(nib.Nifti1Image(image, affine=affine), os.path.join(output_image_folder, outname)) 129 | nib.save(nib.Nifti1Image(mask, affine=affine), os.path.join(output_mask_folder, outname)) 130 | 131 | 132 | if __name__ == "__main__": 133 | parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) 134 | parser.add_argument("-i", "--input_dir", 135 | default='./data/original/brain_train/', 136 | help='input_dir') 137 | parser.add_argument("-o", "--output_image_dir", 138 | default='./data/preprocessed/brain_train/3d_test', 139 | help='output_image_dir') 140 | parser.add_argument("-m", "--output_mask_dir", 141 | default='./data/preprocessed/brain_train/3d_test_masks/', 142 | help='output_mask_dir') 143 | parser.add_argument("-p", "--folds_path", required=False, type=str, 
default=None, 144 | help='Path to csv file with folds info. ' 145 | 'Use if you want to create a synthetic dataset only from one "test" fold of the input dataset') 146 | parser.add_argument("-f", "--fold", required=False, type=str, default=None, 147 | help='Fold number. ' 148 | 'Use if you want to create a synthetic dataset only from one "test" fold of the input dataset') 149 | 150 | args = parser.parse_args() 151 | 152 | input_dir = args.input_dir 153 | output_image_dir = args.output_image_dir 154 | output_mask_dir = args.output_mask_dir 155 | folds_path = args.folds_path 156 | fold = args.fold 157 | 158 | os.makedirs(output_image_dir, exist_ok=True) 159 | os.makedirs(output_mask_dir, exist_ok=True) 160 | 161 | create_anomaly_3d_dataset(input_dir, output_image_dir, output_mask_dir, folds_path, fold) 162 | -------------------------------------------------------------------------------- /mood/utils/preprocessing/save_2D.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import nibabel as nib 4 | from tqdm import tqdm 5 | import argparse 6 | import monai 7 | 8 | 9 | def nifti_to_2d_slices(input_folder: str, output_folder: str, axis: int, filtered, resize): 10 | for fname in tqdm(sorted(os.listdir(input_folder))): 11 | 12 | if not fname.endswith("nii.gz"): 13 | continue 14 | 15 | n_file = os.path.join(input_folder, fname) 16 | nifti = nib.load(n_file) 17 | 18 | np_data = nifti.get_fdata() 19 | np_data = np_data.astype(np.float16) 20 | 21 | f_basename = fname.split(".")[0] 22 | 23 | for i in range(np_data.shape[axis]): 24 | slc = [slice(None)] * len(np_data.shape) 25 | slc[axis] = i 26 | image = np_data[slc] 27 | 28 | if resize: 29 | tr = monai.transforms.Resize((resize, resize)) 30 | image = tr(image[None])[0] 31 | 32 | if filtered: 33 | brain_mask = image > 0 34 | if brain_mask.sum() < 4000: 35 | continue 36 | 37 | np.save(os.path.join(output_folder, f"{f_basename}_{i}.npy"), image) 38 | 39 | 40 | if __name__ == "__main__": 41 | 42 | parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) 43 | parser.add_argument("-i", "--input_dir", 44 | default='./data/original/brain_train/', 45 | help='input_dir') 46 | parser.add_argument("-o", "--output_dir", 47 | default='./data/preprocessed/brain_train/2d_axis_2', 48 | help='output_dir') 49 | parser.add_argument("-a", "--axis", type=int, 50 | default=2, 51 | help='axis') 52 | parser.add_argument("-f", "--do_not_filter", action='store_true', 53 | default=False, 54 | help='Default behavior: do not save slices where # of non zero pixels < 4000. 
\n' 55 | 'Use this flag to disable this behavior') 56 | parser.add_argument("-r", "--resize", type=int, 57 | default=256, 58 | help='Resize image while saving') 59 | 60 | args = parser.parse_args() 61 | 62 | input_dir = args.input_dir 63 | output_dir = args.output_dir 64 | axis = args.axis 65 | do_filter = (args.do_not_filter == False) 66 | resize = args.resize 67 | os.makedirs(output_dir, exist_ok=True) 68 | 69 | nifti_to_2d_slices(input_dir, output_dir, axis, do_filter, resize) 70 | -------------------------------------------------------------------------------- /mood/utils/transforms.py: -------------------------------------------------------------------------------- 1 | from torchvision import transforms 2 | import monai.transforms 3 | 4 | from mood.utils.datasets import DatasetType 5 | 6 | 7 | class Transform2D: 8 | def __init__(self, resize=None, to_tensor=True, normalize=True): 9 | tr = [] 10 | 11 | if resize is not None: 12 | tr += [transforms.Resize((resize, resize))] 13 | 14 | if to_tensor: 15 | tr += [transforms.ToTensor()] 16 | 17 | if normalize: 18 | tr += [transforms.Normalize((0.5,), (0.5,))] 19 | self.transform = transforms.Compose(tr) 20 | 21 | def __call__(self, img): 22 | return self.transform(img) 23 | 24 | 25 | class Transform3D: 26 | def __init__(self, resize=None, normalize=True): 27 | tr = [] 28 | 29 | if resize is not None: 30 | tr += [monai.transforms.Resize((resize, resize, resize))] 31 | 32 | tr += [monai.transforms.ToTensor()] 33 | 34 | if normalize: 35 | tr += [Normalize3D(0.5, 0.5)] 36 | self.transform = transforms.Compose(tr) 37 | 38 | def __call__(self, img): 39 | return self.transform(img[None]) 40 | 41 | 42 | TRANSFORMS = { 43 | DatasetType.numpy2d: Transform2D, 44 | DatasetType.nifti3d: Transform3D, 45 | } 46 | 47 | 48 | class Normalize3D(object): 49 | def __init__(self, mean, std, inplace=False): 50 | self.mean = mean 51 | self.std = std 52 | self.inplace = inplace 53 | 54 | def __call__(self, tensor): 55 | return tensor.sub_(self.mean).div_(self.std) 56 | 57 | def __repr__(self): 58 | return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std) 59 | 60 | -------------------------------------------------------------------------------- /mood/utils/utils.py: -------------------------------------------------------------------------------- 1 | import yaml 2 | import os 3 | import PIL 4 | import numpy as np 5 | import matplotlib.pyplot as plt 6 | import nibabel as nib 7 | import itertools 8 | import monai 9 | 10 | 11 | def update_dict(obj, new_values): 12 | if obj is None: 13 | return new_values 14 | for k, v in new_values.items(): 15 | if isinstance(v, dict) and k in obj.keys(): 16 | obj[k] = update_dict(obj[k], new_values[k]) 17 | elif obj is None: 18 | obj = {k: new_values[k]} 19 | else: 20 | obj[k] = new_values[k] 21 | return obj 22 | 23 | 24 | def load_yaml(path): 25 | return yaml.load(open(path, 'r'), Loader=yaml.FullLoader) 26 | 27 | 28 | def save_yaml(data, path): 29 | os.makedirs(os.path.dirname(path), exist_ok=True) 30 | yaml.dump(data, open(path, 'w')) 31 | 32 | 33 | def update_config(config, overwrite_params): 34 | overwrite_params = yaml.load(overwrite_params, Loader=yaml.FullLoader) 35 | return update_dict(config, overwrite_params) 36 | 37 | 38 | def draw_image(image): 39 | return PIL.Image.fromarray((np.array(image) * 255).astype(np.uint8)) 40 | 41 | 42 | def read_nii_file(path): 43 | nifti = nib.load(path) 44 | data_array = nifti.get_data() 45 | affine_matrix = nifti.affine 46 | return data_array 47 | 48 | 49 | def 
create_pltfig_from_path(path, n=5, axis=0): 50 | nifti = nib.load(path) 51 | image = nifti.get_data() 52 | return create_pltfig_of_slices(image, n=n, axis=axis) 53 | 54 | 55 | def create_pltfig_from_path_128(path, n=5, axis=0): 56 | nifti = nib.load(path) 57 | image = nifti.get_data() 58 | 59 | transform = monai.transforms.Resize((128, 128, 128), mode='trilinear') 60 | image = transform(image[None]).squeeze(0) 61 | image = np.clip(image, a_max=1, a_min=0) 62 | image = image > 0.5 63 | 64 | return create_pltfig_of_slices(image, n=n, axis=axis) 65 | 66 | 67 | def create_pltfig_of_slices(image, n=5, axis=0): 68 | f, axs = plt.subplots(n, n, figsize=(20,20), squeeze=False) 69 | axs = list(itertools.chain.from_iterable(axs)) 70 | 71 | step = int(image.shape[2] // (n * n)) 72 | 73 | for i in range(1, n * n + 1): 74 | slc = [slice(None)] * len(image.shape) 75 | slc[axis] = i * step 76 | axs[i - 1].imshow(image[slc], cmap='gray') 77 | axs[i - 1].axis('off') 78 | axs[i - 1].set_title(str(i * step)) 79 | 80 | plt.tight_layout() 81 | return f 82 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy~=1.18.5 2 | pandas~=1.1.0 3 | setuptools~=46.4.0.post20200518 4 | nibabel~=3.1.1 5 | torch~=1.6.0 6 | Pillow~=7.2.0 7 | tqdm~=4.46.0 8 | monai~=0.2.0 9 | torchvision~=0.7.0 10 | PyYAML~=5.3.1 11 | scikit-learn~=0.23.2 12 | pyellipsoid~=0.0.3 13 | scikit-image~=0.17.2 14 | matplotlib~=3.3.1 15 | tensorboardX~=2.1 -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | 4 | setup(name='mood', 5 | version='0.1', 6 | description='Anomaly Detection Package For MOOD Challenge (MICCAI 2020)', 7 | url='', 8 | author='Nina Tuluptceva', 9 | author_email='nina.tuluptceva@philips.com', 10 | license='Apache License 2.0', 11 | packages=find_packages(), 12 | dependency_links=[], 13 | zip_safe=False) 14 | -------------------------------------------------------------------------------- /submission_data/configs/abdom_pixel.yaml: -------------------------------------------------------------------------------- 1 | do_not_process_small_area: True 2 | delete_zero_area: False 3 | resize: 128 4 | 5 | path_to_vgg19_weights: /workspace/data/vgg_weights/simclr_exp_1.tar 6 | 7 | models: 8 | - checkpoint_path: /workspace/data/abdom/pixel/axis_0/anomaly_detection.tar 9 | along_axis: 0 10 | resize: 128 11 | 12 | - checkpoint_path: /workspace/data/abdom/pixel/axis_2/anomaly_detection.tar 13 | along_axis: 2 14 | resize: 128 15 | -------------------------------------------------------------------------------- /submission_data/configs/abdom_sample.yaml: -------------------------------------------------------------------------------- 1 | do_not_process_small_area: True 2 | delete_zero_area: False 3 | resize: 128 4 | 5 | path_to_vgg19_weights: /workspace/data/vgg_weights/simclr_exp_1.tar 6 | 7 | models: 8 | - checkpoint_path: /workspace/data/abdom/sample/axis_0/anomaly_detection.tar 9 | along_axis: 0 10 | resize: 128 11 | 12 | - checkpoint_path: /workspace/data/abdom/sample/axis_2/anomaly_detection.tar 13 | along_axis: 2 14 | resize: 128 15 | -------------------------------------------------------------------------------- /submission_data/configs/brain_pixel.yaml: 
-------------------------------------------------------------------------------- 1 | do_not_process_small_area: True 2 | delete_zero_area: False 3 | resize: 128 4 | 5 | path_to_vgg19_weights: /workspace/data/vgg_weights/simclr_exp_1.tar 6 | 7 | models: 8 | - checkpoint_path: /workspace/data/brain/pixel/axis_0/anomaly_detection.tar 9 | along_axis: 0 10 | resize: 128 11 | 12 | - checkpoint_path: /workspace/data/brain/pixel/axis_1/anomaly_detection.tar 13 | along_axis: 1 14 | resize: 128 15 | 16 | - checkpoint_path: /workspace/data/brain/pixel/axis_2/anomaly_detection.tar 17 | along_axis: 2 18 | resize: 128 19 | -------------------------------------------------------------------------------- /submission_data/configs/brain_sample.yaml: -------------------------------------------------------------------------------- 1 | do_not_process_small_area: True 2 | delete_zero_area: False 3 | resize: 128 4 | 5 | path_to_vgg19_weights: /workspace/data/vgg_weights/simclr_exp_1.tar 6 | 7 | models: 8 | - checkpoint_path: /workspace/data/brain/sample/axis_0/anomaly_detection.tar 9 | along_axis: 0 10 | resize: 128 11 | 12 | - checkpoint_path: /workspace/data/brain/sample/axis_1/anomaly_detection.tar 13 | along_axis: 1 14 | resize: 128 15 | 16 | - checkpoint_path: /workspace/data/brain/sample/axis_2/anomaly_detection.tar 17 | along_axis: 2 18 | resize: 128 19 | -------------------------------------------------------------------------------- /submission_data/scripts/predict_3d.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import nibabel as nib 4 | import numpy as np 5 | import torch 6 | from torchvision import transforms 7 | import PIL 8 | import argparse 9 | import tqdm 10 | import monai 11 | 12 | from mood.dpa.train import DeepPerceptualAutoencoder 13 | from mood.dpa.rec_losses import ReconstructionLossType 14 | from mood.dpa.pg_rec_losses import PG_RECONSTRUCTION_LOSSES 15 | from mood.utils.utils import load_yaml 16 | 17 | 18 | def predict(input_folder, target_folder, config_path, mode): 19 | config = load_yaml(config_path) 20 | do_not_process_small_area = config['do_not_process_small_area'] 21 | delete_zero_area = config['delete_zero_area'] 22 | intermediate_resize = config['resize'] 23 | 24 | path_to_vgg19_weights = config['path_to_vgg19_weights'] 25 | 26 | models = [] 27 | for params in config['models']: 28 | checkpoint = torch.load(params['checkpoint_path']) 29 | along_axis = params['along_axis'] 30 | resize = params['resize'] 31 | 32 | # checkpoint['config']['image_rec_loss']['loss_kwargs']['mode_3d'] = True 33 | checkpoint['config']['image_rec_loss']['loss_kwargs']['path_to_vgg19_weights'] = path_to_vgg19_weights 34 | checkpoint['config']['image_rec_loss']['loss_type'] = 'relative_perceptual_L1' 35 | checkpoint['config']['image_rec_loss']['loss_kwargs']['normalize_to_vgg_input'] = \ 36 | checkpoint['config']['image_rec_loss']['loss_kwargs']['normalize_vgg_input'] 37 | del checkpoint['config']['image_rec_loss']['loss_kwargs']['normalize_vgg_input'] 38 | 39 | model = DeepPerceptualAutoencoder.load_anomaly_detection_model(checkpoint) 40 | enc, dec, image_rec_loss, (stage, resolution, _, _, _) = model 41 | 42 | loss_type = checkpoint['config']['image_rec_loss']['loss_type'] 43 | loss_kwargs = checkpoint['config']['image_rec_loss']['loss_kwargs'] 44 | loss_kwargs['mode_3d'] = True 45 | image_rec_loss = PG_RECONSTRUCTION_LOSSES[ReconstructionLossType[loss_type]]( 46 | max_resolution=resolution, **loss_kwargs) 47 | 
image_rec_loss.set_stage_resolution(stage, resolution) 48 | image_rec_loss.cuda() 49 | 50 | model = enc, dec, image_rec_loss, _ 51 | 52 | models.append((model, along_axis, resize)) 53 | 54 | for filename in tqdm.tqdm(sorted(os.listdir(input_folder))): 55 | source_file = os.path.join(input_folder, filename) 56 | 57 | nimg = nib.load(source_file) 58 | image_3d = nimg.get_fdata() 59 | 60 | if intermediate_resize is None: 61 | sum_pred_3d = np.zeros_like(image_3d) 62 | count_pred_3d = np.zeros_like(image_3d) 63 | else: 64 | sum_pred_3d = np.zeros((intermediate_resize, intermediate_resize, intermediate_resize)) 65 | count_pred_3d = np.zeros((intermediate_resize, intermediate_resize, intermediate_resize)) 66 | 67 | for model, along_axis, resize in models: 68 | image_transform = transforms.Compose([ 69 | transforms.Resize((resize, resize)), 70 | transforms.ToTensor(), 71 | transforms.Normalize((0.5,), (0.5,)) 72 | ]) 73 | 74 | tr_image = [] 75 | 76 | zeros_before = 0 77 | zeros_after = 0 78 | find_not_zeros = False 79 | for i in range(image_3d.shape[along_axis]): 80 | slc = [slice(None)] * len(image_3d.shape) 81 | slc[along_axis] = i 82 | 83 | if do_not_process_small_area: 84 | pil_image = image_3d[slc].astype(np.float32) 85 | if pil_image.sum() > 4000: 86 | find_not_zeros = True 87 | pil_image = PIL.Image.fromarray(pil_image, mode='F') 88 | tr_image.append(image_transform(pil_image)) 89 | else: 90 | if find_not_zeros: 91 | zeros_after += 1 92 | else: 93 | zeros_before += 1 94 | else: 95 | pil_image = image_3d[slc].astype(np.float32) 96 | pil_image = PIL.Image.fromarray(pil_image, mode='F') 97 | tr_image.append(image_transform(pil_image)) 98 | 99 | tr_image = torch.stack(tr_image, dim=0).cuda() 100 | 101 | with torch.no_grad(): 102 | enc, dec, image_rec_loss, _ = model 103 | MAX_BATCH = 128 104 | # MAX_BATCH = 512 105 | 106 | if len(tr_image) < MAX_BATCH: 107 | rec_image = dec(enc(tr_image)).detach() 108 | else: 109 | rec_image = None 110 | for i in range(0, len(tr_image), MAX_BATCH): 111 | y = dec(enc(tr_image[i: i + MAX_BATCH])).detach() 112 | if rec_image is None: 113 | rec_image = y 114 | else: 115 | rec_image = torch.cat((rec_image, y), dim=0) 116 | # print(rec_image.shape) 117 | 118 | tr_image = tr_image.squeeze(1).unsqueeze(0).unsqueeze(0) 119 | rec_image = rec_image.squeeze(1).unsqueeze(0).unsqueeze(0) 120 | 121 | image_rec_loss.set_reduction('pixelwise') 122 | pred = image_rec_loss(tr_image, rec_image) 123 | pred = pred.squeeze(0).squeeze(0).unsqueeze(1) 124 | 125 | assert pred.size(1) == 1 126 | pred = pred[:, 0] 127 | 128 | if do_not_process_small_area: 129 | slice_shape = pred.shape[1:] 130 | pred = torch.cat(( 131 | torch.zeros((zeros_before, *slice_shape)).cuda(), 132 | pred, 133 | torch.zeros((zeros_after, *slice_shape)).cuda() 134 | )) 135 | 136 | if along_axis == 0: 137 | pass 138 | elif along_axis == 1: 139 | pred = pred.permute(1, 0, 2) 140 | elif along_axis == 2: 141 | pred = pred.permute(1, 2, 0) 142 | else: 143 | raise NotImplementedError() 144 | pred = pred.unsqueeze(0).unsqueeze(0).detach().cpu() 145 | 146 | if intermediate_resize is None: 147 | pred = torch.nn.functional.interpolate(pred, mode='trilinear', size=image_3d.shape) 148 | else: 149 | pred = torch.nn.functional.interpolate(pred, mode='trilinear', 150 | size=(intermediate_resize, intermediate_resize, intermediate_resize)) 151 | 152 | pred = pred.squeeze(0).squeeze(0) 153 | pred = pred.detach().numpy() 154 | 155 | if delete_zero_area: 156 | pred = pred * (image_3d > 0) 157 | 158 | sum_pred_3d += pred 159 | 
count_pred_3d += (pred > 0) 160 | 161 | pred_3d = sum_pred_3d / (count_pred_3d + (count_pred_3d == 0)) / 100 162 | 163 | if mode == 'sample': 164 | anomaly_score = pred_3d.max() 165 | 166 | with open(os.path.join(target_folder, filename + ".txt"), "w") as write_file: 167 | write_file.write(str(anomaly_score)) 168 | 169 | elif mode == 'pixel': 170 | if intermediate_resize is not None: 171 | transform = monai.transforms.Resize(image_3d.shape, mode='trilinear') 172 | pred_3d = transform(pred_3d[None]).squeeze(0) 173 | 174 | # transform = monai.transforms.Resize((128, 128, 128), mode='trilinear') 175 | # pred_3d = transform(pred_3d[None]).squeeze(0) 176 | 177 | final_nimg = nib.Nifti1Image(pred_3d.astype(np.float32), affine=nimg.affine) 178 | nib.save(final_nimg, os.path.join(target_folder, filename)) 179 | 180 | 181 | if __name__ == "__main__": 182 | parser = argparse.ArgumentParser() 183 | parser.add_argument("-i", "--input", required=True, type=str) 184 | parser.add_argument("-o", "--output", required=True, type=str) 185 | parser.add_argument("-c", "--config", required=True, type=str) 186 | parser.add_argument("-m", "--mode", type=str, default="pixel", help="can be either 'pixel' or 'sample'.", required=False, 187 | choices=['pixel', 'sample']) 188 | 189 | args = parser.parse_args() 190 | 191 | input_dir = args.input 192 | output_dir = args.output 193 | config = args.config 194 | mode = args.mode 195 | 196 | os.makedirs(output_dir, exist_ok=True) 197 | 198 | predict(input_dir, output_dir, config, mode) 199 | -------------------------------------------------------------------------------- /submission_data/scripts/run_pixel_abdom.sh: -------------------------------------------------------------------------------- 1 | python /workspace/predict_3d.py -i $1 -o $2 -c /workspace/configs/abdom_pixel.yaml -m 'pixel' -------------------------------------------------------------------------------- /submission_data/scripts/run_pixel_brain.sh: -------------------------------------------------------------------------------- 1 | python /workspace/predict_3d.py -i $1 -o $2 -c /workspace/configs/brain_pixel.yaml -m 'pixel' 2 | -------------------------------------------------------------------------------- /submission_data/scripts/run_sample_abdom.sh: -------------------------------------------------------------------------------- 1 | python /workspace/predict_3d.py -i $1 -o $2 -c /workspace/configs/abdom_sample.yaml -m 'sample' -------------------------------------------------------------------------------- /submission_data/scripts/run_sample_brain.sh: -------------------------------------------------------------------------------- 1 | python /workspace/predict_3d.py -i $1 -o $2 -c /workspace/configs/brain_sample.yaml -m 'sample' --------------------------------------------------------------------------------
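
Note: the preprocessing utilities under mood/utils/preprocessing (create_folds.py, create_val_dataset_with_synthetic_anomalies.py, save_2D.py) are normally invoked as command-line scripts, but the same functions can be chained directly from Python. The sketch below is illustrative only: it assumes the package has been installed (e.g. pip install .) and that the MOOD brain training volumes sit under ./data/original/brain_train/, matching the scripts' argparse defaults; the output directories simply reuse those defaults and can be changed freely.

import os

from mood.utils.preprocessing.create_folds import create_folds
from mood.utils.preprocessing.create_val_dataset_with_synthetic_anomalies import create_anomaly_3d_dataset
from mood.utils.preprocessing.save_2D import nifti_to_2d_slices

input_dir = './data/original/brain_train/'
folds_path = './folds/brain/train_folds_10.csv'

# 1. Assign every .nii.gz volume to one of 10 "test" folds and store the split as a CSV.
os.makedirs(os.path.dirname(folds_path), exist_ok=True)
create_folds(input_dir, folds_path, n_folds=10)

# 2. For the volumes of fold 0, keep an unchanged copy and add a copy with 1-3
#    random ellipsoid anomalies, together with the corresponding ground-truth masks.
image_dir = './data/preprocessed/brain_train/3d_test'
mask_dir = './data/preprocessed/brain_train/3d_test_masks/'
os.makedirs(image_dir, exist_ok=True)
os.makedirs(mask_dir, exist_ok=True)
create_anomaly_3d_dataset(input_dir, image_dir, mask_dir, folds_path=folds_path, fold=0)

# 3. Dump 256x256 slices along axis 2 as .npy files for 2D training,
#    skipping slices with fewer than 4000 non-zero pixels.
slices_dir = './data/preprocessed/brain_train/2d_axis_2'
os.makedirs(slices_dir, exist_ok=True)
nifti_to_2d_slices(input_dir, slices_dir, axis=2, filtered=True, resize=256)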