├── .gitignore
├── README.md
├── code
│   ├── data
│   │   ├── dataset.py
│   │   └── transforms.py
│   ├── inference
│   │   ├── main.py
│   │   └── main_test.py
│   ├── model_zoo
│   │   └── models.py
│   ├── params.py
│   ├── training
│   │   ├── lovasz.py
│   │   ├── main.py
│   │   ├── meter.py
│   │   ├── mix.py
│   │   ├── optim.py
│   │   ├── predict.py
│   │   └── train.py
│   └── utils
│       ├── logger.py
│       ├── metrics.py
│       ├── plots.py
│       ├── rle.py
│       └── torch.py
├── input
│   └── annotation_v3
│       ├── 095bf7a1f.json
│       ├── 095bf7a1f.tiff - default.qpdata
│       ├── 1e2425f28.json
│       ├── 1e2425f28.tiff - default.qpdata
│       ├── 2f6ecfcdf.json
│       ├── 2f6ecfcdf.tiff - VAN0014-LK-207-2-PAS_FFPE_pyr.qpdata
│       ├── 4ef6695ce.json
│       ├── 4ef6695ce.tiff - default.qpdata
│       ├── 54f2eec69.json
│       ├── 54f2eec69.tiff - default.qpdata
│       ├── 8242609fa.json
│       ├── 8242609fa.tiff - VAN0016-LK-208-2-PAS_FFPE_pyr.qpdata
│       ├── PAS_I_4_S_1_ROI_3.json
│       ├── PAS_I_4_S_1_ROI_3.qpdata
│       ├── PAS_I_4_S_2_ROI_1.json
│       ├── PAS_I_4_S_2_ROI_1.qpdata
│       ├── PAS_I_4_S_2_ROI_3.json
│       ├── PAS_I_4_S_2_ROI_3.qpdata
│       ├── PAS_I_4_S_3_ROI_1.json
│       ├── PAS_I_4_S_3_ROI_1.qpdata
│       ├── PAS_I_4_S_3_ROI_2.json
│       ├── PAS_I_4_S_3_ROI_2.qpdata
│       ├── PAS_I_4_S_4_ROI_1.json
│       ├── PAS_I_4_S_4_ROI_1.qpdata
│       ├── PAS_I_4_S_4_ROI_3.json
│       ├── PAS_I_4_S_4_ROI_3.qpdata
│       ├── PAS_I_4_S_4_ROI_4.json
│       ├── PAS_I_4_S_4_ROI_4.qpdata
│       ├── PAS_I_4_S_5_ROI_1.json
│       ├── PAS_I_4_S_5_ROI_1.qpdata
│       ├── PAS_I_4_S_5_ROI_4.json
│       ├── PAS_I_4_S_5_ROI_4.qpdata
│       ├── PAS_I_4_S_6_ROI_3.json
│       ├── PAS_I_4_S_6_ROI_3.qpdata
│       ├── PAS_I_4_S_6_ROI_4.json
│       ├── PAS_I_4_S_6_ROI_4.qpdata
│       ├── PAS_I_4_S_6_ROI_6.json
│       ├── PAS_I_4_S_6_ROI_6.qpdata
│       ├── SAS_21883_001.json
│       ├── SAS_21883_001.qpdata
│       ├── SAS_21891_001.json
│       ├── SAS_21891_001.qpdata
│       ├── SAS_21896_001.json
│       ├── SAS_21896_001.qpdata
│       ├── SAS_21904_001.json
│       ├── SAS_21904_001.qpdata
│       ├── SAS_21908_001.json
│       ├── SAS_21908_001.qpdata
│       ├── SAS_21915_001.json
│       ├── SAS_21915_001.qpdata
│       ├── SAS_21924_001.json
│       ├── SAS_21924_001.qpdata
│       ├── SAS_21930_001.json
│       ├── SAS_21930_001.qpdata
│       ├── SAS_21937_001.json
│       ├── SAS_21937_001.qpdata
│       ├── SAS_21942_001.json
│       ├── SAS_21942_001.qpdata
│       ├── SESCAM_102.json
│       ├── SESCAM_102.qpdata
│       ├── SESCAM_1_0.json
│       ├── SESCAM_1_0.qpdata
│       ├── SESCAM_2_0.json
│       ├── SESCAM_2_0.qpdata
│       ├── SESCAM_3_0.json
│       ├── SESCAM_3_0.qpdata
│       ├── SESCAM_4_0.json
│       ├── SESCAM_4_0.qpdata
│       ├── SESCAM_5_0.json
│       ├── SESCAM_5_0.qpdata
│       ├── SESCAM_6_0.json
│       ├── SESCAM_6_0.qpdata
│       ├── SESCAM_7_0.json
│       ├── SESCAM_7_0.qpdata
│       ├── SESCAM_8_0.json
│       ├── SESCAM_8_0.qpdata
│       ├── SESCAM_9_0.json
│       ├── SESCAM_9_0.qpdata
│       ├── VUHSK_1272.json
│       ├── VUHSK_1272.qpdata
│       ├── VUHSK_1352.json
│       ├── VUHSK_1352.qpdata
│       ├── VUHSK_1432.json
│       ├── VUHSK_1432.qpdata
│       ├── VUHSK_1502.json
│       ├── VUHSK_1502.qpdata
│       ├── VUHSK_1622.json
│       ├── VUHSK_1622.qpdata
│       ├── VUHSK_1702.json
│       ├── VUHSK_1702.qpdata
│       ├── VUHSK_1762.json
│       ├── VUHSK_1762.qpdata
│       ├── VUHSK_1832.json
│       ├── VUHSK_1832.qpdata
│       ├── VUHSK_1912.json
│       ├── VUHSK_1912.qpdata
│       ├── VUHSK_1992.json
│       ├── VUHSK_1992.qpdata
│       ├── VUHSK_2072.json
│       ├── VUHSK_2072.qpdata
│       ├── aaa6a05cc.json
│       ├── aaa6a05cc.tiff - VAN0003-LK-33-2-PAS_FFPE_pyr.qpdata
│       ├── afa5e8098.json
│       ├── afa5e8098.qpdata
│       ├── b2dc8411c.json
│       ├── b2dc8411c.tiff - VAN0009-LK-106-2-PAS_FFPE_pyr.qpdata
│       ├── b9a3865fc.json
│       ├── b9a3865fc.tiff - VAN0005-RK-8-2-PAS_FFPE_pyr.qpdata
│       ├── c68fe75ea.json
│       ├── c68fe75ea.tiff - default.qpdata
│       ├── cb2d976f4.json
│       ├── cb2d976f4.tiff - VAN0006-LK-7-2-PAS_FFPE_pyr.qpdata
│       ├── e79de561c.json
│       └── e79de561c.tiff - default.qpdata
├── logs
│   └── logs.txt
├── notebooks
│   ├── Image downscaling.ipynb
│   ├── Inference Test.ipynb
│   ├── Inference.ipynb
│   ├── Json to Mask.ipynb
│   ├── Results analysis.ipynb
│   ├── Training.ipynb
│   └── Visualize Predictions.ipynb
├── output
│   └── submission_final.csv
└── overview.png

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
.cache/
../.history/
.history/
./data/
input_old/
external_code/
output/
input/
models/
logs/
.vscode
**/__pycache__/
.ipynb_checkpoints/
*.pt
*~
*.npy
./dataset/
/dataset/
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
#   However, in case of collaboration, if having platform-specific dependencies or dependencies
#   having no cross-platform support, pipenv may install dependencies that don't work, or not
#   install all needed dependencies.
#Pipfile.lock

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# 4th place solution to the HuBMAP - Hacking the Kidney Kaggle Competition
#### Authors : [@theoviel](https://github.com/theoviel), [@optimox](https://github.com/Optimox), [@iafoss](https://github.com/iafoss)

#### Status :
- Document code : Done
- Clean notebooks : Done
- Make ReadMe : Almost Done
- Add the code for individual differences : WIP
- Clean code for glomeruli level metrics : WIP


## Introduction

Our approach is built on understanding the challenges behind the data. Our main contribution is modeling the link between healthy and unhealthy glomeruli, by predicting them as two separate classes. We incorporated several external datasets into our pipeline and manually annotated both classes.
Our model architecture is relatively simple, and the pipeline can easily be transferred to other tasks.

You can read more about our solution [here](https://www.kaggle.com/theoviel/hubmap-final-methodology-submission/). A more concise write-up is also available [here](https://www.kaggle.com/c/hubmap-kidney-segmentation/discussion/238024).

The `main` branch contains a cleaned and simplified version of our pipeline, which is enough to reproduce our solution.


## Overview

![](overview.png)

Our pipeline achieves highly competitive performance on the task, thanks to the following aspects :
- It allows for fast experimenting and results interpretation:
  - Pre-computation of resized images and masks of the desired size, for short dataset preparation times
  - Half-precision training for faster experiments
  - Interactive visualization of predicted masks in notebooks
  - Glomeruli-level metrics and confidences to understand the flaws of the model [TODO : Add code]
- It uses intelligent tiling, compatible with any segmentation task on large images:
  - We sample only interesting regions, using mask and tissue information
  - Tiling is done on the fly, on previously resized images, for efficiency
  - Augmentations are applied to slightly bigger tiles to get rid of border effects
- It is adapted to the specificities of the problem, to better tackle its complexity :
  - We added another class to the problem : unhealthy glomeruli
  - We manually annotated external data, as well as missing masks in the training data, using model feedback
  - Aggressive augmentations help the model generalize well to quality issues in the test data


Our code is also convenient to use, especially for researchers, for the following reasons :
- It is only based on commonly used and reliable libraries:
  - [PyTorch](https://pytorch.org/)
  - [Albumentations](https://albumentations.ai/) for augmentations
  - [Segmentation Models PyTorch](https://github.com/qubvel/segmentation_models.pytorch) for modeling
- It is easily reusable:
  - It is documented and formatted
  - It includes best practices from top Kagglers, who also have experience in research and in the industry
  - It is (relatively) low level, which means one can independently use each brick of our pipeline in their own code
- We applied our pipeline to keratinocyte segmentation in LC-OCT images and quickly achieved good results. See [here](https://www.kaggle.com/theoviel/hubmap-final-methodology-submission/) for more information.


## How to use the repository

- Clone the repository
- [TODO : Requirements]

- Download the data :
  - Put the competition data from [Kaggle](https://www.kaggle.com/c/hubmap-kidney-segmentation/data) in the `input` folder
  - Put the extra `Dataset A` images from [data.mendeley.com](https://data.mendeley.com/datasets/k7nvtgn2x6/3) in the `input/extra/` folder.
  - Put the [two additional images from the HuBMAP portal](https://www.kaggle.com/iafoss/hubmap-ex) in the `input/test/` folder.
  - You can download pseudo labels [on Kaggle](https://www.kaggle.com/theoviel/hubmap-pl/)
  - We also provide our trained model weights [on Kaggle](https://www.kaggle.com/theoviel/hubmap-cp/)

- Prepare the data :
  - Extract the hand labels using `notebooks/Json to Mask.ipynb` :
    - Use the `ADD_FC` and `ONLY_FC` parameters to generate labels for the healthy and unhealthy classes.
    - Use the `SAVE_TIFF` parameter to save the external data as half-resolution tiff files.
    - Use the `PLOT` parameter to visualize the masks.
    - Use the `SAVE` parameter to save the masks as rle.
  - Create lower resolution masks and images using `notebooks/Image downscaling.ipynb` :
    - Use the `FACTOR` parameter to specify the downscaling factor. We recommend generating data at downscaling factors 2 and 4.
    - For training data, we save extra time by also computing downscaled rles. Use the `NAME` parameter to specify which rle to downscale. Make sure to run the script for all the dataframes you want to use.
    - The downscaled images only need to be saved once; use the `SAVE_IMG` parameter to that end.
  - The process is a bit time-consuming, but only needs to be done once. This allows for faster experimenting : loading and downscaling the images when building the dataset takes a while, so we don't want to do it every time.

- Train models using `notebooks/Training.ipynb` :
  - Use the `DEBUG` parameter to launch the code in debug mode (single fold, no logging)
  - Specify the training parameters in the `Config` class (a minimal sketch is given at the end of this section). Feel free to experiment with the parameters, here are the main ones :
    - `tile_size` : Tile size
    - `reduce_factor` : Downscaling factor
    - `on_spot_sampling` : Probability to accept a random tile within the dataset
    - `overlap_factor` : Tile overlap during inference
    - `selected_folds` : Folds to run computations for
    - `encoder` : Encoder, as defined in [Segmentation Models PyTorch](https://github.com/qubvel/segmentation_models.pytorch)
    - `decoder` : Decoder, from [Segmentation Models PyTorch](https://github.com/qubvel/segmentation_models.pytorch)
    - `num_classes` : Number of classes. Keep it at 2 to use the healthy and unhealthy classes
    - `loss` : Loss function. We use BCE, but the Lovasz loss is also interesting
    - `optimizer` : Optimizer name
    - `batch_size` : Training batch size; adapt the `BATCH_SIZES` dictionary to your gpu
    - `val_bs` : Validation batch size
    - `epochs` : Number of training epochs
    - `iter_per_epoch` : Number of tiles to use per epoch
    - `lr` : Learning rate. Will be decayed linearly
    - `warmup_prop` : Proportion of steps used for learning rate warmup
    - `mix_proba` : Probability to apply MixUp with
    - `mix_alpha` : Alpha parameter for MixUp
    - `use_pl` : Probability to sample a tile from the pseudo-labeled images
    - `use_external` : Probability to sample a tile from the external images
    - `pl_path` : Path to pseudo labels generated by `notebooks/Inference Test.ipynb`
    - `extra_path` : Path to extra labels generated by `notebooks/Json to Mask.ipynb` (should not be changed)
    - `rle_path` : Path to train labels downscaled by `notebooks/Image downscaling.ipynb` (should not be changed)

- Validate models with `notebooks/Inference.ipynb` :
  - Use the `log_folder` parameter to specify the experiment.
  - Use the `use_tta` parameter to specify whether to use test time augmentations.
  - Use the `save` parameter to indicate whether to save predictions.
  - Use the `save_all_tta` parameter to save predictions for each tta (takes a lot of disk space).
  - Use the `global_threshold` parameter to tweak the threshold.

- Generate pseudo-labels with `notebooks/Inference Test.ipynb` :
  - Use the `log_folder` parameter to specify the experiment.
  - Use the `use_tta` parameter to specify whether to use test time augmentations.
  - Use the `save` parameter to indicate whether to save predictions.

- Visualize predictions with `notebooks/Visualize Predictions.ipynb` :
  - Works to visualize predictions from the two previous notebooks, but also from a submission file.
  - Specify the `name`, `log_folder` and `sub` parameters according to what you want to plot.
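To make the parameter list above concrete, here is a minimal sketch of what such a `Config` class can look like. All values below are illustrative placeholders, not the exact settings we used — those live in `notebooks/Training.ipynb` :

```python
class Config:
    # Tiling
    tile_size = 256
    reduce_factor = 4            # downscaling factor of the pre-computed images
    overlap_factor = 1.5
    on_spot_sampling = 0.9

    # Model (see code/model_zoo/models.py)
    encoder = "resnet34"
    decoder = "Unet"
    num_classes = 2              # healthy + unhealthy glomeruli
    encoder_weights = "imagenet"

    # Optimization
    loss = "BCEWithLogitsLoss"
    optimizer = "Adam"
    batch_size = 32
    val_bs = 32
    epochs = 50
    iter_per_epoch = 1000
    lr = 1e-3
    warmup_prop = 0.1
    mix_proba = 0.5
    mix_alpha = 0.4

    # Folds & extra data
    selected_folds = [0, 1, 2, 3, 4]
    use_pl = 0.                  # probability to sample pseudo-labeled tiles
    use_external = 0.            # probability to sample external tiles
    pl_path = None
    extra_path = None
    rle_path = "../input/train_4.csv"  # rles downscaled with FACTOR = 4

    device = "cuda"
```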
## Code structure

If you wish to dive into the code, the repository naming should be straightforward. Each function is documented.
The structure is the following :

```
code
├── data
│   ├── dataset.py        # Torch datasets
│   └── transforms.py     # Augmentations
├── inference
│   ├── main_test.py      # Inference for the test data
│   └── main.py           # Inference for the train data
├── model_zoo
│   └── models.py         # Model definition
├── training
│   ├── lovasz.py         # Lovasz loss implementation
│   ├── main.py           # k-fold and training main functions
│   ├── meter.py          # Meter for evaluation during training
│   ├── mix.py            # CutMix and MixUp
│   ├── optim.py          # Losses and optimizer handling
│   ├── predict.py        # Functions for prediction
│   └── train.py          # Fitting a model
├── utils
│   ├── logger.py         # Logging utils
│   ├── metrics.py        # Metrics for the competition
│   ├── plots.py          # Plotting utils
│   ├── rle.py            # RLE encoding utils
│   └── torch.py          # Torch utils
└── params.py             # Main parameters
```

--------------------------------------------------------------------------------
/code/data/transforms.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np
import albumentations as albu
from albumentations.pytorch import ToTensorV2
from albumentations.core.transforms_interface import ImageOnlyTransform
from params import MEAN, STD


def disk(radius, alias_blur=0.1, dtype=np.float32):
    """
    From https://github.com/hendrycks/robustness/blob/master/ImageNet-C/create_c/make_imagenet_c.py
    and https://github.com/albumentations-team/albumentations/issues/477
    """
    if radius <= 8:
        L = np.arange(-8, 8 + 1)
        ksize = (3, 3)
    else:
        L = np.arange(-radius, radius + 1)
        ksize = (5, 5)
    X, Y = np.meshgrid(L, L)
    aliased_disk = np.array((X ** 2 + Y ** 2) <= radius ** 2, dtype=dtype)
    aliased_disk /= np.sum(aliased_disk)

    # supersample disk to antialias
    return cv2.GaussianBlur(aliased_disk, ksize=ksize, sigmaX=alias_blur)


class DefocusBlur(ImageOnlyTransform):
    """
    From https://github.com/hendrycks/robustness/blob/master/ImageNet-C/create_c/make_imagenet_c.py
    and https://github.com/albumentations-team/albumentations/issues/477
    """
    def __init__(
        self,
        severity=1,
        always_apply=False,
        p=1.0,
    ):
        super(DefocusBlur, self).__init__(always_apply, p)
        self.severity = severity
        self.radius, self.blur = [(3, 0.1), (4, 0.5), (6, 0.5), (8, 0.5), (10, 0.5)][
            self.severity - 1
        ]

    def apply(self, image, **params):
        image = np.array(image) / 255.0
        kernel = disk(radius=self.radius, alias_blur=self.blur)
        channels = []
        for d in range(3):
            channels.append(cv2.filter2D(image[:, :, d], -1, kernel))
        channels = np.array(channels).transpose((1, 2, 0))
        return np.clip(channels, 0, 1) * 255

    def get_transform_init_args_names(self):
        return ("severity",)


def blur_transforms(p=0.5, blur_limit=5, gaussian_limit=(5, 7), severity=1):
    """
    Applies DefocusBlur, MotionBlur or GaussianBlur with a probability p.

    Args:
        p (float, optional): probability. Defaults to 0.5.
        blur_limit (int, optional): Blur intensity limit. Defaults to 5.
        gaussian_limit (tuple, optional): GaussianBlur intensity limits. Defaults to (5, 7).
        severity (int, optional): DefocusBlur severity. Defaults to 1.

    Returns:
        albumentation transforms: transforms.
    """
    return albu.OneOf(
        [
            DefocusBlur(severity=severity, always_apply=True),
            albu.MotionBlur(blur_limit=blur_limit, always_apply=True),
            albu.GaussianBlur(blur_limit=gaussian_limit, always_apply=True),
        ],
        p=p,
    )


def noise_transforms(p=0.5):
    """
    Applies GaussNoise or RandomFog with a probability p.

    Args:
        p (float, optional): probability. Defaults to 0.5.

    Returns:
        albumentation transforms: transforms.
    """
    return albu.OneOf(
        [
            albu.GaussNoise(var_limit=(1.0, 50.0), always_apply=True),
            albu.RandomFog(fog_coef_lower=0.01, fog_coef_upper=0.25, always_apply=True),
        ],
        p=p,
    )


def color_transforms(p=0.5):
    """
    Applies one of several color transforms (gamma / brightness & contrast,
    RGB shift, hue & saturation shift, color jitter) with a probability p.

    Args:
        p (float, optional): probability. Defaults to 0.5.

    Returns:
        albumentation transforms: transforms.
    """
    return albu.OneOf(
        [
            albu.Compose(
                [
                    albu.RandomGamma(gamma_limit=(80, 120), p=1),
                    albu.RandomBrightnessContrast(
                        brightness_limit=0.1,  # 0.3
                        contrast_limit=0.1,  # 0.3
                        p=1,
                    ),
                ]
            ),
            albu.RGBShift(
                r_shift_limit=30,
                g_shift_limit=0,
                b_shift_limit=30,
                p=1,
            ),
            albu.HueSaturationValue(
                hue_shift_limit=30,
                sat_shift_limit=30,
                val_shift_limit=30,
                p=1,
            ),
            albu.ColorJitter(
                brightness=0.3,  # 0.3
                contrast=0.3,  # 0.3
                saturation=0.3,
                hue=0.05,
                p=1,
            ),
        ],
        p=p,
    )


def deformation_transform(p=0.5):
    """
    Applies ElasticTransform, GridDistortion or OpticalDistortion with a probability p.

    Args:
        p (float, optional): probability. Defaults to 0.5.

    Returns:
        albumentation transforms: transforms.
    """
    return albu.OneOf(
        [
            albu.ElasticTransform(
                alpha=1,
                sigma=25,
                alpha_affine=25,
                border_mode=cv2.BORDER_CONSTANT,
                value=0,
                always_apply=True,
            ),
            albu.GridDistortion(always_apply=True),
            albu.OpticalDistortion(distort_limit=1, shift_limit=0.2, always_apply=True),
        ],
        p=p,
    )


def center_crop(size):
    """
    Applies a padded center crop.

    Args:
        size (int or None): Crop size. If None, cropping is disabled.

    Returns:
        albumentation transforms: transforms.
    """
    if size is None:  # disable cropping
        p = 0
    else:  # always crop
        p = 1

    return albu.Compose(
        [
            albu.PadIfNeeded(size, size, p=p, border_mode=cv2.BORDER_CONSTANT),
            albu.CenterCrop(size, size, p=p),
        ],
        p=1,
    )


def HE_preprocess(augment=True, visualize=False, mean=MEAN, std=STD, size=None):
    """
    Returns transformations for the H&E images.

    Args:
        augment (bool, optional): Whether to apply augmentations. Defaults to True.
        visualize (bool, optional): Whether to use transforms for visualization. Defaults to False.
        mean (np array, optional): Mean for normalization. Defaults to MEAN.
        std (np array, optional): Standard deviation for normalization. Defaults to STD.
        size (int or None, optional): Center crop size. Defaults to None.

    Returns:
        albumentation transforms: transforms.
    """
    if visualize:
        normalizer = albu.Compose(
            [
                center_crop(size),
                albu.Normalize(mean=[0, 0, 0], std=[1, 1, 1]),
                ToTensorV2(),
            ],
            p=1,
        )
    else:
        normalizer = albu.Compose(
            [center_crop(size), albu.Normalize(mean=mean, std=std), ToTensorV2()], p=1
        )

    if augment:
        return albu.Compose(
            [
                albu.VerticalFlip(p=0.5),
                albu.HorizontalFlip(p=0.5),
                albu.ShiftScaleRotate(
                    scale_limit=0.1,  # 0
                    shift_limit=0.1,  # 0.05
                    rotate_limit=90,
                    p=0.5,
                ),
                deformation_transform(p=0.5),
                color_transforms(p=0.5),
                blur_transforms(p=0.5),
                normalizer,
            ]
        )
    else:
        return normalizer


def HE_preprocess_test(augment=False, visualize=False, mean=MEAN, std=STD):
    """
    Returns transformations for the H&E images at test time.

    Args:
        augment (bool, optional): Whether to apply augmentations. Defaults to False.
        visualize (bool, optional): Whether to use transforms for visualization. Defaults to False.
        mean (np array, optional): Mean for normalization. Defaults to MEAN.
        std (np array, optional): Standard deviation for normalization. Defaults to STD.

    Returns:
        albumentation transforms: transforms.
    """
    if visualize:
        normalizer = albu.Compose(
            [albu.Normalize(mean=[0, 0, 0], std=[1, 1, 1]), ToTensorV2()], p=1
        )
    else:
        normalizer = albu.Compose(
            [albu.Normalize(mean=mean, std=std), ToTensorV2()], p=1
        )

    if augment:
        raise NotImplementedError

    return normalizer
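
# Minimal usage sketch (illustrative, not part of the training pipeline):
# apply the training augmentations to a random tile and its mask.
# Run from the `code` folder so that `params` is importable; assumes the
# albumentations version used by the repository.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    image = rng.integers(0, 256, (256, 256, 3), dtype=np.uint8)
    mask = rng.integers(0, 2, (256, 256), dtype=np.uint8)

    transfo = HE_preprocess(augment=True, size=256)
    sample = transfo(image=image, mask=mask)

    # ToTensorV2 outputs torch tensors: [3 x 256 x 256] image, [256 x 256] mask
    print(sample["image"].shape, sample["mask"].shape)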

--------------------------------------------------------------------------------
/code/inference/main.py:
--------------------------------------------------------------------------------
import torch
import numpy as np
import pandas as pd

from training.predict import (
    predict_entire_mask_downscaled,
    predict_entire_mask,
    threshold_resize_torch,
    predict_entire_mask_downscaled_tta
)

from model_zoo.models import define_model

from data.dataset import InferenceDataset
from data.transforms import HE_preprocess_test

from utils.rle import enc2mask
from utils.torch import load_model_weights
from utils.metrics import dice_scores_img, tweak_threshold

from params import TIFF_PATH, DATA_PATH


def validate_inf(
    model,
    config,
    val_images,
    log_folder=None,
    use_full_size=True,
    global_threshold=None,
    use_tta=False,
    save=False,
    save_all_tta=False,
):
    """
    Performs inference with a model on a list of train images.

    Args:
        model (torch model): Segmentation model.
        config (Config): Parameters.
        val_images (list of strings): Image names.
        log_folder (str or None, optional): Folder to save predictions to. Defaults to None.
        use_full_size (bool, optional): Whether to use full resolution images. Defaults to True.
        global_threshold (float, optional): Threshold for probabilities. Defaults to None.
        use_tta (bool, optional): Whether to use tta. Defaults to False.
        save (bool, optional): Whether to save predictions. Defaults to False.
        save_all_tta (bool, optional): Whether to save predictions for all tta. Defaults to False.

    Returns:
        list of floats: Dice scores per image.
    """
    df_info = pd.read_csv(DATA_PATH + "HuBMAP-20-dataset_information.csv")

    if use_full_size:
        root = TIFF_PATH
        rle_path = DATA_PATH + "train.csv"
        reduce_factor = config.reduce_factor
    else:
        root = DATA_PATH + f"train_{config.reduce_factor}/"
        rle_path = DATA_PATH + f"train_{config.reduce_factor}.csv"
        reduce_factor = 1

    rles = pd.read_csv(rle_path)
    rles_full = pd.read_csv(DATA_PATH + "train.csv")

    print("\n    -> Validating \n")
    scores = []

    for img in val_images:

        predict_dataset = InferenceDataset(
            f"{root}/{img}.tiff",
            rle=rles[rles["id"] == img]["encoding"],
            overlap_factor=config.overlap_factor,
            reduce_factor=reduce_factor,
            tile_size=config.tile_size,
            transforms=HE_preprocess_test(augment=False, visualize=False),
        )

        if save_all_tta:
            global_pred = predict_entire_mask_downscaled_tta(
                predict_dataset, model, batch_size=config.val_bs
            )
            np.save(
                log_folder + f"pred_{img}.npy",
                global_pred.cpu().numpy()
            )

            global_pred = global_pred.mean(0)

        else:
            if use_full_size:
                global_pred = predict_entire_mask(
                    predict_dataset, model, batch_size=config.val_bs, tta=use_tta
                )
                threshold, score = 0.4, 0

            else:
                global_pred = predict_entire_mask_downscaled(
                    predict_dataset, model, batch_size=config.val_bs, tta=use_tta
                )

                threshold, score = tweak_threshold(
                    mask=torch.from_numpy(predict_dataset.mask).cuda(), pred=global_pred
                )
                print(
                    f" - Scored {score :.4f} for downscaled "
                    f"image {img} with threshold {threshold:.2f}"
                )

        shape = df_info[df_info.image_file == img + ".tiff"][
            ["width_pixels", "height_pixels"]
        ].values.astype(int)[0]
        mask_truth = enc2mask(rles_full[rles_full["id"] == img]["encoding"], shape)

        global_threshold = (
            global_threshold if global_threshold is not None else threshold
        )

        if save and not save_all_tta:
            np.save(
                log_folder + f"pred_{img}.npy",
                global_pred.cpu().numpy()
            )

        if not use_full_size:
            global_pred = threshold_resize_torch(
                global_pred, shape, threshold=global_threshold
            )
        else:
            global_pred = (global_pred > global_threshold).cpu().numpy()

        score = dice_scores_img(global_pred, mask_truth)
        scores.append(score)

        print(
            f" - Scored {score :.4f} for image {img} with threshold {global_threshold:.2f}\n"
        )

    return scores


def k_fold_inf(
    config,
    df,
    log_folder=None,
    use_full_size=True,
    global_threshold=None,
    use_tta=False,
    save=False,
    save_all_tta=False,
):
    """
    Performs a k-fold inference on the train data.

    Args:
        config (Config): Parameters.
        df (pandas dataframe): Train metadata. Contains image names and rles.
        log_folder (None or str, optional): Folder to load the weights from. Defaults to None.
        use_full_size (bool, optional): Whether to use full resolution images. Defaults to True.
        global_threshold (float, optional): Threshold for probabilities. Defaults to None.
        use_tta (bool, optional): Whether to use tta. Defaults to False.
        save (bool, optional): Whether to save predictions. Defaults to False.
        save_all_tta (bool, optional): Whether to save predictions for all tta. Defaults to False.

    Returns:
        list of floats: Dice scores per image.
    """
    folds = df[config.cv_column].unique()
    scores = []

    for i, fold in enumerate(folds):
        if i in config.selected_folds:
            print(f"\n------------- Fold {i + 1} / {len(folds)} -------------\n")
            df_val = df[df[config.cv_column] == fold].reset_index()

            val_images = df_val["tile_name"].apply(lambda x: x.split("_")[0]).unique()

            model = define_model(
                config.decoder,
                config.encoder,
                num_classes=config.num_classes,
                encoder_weights=config.encoder_weights,
            ).to(config.device)
            model.zero_grad()
            model.eval()

            load_model_weights(
                model, log_folder + f"{config.decoder}_{config.encoder}_{i}.pt"
            )

            scores += validate_inf(
                model,
                config,
                val_images,
                log_folder=log_folder,
                use_full_size=use_full_size,
                global_threshold=global_threshold,
                use_tta=use_tta,
                save=save,
                save_all_tta=save_all_tta,
            )

    return scores
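
# Illustrative sanity check (not part of the pipeline): the Dice score used
# above, re-implemented inline on two random binary masks. The actual metric
# lives in utils/metrics.py and may handle edge cases (e.g. empty masks)
# differently.
if __name__ == "__main__":
    pred = np.random.rand(64, 64) > 0.5
    truth = np.random.rand(64, 64) > 0.5
    dice = 2 * (pred & truth).sum() / (pred.sum() + truth.sum())
    print(f"Dice between two random masks: {dice:.3f}")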

--------------------------------------------------------------------------------
/code/inference/main_test.py:
--------------------------------------------------------------------------------
import numpy as np
import pandas as pd

from training.predict import (
    predict_entire_mask_downscaled,
    predict_entire_mask,
    threshold_resize_torch,
)

from model_zoo.models import define_model

from data.dataset import InferenceDataset
from data.transforms import HE_preprocess_test
from utils.torch import load_model_weights
from params import TIFF_PATH_TEST, DATA_PATH, EXTRA_IMGS_SHAPES


def validate_inf_test(
    model,
    config,
    images,
    fold=0,
    log_folder=None,
    use_full_size=True,
    global_threshold=None,
    use_tta=False,
    save=False
):
    """
    Performs inference with a model on a list of test images.

    Args:
        model (torch model): Segmentation model.
        config (Config): Parameters.
        images (list of strings): Image names.
        fold (int, optional): Fold index. Defaults to 0.
        log_folder (str or None, optional): Folder to save predictions to. Defaults to None.
        use_full_size (bool, optional): Whether to use full resolution images. Defaults to True.
        global_threshold (float, optional): Threshold for probabilities. Defaults to None.
        use_tta (bool, optional): Whether to use tta. Defaults to False.
        save (bool, optional): Whether to save predictions. Defaults to False.
    """
    df_info = pd.read_csv(DATA_PATH + "HuBMAP-20-dataset_information.csv")

    if use_full_size:
        root = TIFF_PATH_TEST
        reduce_factor = config.reduce_factor
    else:
        root = DATA_PATH + f"test_{config.reduce_factor}/"
        reduce_factor = 1

    for img in images:
        print(f" - Image {img}")

        predict_dataset = InferenceDataset(
            f"{root}/{img}.tiff",
            rle=None,
            overlap_factor=config.overlap_factor,
            reduce_factor=reduce_factor,
            tile_size=config.tile_size,
            transforms=HE_preprocess_test(augment=False, visualize=False),
        )

        if use_full_size:
            global_pred = predict_entire_mask(
                predict_dataset, model, batch_size=config.val_bs, tta=use_tta
            )

        else:
            global_pred = predict_entire_mask_downscaled(
                predict_dataset, model, batch_size=config.val_bs, tta=use_tta
            )

        if save:
            np.save(
                log_folder + f"pred_{img}_{fold}.npy",
                global_pred.cpu().numpy()
            )

        if not use_full_size:
            try:
                shape = df_info[df_info.image_file == img + ".tiff"][
                    ["width_pixels", "height_pixels"]
                ].values.astype(int)[0]
            except IndexError:
                shape = EXTRA_IMGS_SHAPES[img]

            global_pred = threshold_resize_torch(
                global_pred, shape, threshold=global_threshold
            )
        else:
            global_pred = (global_pred > global_threshold).cpu().numpy()


def k_fold_inf_test(
    config,
    images,
    log_folder=None,
    use_full_size=True,
    global_threshold=None,
    use_tta=False,
    save=False,
):
    """
    Performs a k-fold inference on the test data.

    Args:
        config (Config): Parameters.
        images (list of strings): Image names.
        log_folder (None or str, optional): Folder to load the weights from. Defaults to None.
        use_full_size (bool, optional): Whether to use full resolution images. Defaults to True.
        global_threshold (float, optional): Threshold for probabilities. Defaults to None.
        use_tta (bool, optional): Whether to use tta. Defaults to False.
        save (bool, optional): Whether to save predictions. Defaults to False.
    """
    for fold in range(5):
        if fold in config.selected_folds:
            print(f"\n------------- Fold {fold + 1} / 5 -------------\n")

            model = define_model(
                config.decoder,
                config.encoder,
                num_classes=config.num_classes,
                encoder_weights=config.encoder_weights,
            ).to(config.device)
            model.zero_grad()
            model.eval()

            load_model_weights(
                model, log_folder + f"{config.decoder}_{config.encoder}_{fold}.pt"
            )

            validate_inf_test(
                model,
                config,
                images,
                fold=fold,
                log_folder=log_folder,
                use_full_size=use_full_size,
                global_threshold=global_threshold,
                use_tta=use_tta,
                save=save,
            )

--------------------------------------------------------------------------------
/code/model_zoo/models.py:
--------------------------------------------------------------------------------
import segmentation_models_pytorch
from segmentation_models_pytorch.encoders import encoders


DECODERS = [
    "Unet",
    "Linknet",
    "FPN",
    "PSPNet",
    "DeepLabV3",
    "DeepLabV3Plus",
    "PAN",
    "UnetPlusPlus",
]
ENCODERS = list(encoders.keys())


def define_model(
    decoder_name,
    encoder_name,
    num_classes=1,
    activation=None,
    encoder_weights="imagenet",
):
    """
    Loads a segmentation architecture.

    Args:
        decoder_name (str): Decoder name.
        encoder_name (str): Encoder name.
        num_classes (int, optional): Number of classes. Defaults to 1.
        activation (str or None, optional): Activation of the last layer. Defaults to None.
        encoder_weights (str, optional): Pretrained encoder weights. Defaults to "imagenet".

    Returns:
        torch model: Segmentation model.
    """
    assert decoder_name in DECODERS, "Decoder name not supported"
    assert encoder_name in ENCODERS, "Encoder name not supported"

    decoder = getattr(segmentation_models_pytorch, decoder_name)

    model = decoder(
        encoder_name,
        encoder_weights=encoder_weights,
        classes=num_classes,
        activation=activation,
    )
    model.num_classes = num_classes

    return model
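
# Minimal usage sketch (illustrative): build a model and run a dummy tile
# through it. encoder_weights=None avoids downloading pretrained weights.
if __name__ == "__main__":
    import torch

    model = define_model("Unet", "resnet34", num_classes=2, encoder_weights=None)
    x = torch.randn(2, 3, 256, 256)
    with torch.no_grad():
        y = model(x)
    print(y.shape)  # expected: torch.Size([2, 2, 256, 256])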

--------------------------------------------------------------------------------
/code/params.py:
--------------------------------------------------------------------------------
# Hardcoded stuff, paths are to be adapted to your setup

import torch
import numpy as np

NUM_WORKERS = 2

DATA_PATH = "../input/"
TIFF_PATH = DATA_PATH + "train/"
TIFF_PATH_4 = DATA_PATH + "train_4/"
TIFF_PATH_2 = DATA_PATH + "train_2/"
TIFF_PATH_TEST = DATA_PATH + "test/"

LOG_PATH = "../logs/"
OUT_PATH = "../output/"

CLASSES = ["ftus"]
NUM_CLASSES = len(CLASSES)

MEAN = np.array([0.66437738, 0.50478148, 0.70114894])
STD = np.array([0.15825711, 0.24371008, 0.13832686])

DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
DATA_PATH_EXTRA = DATA_PATH + "extra_tiff/"


# Additional images for PL
EXTRA_IMGS = ["VAN0003-LK-32-21-PAS_registered.ome", "VAN0011-RK-3-10-PAS_registered.ome"]
EXTRA_IMGS_SHAPES = {
    "VAN0003-LK-32-21-PAS_registered.ome": (41220, 41500),
    "VAN0011-RK-3-10-PAS_registered.ome": (37040, 53240),
}

--------------------------------------------------------------------------------
/code/training/lovasz.py:
--------------------------------------------------------------------------------
# Adapted from https://github.com/bermanmaxim/LovaszSoftmax

import torch
import torch.nn.functional as F

from torch.autograd import Variable


def flatten(scores, labels):
    """
    Flattens predictions in the batch (binary case).
    """
    return scores.view(-1), labels.view(-1)


def lovasz_grad(gt_sorted):
    """
    Computes the gradient of the Lovasz extension w.r.t sorted errors.
    See Alg. 1 in the paper.
    """
    p = len(gt_sorted)
    gts = gt_sorted.sum()
    intersection = gts - gt_sorted.float().cumsum(0)
    union = gts + (1 - gt_sorted).float().cumsum(0)
    jaccard = 1. - intersection / union
    if p > 1:  # cover 1-pixel case
        jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
    return jaccard


def lovasz_hinge_flat(logits, labels):
    """
    Binary Lovasz hinge loss.
    logits: [P] Variable, logits at each prediction (between -infty and +infty)
    labels: [P] Tensor, binary ground truth labels (0 or 1)
    """
    if len(labels) == 0:
        # only void pixels, the gradients should be 0
        return logits.sum() * 0.
    signs = 2. * labels.float() - 1.
    errors = (1. - logits * Variable(signs))
    errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
    perm = perm.data
    gt_sorted = labels[perm]
    grad = lovasz_grad(gt_sorted)

    loss = torch.dot(F.elu(errors_sorted) + 1, Variable(grad))
    return loss


def lovasz_hinge(logits, labels, per_image=True):
    """
    Binary Lovasz hinge loss.
    logits: [B, H, W] Variable, logits at each pixel (between -infty and +infty)
    labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
    per_image: compute the loss per image instead of per batch
    """
    if per_image:
        loss = torch.stack([lovasz_hinge_flat(
            *flatten(log.unsqueeze(0), lab.unsqueeze(0))
        ) for log, lab in zip(logits, labels)])

    else:
        loss = lovasz_hinge_flat(*flatten(logits, labels))
    return loss


def symmetric_lovasz(outputs, targets):
    targets = targets.float()
    return (lovasz_hinge(outputs, targets) + lovasz_hinge(-outputs, 1 - targets)) / 2


def lovasz_loss(x, y):
    """
    Computes the symmetric Lovasz loss for each image of the batch.

    Args:
        x (torch tensor [BS x H x W]): Logits.
        y (torch tensor [BS x H x W]): Ground truth.

    Returns:
        torch tensor [BS]: Loss values.
    """
    return symmetric_lovasz(x, y)
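
# Minimal sanity check (illustrative, not part of the pipeline): symmetric
# Lovasz loss on a random batch of logits and binary masks, on CPU.
if __name__ == "__main__":
    logits = torch.randn(2, 64, 64)
    labels = (torch.rand(2, 64, 64) > 0.5).float()
    print(lovasz_loss(logits, labels))  # one loss value per image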

--------------------------------------------------------------------------------
/code/training/main.py:
--------------------------------------------------------------------------------
import gc
import time
import torch
import numpy as np
import pandas as pd


from params import DATA_PATH
from training.train import fit
from model_zoo.models import define_model
from data.transforms import HE_preprocess
from utils.metrics import tweak_threshold
from training.predict import predict_entire_mask_downscaled
from data.dataset import InMemoryTrainDataset, InferenceDataset
from utils.torch import seed_everything, count_parameters, save_model_weights


def train(config, dataset, fold, log_folder=None):
    """
    Trains a model.

    Args:
        config (Config): Parameters.
        dataset (torch dataset): Whole in-memory dataset.
        fold (int): Selected fold.
        log_folder (None or str, optional): Folder to log results to. Defaults to None.

    Returns:
        SegmentationMeter: Meter.
        pandas dataframe: Training history.
        torch model: Trained segmentation model.
    """

    seed_everything(config.seed)

    model = define_model(
        config.decoder,
        config.encoder,
        num_classes=config.num_classes,
        encoder_weights=config.encoder_weights,
    ).to(config.device)
    model.zero_grad()

    n_parameters = count_parameters(model)

    print(f" -> {n_parameters} trainable parameters")

    # switch dataset to the correct fold
    dataset.update_fold_nb(fold)
    print(" -> Validation images :", dataset.valid_set, "\n")

    meter, history = fit(
        model,
        dataset,
        optimizer_name=config.optimizer,
        loss_name=config.loss,
        activation=config.activation,
        epochs=config.epochs,
        batch_size=config.batch_size,
        val_bs=config.val_bs,
        lr=config.lr,
        warmup_prop=config.warmup_prop,
        mix_proba=config.mix_proba,
        mix_alpha=config.mix_alpha,
        verbose=config.verbose,
        first_epoch_eval=config.first_epoch_eval,
        device=config.device,
        num_classes=config.num_classes,
    )

    if config.save_weights and log_folder is not None:
        name = f"{config.decoder}_{config.encoder}_{fold}.pt"
        save_model_weights(
            model,
            name,
            cp_folder=log_folder,
        )

    return meter, history, model


def validate(model, config, val_images):
    """
    Quick model validation on images.
    Validation is performed on downscaled images.

    Args:
        model (torch model): Trained model.
        config (Config): Model config.
        val_images (list of strings): Validation image ids.

    Returns:
        list of floats: Dice scores per image.
    """
    rles = pd.read_csv(DATA_PATH + f"train_{config.reduce_factor}.csv")
    scores = []
    for img in val_images:

        predict_dataset = InferenceDataset(
            f"{DATA_PATH}train_{config.reduce_factor}/{img}.tiff",
            rle=rles[rles["id"] == img]["encoding"],
            overlap_factor=config.overlap_factor,
            tile_size=config.tile_size,
            reduce_factor=1,
            transforms=HE_preprocess(augment=False, visualize=False, size=config.tile_size),
        )

        global_pred = predict_entire_mask_downscaled(
            predict_dataset, model, batch_size=config.val_bs, tta=False
        )

        threshold, score = tweak_threshold(
            mask=torch.from_numpy(predict_dataset.mask).cuda(), pred=global_pred
        )

        scores.append(score)
        print(
            f" - Scored {score :.4f} for downscaled image {img} with threshold {threshold:.2f}"
        )

    return scores


def k_fold(config, log_folder=None):
    """
    Performs a k-fold cross validation.

    Args:
        config (Config): Parameters.
        log_folder (None or str, optional): Folder to log results to. Defaults to None.
    """
    scores = []
    nb_folds = 5
    # Data preparation
    print("Creating in-memory dataset ...")

    start_time = time.time()

    if isinstance(config.rle_path, list):
        df_rle = [pd.read_csv(path) for path in config.rle_path]
        train_img_names = df_rle[0].id.unique()
    else:
        df_rle = pd.read_csv(config.rle_path)
        train_img_names = df_rle.id.unique()

    if isinstance(config.extra_path, list):
        df_rle_extra = [pd.read_csv(path) for path in config.extra_path]
    else:
        df_rle_extra = pd.read_csv(config.extra_path) if config.extra_path is not None else None

    in_mem_dataset = InMemoryTrainDataset(
        train_img_names,
        df_rle,
        train_tile_size=config.tile_size,
        reduce_factor=config.reduce_factor,
        train_transfo=HE_preprocess(size=config.tile_size),
        valid_transfo=HE_preprocess(augment=False, size=config.tile_size),
        train_path=f"../input/train_{config.reduce_factor}/",
        iter_per_epoch=config.iter_per_epoch,
        on_spot_sampling=config.on_spot_sampling,
        pl_path=config.pl_path,
        use_pl=config.use_pl,
        test_path=f"../input/test_{config.reduce_factor}/",
        df_rle_extra=df_rle_extra,
        use_external=config.use_external,
    )
    print(f"Done in {time.time() - start_time :.0f} seconds.")

    for i in config.selected_folds:
        print(f"\n------------- Fold {i + 1} / {nb_folds} -------------\n")

        meter, history, model = train(config, in_mem_dataset, i, log_folder=log_folder)

        print("\n    -> Validating \n")

        val_images = in_mem_dataset.valid_set
        scores += validate(model, config, val_images)

        if log_folder is not None:
            history.to_csv(log_folder + f"history_{i}.csv", index=False)

        if log_folder is None or len(config.selected_folds) == 1:
            return meter

        del meter
        del model
        torch.cuda.empty_cache()
        gc.collect()

    print(f"\n\n -> Dice CV : {np.mean(scores) :.3f} +/- {np.std(scores) :.3f}")

--------------------------------------------------------------------------------
/code/training/meter.py:
--------------------------------------------------------------------------------
from utils.metrics import dice_score_tensor


class SegmentationMeter:
    """
    Meter to handle predictions & metrics.
    """
    def __init__(self, threshold=0.5):
        """
        Constructor.

        Args:
            threshold (float, optional): Threshold for predictions. Defaults to 0.5.
        """
        self.threshold = threshold
        self.reset()

    def update(self, y_batch, preds):
        """
        Updates the metric.

        Args:
            y_batch (tensor): Truths.
            preds (tensor): Predictions.
        """
        self.dice += dice_score_tensor(preds, y_batch, threshold=self.threshold) * preds.size(0)
        self.count += preds.size(0)

    def compute(self):
        """
        Computes the metrics.

        Returns:
            dict: Metrics dictionary.
        """
        self.metrics["dice"] = [self.dice / self.count]
        return self.metrics

    def reset(self):
        """
        Resets everything.
        """
        self.dice = 0
        self.count = 0
        self.metrics = {
            "dice": [0],
        }
        return self.metrics

--------------------------------------------------------------------------------
/code/training/mix.py:
--------------------------------------------------------------------------------
import torch
import numpy as np


def rand_bbox(size, lam):
    """
    Returns the coordinates of a random rectangle in the image for cutmix.

    Args:
        size (torch size [batch_size x c x W x H]): Input size.
        lam (float): Lambda sampled from the beta distribution. Controls the size of the rectangle.

    Returns:
        ints: 4 coordinates of the rectangle.
        float: Proportion of the unmasked image.
    """
    W = size[2]
    H = size[3]
    cut_rat = np.sqrt(1.0 - lam)
    cut_w = int(W * cut_rat)
    cut_h = int(H * cut_rat)

    # uniform
    cx = np.random.randint(W)
    cy = np.random.randint(H)

    bbx1 = np.clip(cx - cut_w // 2, 0, W)
    bby1 = np.clip(cy - cut_h // 2, 0, H)
    bbx2 = np.clip(cx + cut_w // 2, 0, W)
    bby2 = np.clip(cy + cut_h // 2, 0, H)

    lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (W * H))
    return bbx1, bby1, bbx2, bby2, lam


def cutmix_data(x, y, alpha=1.0, device="cuda"):
    """
    Applies CutMix to a sample.

    Args:
        x (torch tensor [batch_size x input_size]): Input batch.
        y (torch tensor [batch_size x H x W]): Masks.
        alpha (float, optional): Parameter of the beta distribution. Defaults to 1.0.
        device (str, optional): Device for torch. Defaults to "cuda".

    Returns:
        torch tensor [batch_size x input_size]: Mixed input.
        torch tensor [batch_size x H x W]: Mixed masks.
    """
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1

    index = torch.randperm(x.size()[0]).to(device)

    bbx1, bby1, bbx2, bby2, lam = rand_bbox(x.size(), lam)

    mixed_x = x.clone()
    mixed_y = y.clone()

    mixed_x[:, :, bbx1:bbx2, bby1:bby2] = x[index, :, bbx1:bbx2, bby1:bby2]
    mixed_y[:, bbx1:bbx2, bby1:bby2] = y[index, bbx1:bbx2, bby1:bby2]

    return mixed_x, mixed_y
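
# Minimal usage sketch (illustrative, not part of the pipeline): apply CutMix
# to a random batch of tiles and their masks, on CPU.
if __name__ == "__main__":
    x = torch.randn(4, 3, 128, 128)
    y = (torch.rand(4, 128, 128) > 0.5).float()
    mixed_x, mixed_y = cutmix_data(x, y, alpha=1.0, device="cpu")
    print(mixed_x.shape, mixed_y.shape)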

--------------------------------------------------------------------------------
/code/training/optim.py:
--------------------------------------------------------------------------------
import torch
from training.lovasz import lovasz_loss

LOSSES = ["CrossEntropyLoss", "BCELoss", "BCEWithLogitsLoss"]


def define_loss(name, device="cuda"):
    """
    Defines the loss function associated to the name.
    Supports losses in the LOSSES list, as well as the Lovasz loss.

    Args:
        name (str): Loss name.
        device (str, optional): Device for torch. Defaults to "cuda".

    Raises:
        NotImplementedError: Specified loss name is not supported.

    Returns:
        torch loss: Loss function.
    """
    if name in LOSSES:
        loss = getattr(torch.nn, name)(reduction="none")
    elif name == "lovasz":
        loss = lovasz_loss
    else:
        raise NotImplementedError

    return loss


def prepare_for_loss(y_pred, y_batch, loss, device="cuda", train=True):
    """
    Reformats predictions to fit a loss function.

    Args:
        y_pred (torch tensor): Predictions.
        y_batch (torch tensor): Truths.
        loss (str): Name of the loss function.
        device (str, optional): Device for torch. Defaults to "cuda".
        train (bool, optional): Whether it is the training phase. Defaults to True.

    Raises:
        NotImplementedError: Specified loss name is not supported.

    Returns:
        torch tensor: Reformatted predictions.
        torch tensor: Reformatted truths.
    """

    if loss in ["BCEWithLogitsLoss", "lovasz", "HaussdorfLoss", "SoftDiceLoss"]:
        y_batch = y_batch.to(device)
        y_pred = y_pred.squeeze(1)
        if not train:
            y_pred = y_pred.detach()
    else:
        raise NotImplementedError

    return y_pred, y_batch


def define_optimizer(name, params, lr=1e-3):
    """
    Defines the optimizer associated to the name.
    Supports optimizers from torch.optim.

    Args:
        name (str): Optimizer name.
        params (torch parameters): Model parameters.
        lr (float, optional): Learning rate. Defaults to 1e-3.

    Raises:
        NotImplementedError: Specified optimizer name is not supported.

    Returns:
        torch optimizer: Optimizer.
    """
    try:
        optimizer = getattr(torch.optim, name)(params, lr=lr)
    except AttributeError:
        raise NotImplementedError

    return optimizer

--------------------------------------------------------------------------------
/code/training/predict.py:
--------------------------------------------------------------------------------
import cv2
import torch
import numpy as np
from torch.utils.data import DataLoader

FLIPS = [[-1], [-2], [-2, -1]]


def threshold_resize(preds, shape, threshold=0.5):
    """
    Thresholds and resizes predictions.

    Args:
        preds (np array): Predictions.
        shape (tuple [2]): Shape to resize to.
        threshold (float, optional): Threshold. Defaults to 0.5.

    Returns:
        np array: Resized predictions.
    """
    preds = (preds > threshold).astype(np.uint8)

    preds = cv2.resize(
        preds,
        (shape[0], shape[1]),
        interpolation=cv2.INTER_AREA,
    )

    return preds


def threshold_resize_torch(preds, shape, threshold=0.5):
    """
    Thresholds and resizes predictions as a tensor.

    Args:
        preds (torch tensor): Predictions.
        shape (tuple [2]): Shape to resize to.
        threshold (float, optional): Threshold. Defaults to 0.5.

    Returns:
        np array: Resized predictions.
    """
    preds = preds.unsqueeze(0).unsqueeze(0)
    preds = torch.nn.functional.interpolate(
        preds, (shape[1], shape[0]), mode='bilinear', align_corners=False
    )
    return (preds > threshold).cpu().numpy()[0, 0]


def get_tile_weighting(size, sigma=1, alpha=1, eps=1e-6):
    """
    Gets the weighting of a tile for inference.
    Coordinates close to the border are given a lower weight.
    This helps reduce side effects. We recommend visualizing the output.

    Args:
        size (int): Tile size.
        sigma (int, optional): Power parameter. Defaults to 1.
        alpha (int, optional): Shifting. Defaults to 1.
        eps (float, optional): Epsilon to avoid dividing by 0. Defaults to 1e-6.

    Returns:
        np float16 array [size x size]: Tile weighting.
    """
    half = size // 2
    w = np.ones((size, size), np.float32)

    x = np.concatenate([np.mgrid[-half:0], np.mgrid[1: half + 1]])[:, None]
    x = np.tile(x, (1, size))
    x = half + 1 - np.abs(x)
    y = x.T

    w = np.minimum(x, y)
    w = (w / w.max()) ** sigma
    w = np.minimum(w, 1)

    w = (w - np.min(w) + eps) / (np.max(w) - np.min(w) + eps)

    w = np.where(w > alpha, 1, w)
    w = w / alpha
    w = np.clip(w, 1e-3, 1)

    w = np.round(w, 3)
    return w.astype(np.float16)
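
# The weighting above is easiest to understand visually. The helper below is an
# illustrative sketch (not used by the pipeline): it prints a few values to show
# that borders get a much lower weight than the center; swap the print for e.g.
# matplotlib's imshow to actually visualize the map.
def _demo_tile_weighting():
    w = get_tile_weighting(256)
    print("corner:", w[0, 0], "- mid-edge:", w[0, 128], "- center:", w[128, 128])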
65 | """ 66 | half = size // 2 67 | w = np.ones((size, size), np.float32) 68 | 69 | x = np.concatenate([np.mgrid[-half:0], np.mgrid[1: half + 1]])[:, None] 70 | x = np.tile(x, (1, size)) 71 | x = half + 1 - np.abs(x) 72 | y = x.T 73 | 74 | w = np.minimum(x, y) 75 | w = (w / w.max()) ** sigma 76 | w = np.minimum(w, 1) 77 | 78 | w = (w - np.min(w) + eps) / (np.max(w) - np.min(w) + eps) 79 | 80 | w = np.where(w > alpha, 1, w) 81 | w = w / alpha 82 | w = np.clip(w, 1e-3, 1) 83 | 84 | w = np.round(w, 3) 85 | return w.astype(np.float16) 86 | 87 | 88 | def predict_entire_mask(dataset, model, batch_size=32, tta=False): 89 | """ 90 | Performs inference on an image. 91 | 92 | Args: 93 | dataset (InferenceDataset): Inference dataset. 94 | model (torch model): Segmentation model. 95 | batch_size (int, optional): Batch size. Defaults to 32. 96 | tta (bool, optional): Whether to apply tta. Defaults to False. 97 | 98 | Returns: 99 | torch tensor [H x W]: Prediction on the image. 100 | """ 101 | loader = DataLoader(dataset, batch_size=batch_size, shuffle=False, pin_memory=True) 102 | 103 | weighting = torch.from_numpy(get_tile_weighting(dataset.tile_size)) 104 | weighting_cuda = weighting.clone().cuda().unsqueeze(0) 105 | weighting = weighting.cuda().half() 106 | 107 | global_pred = torch.zeros( 108 | (dataset.orig_size[0], dataset.orig_size[1]), 109 | dtype=torch.half, device="cuda" 110 | ) 111 | global_counter = torch.zeros( 112 | (dataset.orig_size[0], dataset.orig_size[1]), 113 | dtype=torch.half, device="cuda" 114 | ) 115 | 116 | model.eval() 117 | with torch.no_grad(): 118 | for img, pos in loader: 119 | img = img.to("cuda") 120 | _, _, h, w = img.shape 121 | 122 | if model.num_classes == 2: 123 | pred = model(img)[:, 0].view(-1, 1, h, w).sigmoid().detach() 124 | else: 125 | pred = model(img).view(-1, 1, h, w).sigmoid().detach() 126 | 127 | if tta: 128 | for f in FLIPS: 129 | pred_flip = model(torch.flip(img, f)) 130 | if model.num_classes == 2: 131 | pred_flip = pred_flip[:, 0] 132 | 133 | pred_flip = torch.flip(pred_flip, f).view(-1, 1, h, w).sigmoid().detach() 134 | pred += pred_flip 135 | pred = torch.div(pred, len(FLIPS) + 1) 136 | 137 | pred = torch.nn.functional.interpolate( 138 | pred, (dataset.tile_size, dataset.tile_size), mode='area' 139 | ).view(-1, dataset.tile_size, dataset.tile_size) 140 | 141 | pred = (pred * weighting_cuda).half() 142 | 143 | for tile_idx, (x0, x1, y0, y1) in enumerate(pos): 144 | global_pred[x0: x1, y0: y1] += pred[tile_idx] 145 | global_counter[x0: x1, y0: y1] += weighting 146 | 147 | for i in range(len(global_pred)): 148 | global_pred[i] = torch.div(global_pred[i], global_counter[i]) 149 | 150 | return global_pred 151 | 152 | 153 | def predict_entire_mask_downscaled(dataset, model, batch_size=32, tta=False): 154 | """ 155 | Performs inference on an image. 156 | The "downscaled" means that the mask is kept at a reduced resolution. 157 | The reduced resolution is the reduce_factor parameter of the dataset. 158 | 159 | Args: 160 | dataset (InferenceDataset): Inference dataset. 161 | model (torch model): Segmentation model. 162 | batch_size (int, optional): Batch size. Defaults to 32. 163 | tta (bool, optional): Whether to apply tta. Defaults to False. 164 | 165 | Returns: 166 | torch tensor [H/reduce_factor x W/reduce_factor]: Prediction on the image. 
167 | """ 168 | loader = DataLoader(dataset, batch_size=batch_size, shuffle=False, pin_memory=True) 169 | 170 | weighting = torch.from_numpy(get_tile_weighting(dataset.tile_size)) 171 | weighting_cuda = weighting.clone().cuda().unsqueeze(0) 172 | weighting = weighting.cuda().half() 173 | 174 | global_pred = torch.zeros( 175 | (dataset.orig_size[0], dataset.orig_size[1]), 176 | dtype=torch.half, device="cuda" 177 | ) 178 | global_counter = torch.zeros( 179 | (dataset.orig_size[0], dataset.orig_size[1]), 180 | dtype=torch.half, device="cuda" 181 | ) 182 | 183 | model.eval() 184 | with torch.no_grad(): 185 | for img, pos in loader: 186 | img = img.to("cuda") 187 | _, _, h, w = img.shape 188 | 189 | if model.num_classes == 2: 190 | pred = model(img)[:, 0].view(-1, h, w).sigmoid().detach() 191 | else: 192 | pred = model(img).view(-1, h, w).sigmoid().detach() 193 | 194 | if tta: 195 | for f in FLIPS: 196 | pred_flip = model(torch.flip(img, f)) 197 | if model.num_classes == 2: 198 | pred_flip = pred_flip[:, 0] 199 | pred_flip = torch.flip(pred_flip, f).view(-1, h, w).sigmoid().detach() 200 | pred += pred_flip 201 | pred = torch.div(pred, len(FLIPS) + 1) 202 | 203 | pred = (pred * weighting_cuda).half() 204 | 205 | for tile_idx, (x0, x1, y0, y1) in enumerate(pos): 206 | global_pred[x0: x1, y0: y1] += pred[tile_idx] 207 | global_counter[x0: x1, y0: y1] += weighting 208 | 209 | for i in range(len(global_pred)): 210 | global_pred[i] = torch.div(global_pred[i], global_counter[i]) 211 | 212 | return global_pred 213 | 214 | 215 | def predict_entire_mask_downscaled_tta(dataset, model, batch_size=32): 216 | """ 217 | Performs inference on an image. 218 | The "downscaled" means that the mask is kept at a reduced resolution. 219 | The reduced resolution is the reduce_factor parameter of the dataset. 220 | The "tta" means that it returns predictions for each tta. 221 | 222 | Args: 223 | dataset (InferenceDataset): Inference dataset. 224 | model (torch model): Segmentation model. 225 | batch_size (int, optional): Batch size. Defaults to 32. 226 | 227 | Returns: 228 | torch tensor [4 x H/reduce_factor x W/reduce_factor]: Prediction on the image. 
229 | """ 230 | 231 | loader = DataLoader(dataset, batch_size=batch_size, shuffle=False, pin_memory=True) 232 | 233 | weighting = torch.from_numpy(get_tile_weighting(dataset.tile_size)) 234 | weighting_cuda = weighting.clone().cuda().unsqueeze(0).unsqueeze(0) 235 | weighting = weighting.cuda().half() 236 | 237 | global_pred = torch.zeros( 238 | (4, dataset.orig_size[0], dataset.orig_size[1]), 239 | dtype=torch.half, device="cuda" 240 | ) 241 | global_counter = torch.zeros( 242 | (1, dataset.orig_size[0], dataset.orig_size[1]), 243 | dtype=torch.half, device="cuda" 244 | ) 245 | 246 | model.eval() 247 | with torch.no_grad(): 248 | for img, pos in loader: 249 | img = img.to("cuda") 250 | _, _, h, w = img.shape 251 | 252 | preds = [] 253 | if model.num_classes == 2: 254 | pred = model(img)[:, 0].view(1, -1, h, w).sigmoid().detach() 255 | else: 256 | pred = model(img).view(1, -1, h, w).sigmoid().detach() 257 | preds.append(pred) 258 | 259 | for f in FLIPS: 260 | pred_flip = model(torch.flip(img, f)) 261 | if model.num_classes == 2: 262 | pred_flip = pred_flip[:, 0] 263 | pred_flip = torch.flip(pred_flip, f).view(1, -1, h, w).sigmoid().detach() 264 | preds.append(pred_flip) 265 | 266 | pred = torch.cat(preds, 0) 267 | pred = (pred * weighting_cuda).half() 268 | 269 | for tile_idx, (x0, x1, y0, y1) in enumerate(pos): 270 | global_pred[:, x0: x1, y0: y1] += pred[:, tile_idx] 271 | global_counter[:, x0: x1, y0: y1] += weighting 272 | 273 | for i in range(global_pred.size(1)): 274 | global_pred[:, i] = torch.div(global_pred[:, i], global_counter[:, i]) 275 | 276 | return global_pred 277 | -------------------------------------------------------------------------------- /code/training/train.py: -------------------------------------------------------------------------------- 1 | import time 2 | import torch 3 | import numpy as np 4 | 5 | from torch.utils.data import DataLoader 6 | from transformers import get_linear_schedule_with_warmup 7 | 8 | from params import NUM_WORKERS 9 | from training.mix import cutmix_data 10 | from utils.torch import worker_init_fn 11 | from utils.logger import update_history 12 | from training.meter import SegmentationMeter 13 | from training.optim import define_loss, define_optimizer, prepare_for_loss 14 | 15 | 16 | def fit( 17 | model, 18 | dataset, 19 | optimizer_name="Adam", 20 | loss_name="BCEWithLogitsLoss", 21 | activation="sigmoid", 22 | epochs=50, 23 | batch_size=32, 24 | val_bs=32, 25 | warmup_prop=0.1, 26 | lr=1e-3, 27 | mix_proba=0, 28 | mix_alpha=0.4, 29 | verbose=1, 30 | first_epoch_eval=0, 31 | num_classes=1, 32 | device="cuda", 33 | ): 34 | """ 35 | Usual torch fit function. 36 | 37 | Args: 38 | model (torch model): Model to train. 39 | dataset (InMemoryTrainDataset): Dataset. 40 | optimizer_name (str, optional): Optimizer name. Defaults to 'adam'. 41 | loss_name (str, optional): Loss name. Defaults to 'BCEWithLogitsLoss'. 42 | activation (str, optional): Activation function. Defaults to 'sigmoid'. 43 | epochs (int, optional): Number of epochs. Defaults to 50. 44 | batch_size (int, optional): Training batch size. Defaults to 32. 45 | val_bs (int, optional): Validation batch size. Defaults to 32. 46 | warmup_prop (float, optional): Warmup proportion. Defaults to 0.1. 47 | lr (float, optional): Learning rate. Defaults to 1e-3. 48 | mix_proba (float, optional): Probability to apply mixup with. Defaults to 0. 49 | mix_alpha (float, optional): Mixup alpha parameter. Defaults to 0.4. 50 | verbose (int, optional): Period (in epochs) to display logs at. 
51 |         first_epoch_eval (int, optional): Epoch to start evaluating at. Defaults to 0.
52 |         num_classes (int, optional): Number of classes. Defaults to 1.
53 |         device (str, optional): Device for torch. Defaults to "cuda".
54 |
55 |     Returns:
56 |         SegmentationMeter: Meter with the validation metrics.
57 |         pandas dataframe: Training history.
58 |     """
59 |
60 |     avg_val_loss = 0.0
61 |     history = None
62 |
63 |     scaler = torch.cuda.amp.GradScaler()
64 |
65 |     optimizer = define_optimizer(optimizer_name, model.parameters(), lr=lr)
66 |
67 |     loss_fct = define_loss(loss_name, device=device)
68 |     w_fc = 0.2  # weight of the auxiliary (FC) loss when num_classes == 2
69 |
70 |     data_loader = DataLoader(
71 |         dataset,
72 |         batch_size=batch_size,
73 |         drop_last=False,
74 |         num_workers=NUM_WORKERS,
75 |         pin_memory=True,
76 |         worker_init_fn=worker_init_fn
77 |     )
78 |
79 |     meter = SegmentationMeter()
80 |
81 |     num_warmup_steps = int(warmup_prop * epochs * len(data_loader))
82 |     num_training_steps = int(epochs * len(data_loader))
83 |     scheduler = get_linear_schedule_with_warmup(
84 |         optimizer, num_warmup_steps, num_training_steps
85 |     )
86 |
87 |     for epoch in range(epochs):
88 |         model.train()
89 |         dataset.train(True)
90 |         start_time = time.time()
91 |         optimizer.zero_grad()
92 |
93 |         avg_loss = 0
94 |
95 |         for batch in data_loader:
96 |             x = batch[0].to(device).float()
97 |             y_batch = batch[1].float()
98 |             w = batch[2].float().cuda()
99 |
100 |             if np.random.random() < mix_proba:  # apply cutmix with probability mix_proba
101 |                 x, y_batch = cutmix_data(x, y_batch, alpha=mix_alpha, device=device)
102 |
103 |             with torch.cuda.amp.autocast():
104 |                 y_pred = model(x)
105 |
106 |                 if num_classes == 2:
107 |                     y_batch, y_batch_fc = y_batch[:, :, :, 0], y_batch[:, :, :, 1]
108 |                     y_pred, y_pred_fc = y_pred[:, 0], y_pred[:, 1]
109 |
110 |                     y_pred_fc, y_batch_fc = prepare_for_loss(
111 |                         y_pred_fc, y_batch_fc, loss_name, device=device
112 |                     )
113 |
114 |                 y_pred, y_batch = prepare_for_loss(y_pred, y_batch, loss_name, device=device)
115 |
116 |                 loss = loss_fct(y_pred, y_batch).mean()
117 |                 if num_classes == 2:
118 |                     loss_fc = loss_fct(y_pred_fc, y_batch_fc).mean(-1).mean(-1) * w
119 |                     loss_fc = loss_fc.sum() / (w.sum() + 1e-6)
120 |
121 |                     loss = (loss + w_fc * loss_fc) / (1 + w_fc)
122 |
123 |             scaler.scale(loss).backward()
124 |
125 |             avg_loss += loss.item() / len(data_loader)
126 |
127 |             scaler.step(optimizer)
128 |             scaler.update()
129 |
130 |             scheduler.step()
131 |             for param in model.parameters():
132 |                 param.grad = None
133 |
134 |         model.eval()
135 |         dataset.train(False)
136 |         avg_val_loss = 0.
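        # Validation phase: the same DataLoader is reused below; the call to
        # dataset.train(False) above switches the InMemoryTrainDataset to
        # validation mode, so the loader now yields the validation samples.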
137 |         metrics = meter.reset()
138 |
139 |         if epoch + 1 >= first_epoch_eval:
140 |             with torch.no_grad():
141 |                 for batch in data_loader:
142 |                     x = batch[0].to(device).float()
143 |                     y_batch = batch[1].float()
144 |
145 |                     y_pred = model(x)
146 |
147 |                     if num_classes == 2:  # only non-fc
148 |                         y_batch = y_batch[:, :, :, 0]
149 |                         y_pred = y_pred[:, 0]
150 |
151 |                     y_pred, y_batch = prepare_for_loss(
152 |                         y_pred,
153 |                         y_batch,
154 |                         loss_name,
155 |                         device=device,
156 |                         train=False
157 |                     )
158 |
159 |                     loss = loss_fct(y_pred, y_batch).mean()
160 |
161 |                     avg_val_loss += loss / len(data_loader)
162 |
163 |                     if activation == "sigmoid":
164 |                         y_pred = torch.sigmoid(y_pred)
165 |                     elif activation == "softmax":
166 |                         y_pred = torch.softmax(y_pred, 2)
167 |
168 |                     meter.update(y_batch, y_pred)
169 |
170 |             metrics = meter.compute()
171 |
172 |         elapsed_time = time.time() - start_time
173 |         if (epoch + 1) % verbose == 0:
174 |             elapsed_time = elapsed_time * verbose
175 |             lr = scheduler.get_last_lr()[0]
176 |             print(
177 |                 f"Epoch {epoch + 1:02d}/{epochs:02d} \t lr={lr:.1e}\t t={elapsed_time:.0f}s\t"
178 |                 f"loss={avg_loss:.3f}",
179 |                 end="\t",
180 |             )
181 |             if epoch + 1 >= first_epoch_eval:
182 |                 print(f"val_loss={avg_val_loss:.3f} \t dice={metrics['dice'][0]:.4f}")
183 |             else:
184 |                 print("")
185 |             history = update_history(
186 |                 history, metrics, epoch + 1, avg_loss, avg_val_loss, elapsed_time
187 |             )
188 |
189 |     del (data_loader, y_pred, loss, x, y_batch)
190 |     torch.cuda.empty_cache()
191 |
192 |     return meter, history
193 |
--------------------------------------------------------------------------------
/code/utils/logger.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import json
4 | import datetime
5 | import pandas as pd
6 |
7 | from params import LOG_PATH
8 |
9 | LOGGED_IN_CONFIG = [
10 |     "encoder",
11 |     "decoder",
12 |     "num_classes",
13 |     "activation",
14 |     "loss",
15 |     "optimizer",
16 |     "batch_size",
17 |     "epochs",
18 |     "lr",
19 |     "warmup_prop",
20 |     "k",
21 |     "random_state",
22 | ]
23 |
24 |
25 | class Logger(object):
26 |     """
27 |     Simple logger that saves what is printed in a file.
28 |     """
29 |
30 |     def __init__(self, *files):
31 |         self.files = files
32 |
33 |     def write(self, obj):
34 |         for f in self.files:
35 |             f.write(obj)
36 |             f.flush()
37 |
38 |     def flush(self):
39 |         for f in self.files:
40 |             f.flush()
41 |
42 |
43 | def create_logger(directory="", name="logs.txt"):
44 |     """
45 |     Creates a logger to log output in a chosen file.
46 |
47 |     Args:
48 |         directory (str, optional): Path to save logs at. Defaults to "".
49 |         name (str, optional): Name of the file to save the logs in. Defaults to "logs.txt".
50 |     """
51 |
52 |     log = open(directory + name, "a", encoding="utf-8")
53 |     file_logger = Logger(sys.stdout, log)
54 |
55 |     sys.stdout = file_logger
56 |     sys.stderr = file_logger
57 |
58 |
59 | def prepare_log_folder(log_path):
60 |     """
61 |     Creates the directory for logging.
62 |     Logs will be saved at log_path/date_of_day/exp_id.
63 |
64 |     Args:
65 |         log_path (str): Directory.
66 |
67 |     Returns:
68 |         str: Path to the created log folder.
69 |     """
70 |     today = str(datetime.date.today())
71 |     log_today = f"{log_path}{today}/"
72 |
73 |     if not os.path.exists(log_today):
74 |         os.mkdir(log_today)
75 |
76 |     exp_id = len(os.listdir(log_today))
77 |     log_folder = log_today + f"{exp_id}/"
78 |
79 |     if not os.path.exists(log_folder):
80 |         os.mkdir(log_folder)
81 |     else:
82 |         print("Experiment already exists")
83 |
84 |     return log_folder
85 |
86 |
87 | def update_history(history, metrics, epoch, loss, val_loss, time):
88 |     """
89 |     Updates a training history dataframe.
90 |
91 |     Args:
92 |         history (pandas dataframe or None): Previous history.
93 |         metrics (dict): Metrics dictionary.
94 |         epoch (int): Epoch.
95 |         loss (float): Training loss.
96 |         val_loss (float): Validation loss.
97 |         time (float): Epoch duration.
98 |
99 |     Returns:
100 |         pandas dataframe: Updated history.
101 |     """
102 |     new_history = {
103 |         "epoch": [epoch],
104 |         "time": [time],
105 |         "loss": [loss],
106 |         "val_loss": [val_loss],
107 |     }
108 |     new_history.update(metrics)
109 |
110 |     new_history = pd.DataFrame.from_dict(new_history)
111 |
112 |     if history is not None:
113 |         return pd.concat([history, new_history]).reset_index(drop=True)
114 |     else:
115 |         return new_history
116 |
117 |
118 | def save_config(config, path):
119 |     """
120 |     Saves a config as a json.
121 |
122 |     Args:
123 |         config (Config): Config.
124 |         path (str): Path to save at.
125 |
126 |     Note:
127 |         Nothing is returned; the config is only written to `path`.
128 |     """
129 |     dic = config.__dict__.copy()
130 |     del dic["__doc__"], dic["__module__"], dic["__dict__"], dic["__weakref__"]
131 |
132 |     with open(path, "w") as f:
133 |         json.dump(dic, f)
134 |
135 |
136 | def update_overall_logs(metrics, config_df, log_path):
137 |     """
138 |     Updates a .csv containing logs for several experiments.
139 |
140 |     Args:
141 |         metrics (pandas dataframe): Metrics dataframe.
142 |         config_df (pandas dataframe): Config as a dataframe (see save_config).
143 |         log_path (str): Path to save at.
144 |
145 |     Returns:
146 |         pandas dataframe: Updated dataframe containing logs.
147 |     """
148 |     filename = (
149 |         f"{LOG_PATH}logs_{config_df['mode'][0]}_{config_df['target_name'][0]}.csv"
150 |     )
151 |
152 |     metrics = metrics[["auc", "accuracy", "f1"]]
153 |     config_df = config_df[LOGGED_IN_CONFIG]
154 |     df = pd.concat([config_df, metrics], axis=1)
155 |     df["path"] = log_path
156 |
157 |     try:
158 |         logs = pd.read_csv(filename)
159 |         logs = pd.concat([logs, df], sort=False).reset_index(drop=True)
160 |     except FileNotFoundError:
161 |         logs = df
162 |
163 |     logs.to_csv(filename, index=False)
164 |
165 |     return logs
166 |
--------------------------------------------------------------------------------
/code/utils/metrics.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | def dice_scores_img(pred, truth, eps=1e-8):
5 |     """
6 |     Dice metric for a single image as array.
7 |
8 |     Args:
9 |         pred (np array): Predictions.
10 |         truth (np array): Ground truths.
11 |         eps (float, optional): Epsilon to avoid dividing by 0. Defaults to 1e-8.
12 |
13 |     Returns:
14 |         float: Dice value.
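
    Example (illustrative; tiny arrays for clarity):
        >>> pred = np.array([[0, 1], [1, 1]])
        >>> truth = np.array([[0, 1], [0, 1]])
        >>> dice_scores_img(pred, truth)  # 2 * 2 / (3 + 2) = 0.8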
15 | """ 16 | pred = pred.reshape(-1) > 0 17 | truth = truth.reshape(-1) > 0 18 | intersect = (pred & truth).sum(-1) 19 | union = pred.sum(-1) + truth.sum(-1) 20 | 21 | dice = (2.0 * intersect + eps) / (union + eps) 22 | return dice 23 | 24 | 25 | def dice_scores_img_tensor(pred, truth, eps=1e-8): 26 | """ 27 | Dice metric for a single image as tensor. 28 | 29 | Args: 30 | pred (torch tensor): Predictions. 31 | truth (torch tensor): Ground truths. 32 | eps (float, optional): epsilon to avoid dividing by 0. Defaults to 1e-8. 33 | 34 | Returns: 35 | np array : dice value for each class. 36 | """ 37 | pred = pred.view(-1) > 0 38 | truth = truth.contiguous().view(-1) > 0 39 | intersect = (pred & truth).sum(-1) 40 | union = pred.sum(-1) + truth.sum(-1) 41 | 42 | dice = (2.0 * intersect + eps) / (union + eps) 43 | return float(dice) 44 | 45 | 46 | def dice_score(pred, truth, eps=1e-8, threshold=0.5): 47 | """ 48 | Dice metric. Only classes that are present are weighted. 49 | 50 | Args: 51 | pred (np array): Predictions. 52 | truth (np array): Ground truths. 53 | eps (float, optional): epsilon to avoid dividing by 0. Defaults to 1e-8. 54 | threshold (float, optional): Threshold for predictions. Defaults to 0.5. 55 | 56 | Returns: 57 | float: dice value. 58 | """ 59 | pred = (pred.reshape((truth.shape[0], -1)) > threshold).astype(int) 60 | truth = truth.reshape((truth.shape[0], -1)).astype(int) 61 | intersect = (pred + truth == 2).sum(-1) 62 | union = pred.sum(-1) + truth.sum(-1) 63 | dice = (2.0 * intersect + eps) / (union + eps) 64 | return dice.mean() 65 | 66 | 67 | def dice_score_tensor(pred, truth, eps=1e-8, threshold=0.5): 68 | """ 69 | Dice metric for tensors. Only classes that are present are weighted. 70 | 71 | Args: 72 | pred (torch tensor): Predictions. 73 | truth (torch tensor): Ground truths. 74 | eps (float, optional): epsilon to avoid dividing by 0. Defaults to 1e-8. 75 | threshold (float, optional): Threshold for predictions. Defaults to 0.5. 76 | 77 | Returns: 78 | float: dice value. 79 | """ 80 | pred = (pred.view((truth.size(0), -1)) > threshold).int() 81 | truth = truth.view((truth.size(0), -1)).int() 82 | intersect = (pred + truth == 2).sum(-1) 83 | union = pred.sum(-1) + truth.sum(-1) 84 | dice = (2.0 * intersect + eps) / (union + eps) 85 | return dice.mean() 86 | 87 | 88 | def tweak_threshold(mask, pred): 89 | """ 90 | Tweaks the threshold to maximise the score. 91 | 92 | Args: 93 | mask (torch tensor): Ground truths. 94 | pred (torch tensor): Predictions. 95 | 96 | Returns: 97 | float: Best threshold. 98 | float: Best score. 99 | """ 100 | thresholds = [] 101 | scores = [] 102 | for threshold in np.linspace(0.2, 0.7, 11): 103 | 104 | dice_score = dice_scores_img_tensor(pred=pred > threshold, truth=mask) 105 | thresholds.append(threshold) 106 | scores.append(dice_score) 107 | 108 | return thresholds[np.argmax(scores)], np.max(scores) 109 | -------------------------------------------------------------------------------- /code/utils/plots.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | import plotly.express as px 4 | 5 | 6 | def overlay_heatmap(heatmap, image, alpha=0.5, colormap=cv2.COLORMAP_OCEAN): 7 | """ 8 | Overlays an heatmat with an image. 9 | 10 | Args: 11 | heatmap (numpy array): Attention map. 12 | image (numpy array): Image. 13 | alpha (float, optional): Transparency. Defaults to 0.5. 14 | colormap (cv2 colormap, optional): Colormap. Defaults to cv2.COLORMAP_OCEAN. 
15 |
16 |     Returns:
17 |         numpy array: Image with the colormap overlaid.
18 |     """
19 |     if np.max(image) <= 1:
20 |         image = (image * 255).astype(np.uint8)
21 |
22 |     if heatmap.shape != image.shape:
23 |         heatmap = cv2.resize(heatmap, (image.shape[1], image.shape[0]))
24 |
25 |     heatmap = np.clip(heatmap * 255, 0, 255).astype(np.uint8)
26 |     heatmap[0, 0] = 255
27 |
28 |     heatmap = cv2.applyColorMap(heatmap, colormap)
29 |     output = cv2.addWeighted(image[:, :, [2, 1, 0]], alpha, heatmap, 1 - alpha, 0)
30 |
31 |     return output[:, :, [2, 1, 0]]
32 |
33 |
34 | def plot_contours_preds(img, preds, mask=None, w=1, downsize=1):
35 |     """
36 |     Plots the contours of mask predictions (in green) and of a mask (in red).
37 |
38 |     Args:
39 |         img (numpy array [H x W x C]): Image.
40 |         preds (numpy int array [H x W] or None): Predicted mask.
41 |         mask (numpy array [H x W] or None): Mask.
42 |         w (int, optional): Contour width. Defaults to 1.
43 |         downsize (int, optional): Downsizing factor. Defaults to 1.
44 |
45 |     Returns:
46 |         px.imshow: Plotly plot.
47 |     """
48 |     img = img.copy()
49 |     if img.max() > 1:
50 |         img = (img / 255).astype(float)
51 |     if mask is not None:
52 |         if mask.max() > 1:
53 |             mask = (mask / 255).astype(float)
54 |         mask = (mask * 255).astype(np.uint8)
55 |
56 |     if preds.max() > 1:
57 |         preds = (preds / 255).astype(float)
58 |     preds = (preds * 255).astype(np.uint8)
59 |
60 |     if downsize > 1:
61 |         new_shape = (preds.shape[1] // downsize, preds.shape[0] // downsize)
62 |
63 |         if mask is not None:
64 |             mask = cv2.resize(
65 |                 mask,
66 |                 new_shape,
67 |                 interpolation=cv2.INTER_NEAREST,
68 |             )
69 |         img = cv2.resize(
70 |             img,
71 |             new_shape,
72 |             interpolation=cv2.INTER_LINEAR,
73 |         )
74 |         preds = cv2.resize(
75 |             preds,
76 |             new_shape,
77 |             interpolation=cv2.INTER_NEAREST,
78 |         )
79 |
80 |     contours_preds, _ = cv2.findContours(preds, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
81 |     cv2.polylines(img, contours_preds, True, (0.0, 1.0, 0.0), w)
82 |
83 |     if mask is not None:
84 |         img_gt = img.copy()
85 |         contours, _ = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
86 |         cv2.polylines(img_gt, contours, True, (1.0, 0.0, 0.0), w)
87 |         img = (img + img_gt) / 2
88 |
89 |     return px.imshow(img)
90 |
91 |
92 | def plot_heatmap_preds(img, preds, mask=None, w=1, downsize=1):
93 |     """
94 |     Plots the heatmap of predictions and the contours of a mask (in red).
95 |
96 |     Args:
97 |         img (numpy array [H x W x 3]): Image.
98 |         preds (numpy float array [H x W] or None): Predicted probabilities.
99 |         mask (numpy array [H x W] or None): Mask.
100 |         w (int, optional): Contour width. Defaults to 1.
101 |         downsize (int, optional): Downsizing factor. Defaults to 1.
102 |
103 |     Returns:
104 |         px.imshow: Plotly plot.
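
    Example (illustrative sketch; assumes an image `img`, predicted
    probabilities `probs` and an optional ground-truth `mask` loaded elsewhere
    as numpy arrays):
        >>> fig = plot_heatmap_preds(img, probs, mask=mask, downsize=4)
        >>> fig.show()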
105 | """ 106 | img = img.copy() 107 | if img.max() > 1: 108 | img = (img / 255).astype(float) 109 | if mask is not None: 110 | if mask.max() > 1: 111 | mask = (mask / 255).astype(float) 112 | mask = (mask * 255).astype(np.uint8) 113 | 114 | if downsize > 1: 115 | new_shape = (preds.shape[1] // downsize, preds.shape[0] // downsize) 116 | 117 | if mask is not None: 118 | mask = cv2.resize( 119 | mask, 120 | new_shape, 121 | interpolation=cv2.INTER_NEAREST, 122 | ) 123 | 124 | img = cv2.resize( 125 | img, 126 | new_shape, 127 | interpolation=cv2.INTER_LINEAR, 128 | ) 129 | preds = cv2.resize( 130 | preds, 131 | new_shape, 132 | interpolation=cv2.INTER_LINEAR, 133 | ) 134 | 135 | if mask is not None: 136 | contours, _ = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE) 137 | cv2.polylines(img, contours, True, (1.0, 0.0, 0.0), w) 138 | 139 | heatmap = 1 - preds / 2 140 | heatmap[0, 0] = 1 141 | img = overlay_heatmap(heatmap, img.copy(), alpha=0.7, colormap=cv2.COLORMAP_HOT) 142 | 143 | return px.imshow(img) 144 | -------------------------------------------------------------------------------- /code/utils/rle.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def enc2mask(encs, shape): 5 | """ 6 | Decodes a rle. 7 | 8 | Args: 9 | encs (list of str): Rles for each class. 10 | shape (tuple [2]): Mask size. 11 | 12 | Returns: 13 | np array [shape]: Mask. 14 | """ 15 | img = np.zeros(shape[0] * shape[1], dtype=np.uint8) 16 | for m, enc in enumerate(encs): 17 | if isinstance(enc, np.float) and np.isnan(enc): 18 | continue 19 | enc_split = enc.split() 20 | for i in range(len(enc_split) // 2): 21 | start = int(enc_split[2 * i]) - 1 22 | length = int(enc_split[2 * i + 1]) 23 | img[start: start + length] = 1 + m 24 | return img.reshape(shape).T 25 | 26 | 27 | def mask2enc(mask, n=1): 28 | """ 29 | Encodes a mask to rle 30 | 31 | Args: 32 | mask (np array [H x W]): Mask 33 | n (int, optional): Number fo classes. Defaults to 1. 34 | 35 | Returns: 36 | list of strings: Rle encodings. 37 | """ 38 | pixels = mask.T.flatten() 39 | encs = [] 40 | for i in range(1, n + 1): 41 | p = (pixels == i).astype(np.int8) 42 | if p.sum() == 0: 43 | encs.append(np.nan) 44 | else: 45 | p = np.concatenate([[0], p, [0]]) 46 | runs = np.where(p[1:] != p[:-1])[0] + 1 47 | runs[1::2] -= runs[::2] 48 | encs.append(" ".join(str(x) for x in runs)) 49 | return encs 50 | 51 | 52 | def rle_encode_less_memory(mask): 53 | """ 54 | From https://www.kaggle.com/bguberfain/memory-aware-rle-encoding 55 | mask: numpy array, 1 - mask, 0 - background 56 | Returns run length as string formated 57 | This simplified method requires first and last pixel to be zero 58 | """ 59 | pixels = mask.T.flatten() 60 | 61 | # This simplified method requires first and last pixel to be zero 62 | pixels[0] = 0 63 | pixels[-1] = 0 64 | runs = np.where(pixels[1:] != pixels[:-1])[0] + 2 65 | runs[1::2] -= runs[::2] 66 | 67 | return " ".join(str(x) for x in runs) 68 | -------------------------------------------------------------------------------- /code/utils/torch.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch 3 | import random 4 | import numpy as np 5 | 6 | 7 | def seed_everything(seed): 8 | """ 9 | Seeds basic parameters for reproductibility of results. 10 | 11 | Args: 12 | seed (int): Number of the seed. 
13 | """ 14 | random.seed(seed) 15 | os.environ["PYTHONHASHSEED"] = str(seed) 16 | np.random.seed(seed) 17 | torch.manual_seed(seed) 18 | torch.cuda.manual_seed(seed) 19 | torch.backends.cudnn.deterministic = True 20 | torch.backends.cudnn.benchmark = False 21 | 22 | 23 | def save_model_weights(model, filename, verbose=1, cp_folder=""): 24 | """ 25 | Saves the weights of a PyTorch model. 26 | 27 | Args: 28 | model (torch model): Model to save the weights of. 29 | filename (str): Name of the checkpoint. 30 | verbose (int, optional): Whether to display infos. Defaults to 1. 31 | cp_folder (str, optional): Folder to save to. Defaults to "". 32 | """ 33 | 34 | if verbose: 35 | print(f"\n -> Saving weights to {os.path.join(cp_folder, filename)}\n") 36 | torch.save(model.state_dict(), os.path.join(cp_folder, filename)) 37 | 38 | 39 | def load_model_weights(model, filename, verbose=1, cp_folder=""): 40 | """ 41 | Loads the weights of a PyTorch model. The exception handles cpu/gpu incompatibilities. 42 | 43 | Args: 44 | model (torch model): Model to load the weights to. 45 | filename (str): Name of the checkpoint. 46 | verbose (int, optional): Whether to display infos. Defaults to 1. 47 | cp_folder (str, optional): Folder to load from. Defaults to "". 48 | 49 | Returns: 50 | torch model: Model with loaded weights. 51 | """ 52 | 53 | if verbose: 54 | print(f"\n -> Loading weights from {os.path.join(cp_folder,filename)}\n") 55 | try: 56 | model.load_state_dict(os.path.join(cp_folder, filename), strict=True) 57 | except BaseException: 58 | model.load_state_dict( 59 | torch.load(os.path.join(cp_folder, filename), map_location="cpu"), 60 | strict=True, 61 | ) 62 | return model 63 | 64 | 65 | def count_parameters(model, all=False): 66 | """ 67 | Count the parameters of a model. 68 | 69 | Args: 70 | model (torch model): Model to count the parameters of. 71 | all (bool, optional): Whether to count not trainable parameters. Defaults to False. 72 | 73 | Returns: 74 | int: Number of parameters. 75 | """ 76 | 77 | if all: 78 | return sum(p.numel() for p in model.parameters()) 79 | else: 80 | return sum(p.numel() for p in model.parameters() if p.requires_grad) 81 | 82 | 83 | def worker_init_fn(worker_id): 84 | """ 85 | Handles PyTorch x Numpy seeding issues. 86 | 87 | Args: 88 | worker_id (int]): Id of the worker. 
89 | """ 90 | np.random.seed(np.random.get_state()[1][0] + worker_id) 91 | -------------------------------------------------------------------------------- /input/annotation_v3/095bf7a1f.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "type": "Feature", 4 | "id": "PathAnnotationObject", 5 | "geometry": { 6 | "type": "Polygon", 7 | "coordinates": [ 8 | [ 9 | [13779, 18508], 10 | [13741, 18520], 11 | [13718, 18539], 12 | [13712, 18587], 13 | [13714, 18587], 14 | [13734, 18636], 15 | [13767, 18661], 16 | [13833, 18661], 17 | [13874, 18632], 18 | [13902, 18588], 19 | [13882, 18543], 20 | [13882, 18541], 21 | [13839, 18511], 22 | [13779, 18508] 23 | ] 24 | ] 25 | }, 26 | "properties": { 27 | "classification": { 28 | "name": "FC", 29 | "colorRGB": -1296326 30 | }, 31 | "isLocked": false, 32 | "measurements": [] 33 | } 34 | }, 35 | { 36 | "type": "Feature", 37 | "id": "PathAnnotationObject", 38 | "geometry": { 39 | "type": "Polygon", 40 | "coordinates": [ 41 | [ 42 | [13614, 19875], 43 | [13582, 19893], 44 | [13563, 19943], 45 | [13557, 19996], 46 | [13581, 20039], 47 | [13624, 20054], 48 | [13624, 20053], 49 | [13659, 20029], 50 | [13660, 20029], 51 | [13670, 19995], 52 | [13671, 19938], 53 | [13655, 19894], 54 | [13614, 19875] 55 | ] 56 | ] 57 | }, 58 | "properties": { 59 | "classification": { 60 | "name": "FC", 61 | "colorRGB": -1296326 62 | }, 63 | "isLocked": false, 64 | "measurements": [] 65 | } 66 | }, 67 | { 68 | "type": "Feature", 69 | "id": "PathAnnotationObject", 70 | "geometry": { 71 | "type": "MultiPolygon", 72 | "coordinates": [ 73 | [ 74 | [ 75 | [16988, 29847], 76 | [16989.83, 29847.17], 77 | [16990, 29847], 78 | [16988, 29847] 79 | ] 80 | ], 81 | [ 82 | [ 83 | [16989.83, 29847.17], 84 | [16955, 29882], 85 | [16956, 29882], 86 | [16941, 29947], 87 | [16965, 29987], 88 | [17013, 29998], 89 | [17014, 29997], 90 | [17068, 29996], 91 | [17072, 29996], 92 | [17098, 29952], 93 | [17100, 29952], 94 | [17102, 29889], 95 | [17063, 29854], 96 | [16989.83, 29847.17] 97 | ] 98 | ] 99 | ] 100 | }, 101 | "properties": { 102 | "classification": { 103 | "name": "FC", 104 | "colorRGB": -1296326 105 | }, 106 | "isLocked": false, 107 | "measurements": [] 108 | } 109 | }, 110 | { 111 | "type": "Feature", 112 | "id": "PathAnnotationObject", 113 | "geometry": { 114 | "type": "MultiPolygon", 115 | "coordinates": [ 116 | [ 117 | [ 118 | [23900, 28598], 119 | [23849, 28618], 120 | [23806, 28653], 121 | [23764, 28681], 122 | [23742, 28719], 123 | [23745, 28770], 124 | [23770, 28809], 125 | [23816, 28820], 126 | [23880, 28815], 127 | [23930, 28793], 128 | [23960, 28746], 129 | [23960, 28686], 130 | [23947, 28626], 131 | [23947.6, 28626], 132 | [23900, 28598] 133 | ] 134 | ], 135 | [ 136 | [ 137 | [23947.6, 28626], 138 | [23951, 28628], 139 | [23951, 28627], 140 | [23950, 28626], 141 | [23947.6, 28626] 142 | ] 143 | ] 144 | ] 145 | }, 146 | "properties": { 147 | "classification": { 148 | "name": "G", 149 | "colorRGB": -4596839 150 | }, 151 | "isLocked": false, 152 | "measurements": [] 153 | } 154 | }, 155 | { 156 | "type": "Feature", 157 | "id": "PathAnnotationObject", 158 | "geometry": { 159 | "type": "Polygon", 160 | "coordinates": [ 161 | [ 162 | [16941, 28082], 163 | [16868, 28102], 164 | [16850, 28154], 165 | [16875, 28216], 166 | [16950, 28246], 167 | [16987, 28232], 168 | [16991, 28232], 169 | [17026, 28203], 170 | [17040, 28141], 171 | [17041, 28141], 172 | [17029, 28097], 173 | [17029, 28096], 174 | [17029, 28095], 175 | [17028, 28095], 176 | 
[16943, 28083], 177 | [16943, 28082], 178 | [16941, 28082] 179 | ] 180 | ] 181 | }, 182 | "properties": { 183 | "classification": { 184 | "name": "FC", 185 | "colorRGB": -1296326 186 | }, 187 | "isLocked": false, 188 | "measurements": [] 189 | } 190 | }, 191 | { 192 | "type": "Feature", 193 | "id": "PathAnnotationObject", 194 | "geometry": { 195 | "type": "MultiPolygon", 196 | "coordinates": [ 197 | [ 198 | [ 199 | [7297.53, 28791.86], 200 | [7297, 28792], 201 | [7298, 28792], 202 | [7297.53, 28791.86] 203 | ] 204 | ], 205 | [ 206 | [ 207 | [7297.53, 28791.86], 208 | [7343, 28780], 209 | [7381, 28729], 210 | [7378, 28662], 211 | [7332, 28648], 212 | [7274, 28663], 213 | [7237, 28715], 214 | [7248, 28777], 215 | [7297.53, 28791.86] 216 | ] 217 | ] 218 | ] 219 | }, 220 | "properties": { 221 | "classification": { 222 | "name": "FC", 223 | "colorRGB": -1296326 224 | }, 225 | "isLocked": false, 226 | "measurements": [] 227 | } 228 | }, 229 | { 230 | "type": "Feature", 231 | "id": "PathAnnotationObject", 232 | "geometry": { 233 | "type": "Polygon", 234 | "coordinates": [ 235 | [ 236 | [8977, 29047], 237 | [8902, 29075], 238 | [8844, 29161], 239 | [8837, 29248], 240 | [8837, 29249], 241 | [8859, 29293], 242 | [8903, 29304], 243 | [8946, 29272], 244 | [8993, 29226], 245 | [9028, 29164], 246 | [9027, 29081], 247 | [8977, 29047] 248 | ] 249 | ] 250 | }, 251 | "properties": { 252 | "classification": { 253 | "name": "FC", 254 | "colorRGB": -1296326 255 | }, 256 | "isLocked": false, 257 | "measurements": [] 258 | } 259 | }, 260 | { 261 | "type": "Feature", 262 | "id": "PathAnnotationObject", 263 | "geometry": { 264 | "type": "Polygon", 265 | "coordinates": [ 266 | [ 267 | [4379, 22025], 268 | [4339, 22038], 269 | [4335, 22075], 270 | [4342, 22124], 271 | [4343, 22124], 272 | [4343, 22125], 273 | [4375, 22169], 274 | [4411, 22168], 275 | [4447, 22152], 276 | [4445, 22107], 277 | [4446, 22107], 278 | [4424, 22043], 279 | [4379, 22025] 280 | ] 281 | ] 282 | }, 283 | "properties": { 284 | "classification": { 285 | "name": "FC", 286 | "colorRGB": -1296326 287 | }, 288 | "isLocked": false, 289 | "measurements": [] 290 | } 291 | }, 292 | { 293 | "type": "Feature", 294 | "id": "PathAnnotationObject", 295 | "geometry": { 296 | "type": "Polygon", 297 | "coordinates": [ 298 | [ 299 | [4229, 27767], 300 | [4218, 27805], 301 | [4217, 27854], 302 | [4218, 27854], 303 | [4232, 27911], 304 | [4249, 27940], 305 | [4278, 27956], 306 | [4278, 27955], 307 | [4277, 27955], 308 | [4304, 27929], 309 | [4322, 27887], 310 | [4323, 27887], 311 | [4325, 27851], 312 | [4325, 27850], 313 | [4312, 27805], 314 | [4265, 27768], 315 | [4229, 27767] 316 | ] 317 | ] 318 | }, 319 | "properties": { 320 | "classification": { 321 | "name": "FC", 322 | "colorRGB": -1296326 323 | }, 324 | "isLocked": false, 325 | "measurements": [] 326 | } 327 | }, 328 | { 329 | "type": "Feature", 330 | "id": "PathAnnotationObject", 331 | "geometry": { 332 | "type": "Polygon", 333 | "coordinates": [ 334 | [ 335 | [4643, 22400], 336 | [4612, 22421], 337 | [4604, 22462], 338 | [4617, 22511], 339 | [4642, 22552], 340 | [4678, 22571], 341 | [4728, 22581], 342 | [4756, 22565], 343 | [4765, 22534], 344 | [4765, 22492], 345 | [4724, 22458], 346 | [4707, 22427], 347 | [4676, 22404], 348 | [4643, 22400] 349 | ] 350 | ] 351 | }, 352 | "properties": { 353 | "classification": { 354 | "name": "FC", 355 | "colorRGB": -1296326 356 | }, 357 | "isLocked": false, 358 | "measurements": [] 359 | } 360 | } 361 | ] 
-------------------------------------------------------------------------------- /input/annotation_v3/095bf7a1f.tiff - default.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/095bf7a1f.tiff - default.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/1e2425f28.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "type": "Feature", 4 | "id": "PathAnnotationObject", 5 | "geometry": { 6 | "type": "Polygon", 7 | "coordinates": [ 8 | [ 9 | [21492, 16174], 10 | [21422, 16177], 11 | [21343, 16210], 12 | [21327, 16281], 13 | [21334, 16355], 14 | [21381, 16392], 15 | [21382, 16392], 16 | [21431, 16415], 17 | [21480, 16430], 18 | [21549, 16413], 19 | [21580, 16384], 20 | [21597, 16321], 21 | [21584, 16247], 22 | [21583, 16247], 23 | [21559, 16194], 24 | [21493, 16174], 25 | [21492, 16174] 26 | ] 27 | ] 28 | }, 29 | "properties": { 30 | "classification": { 31 | "name": "G", 32 | "colorRGB": -4596839 33 | }, 34 | "isLocked": false, 35 | "measurements": [] 36 | } 37 | }, 38 | { 39 | "type": "Feature", 40 | "id": "PathAnnotationObject", 41 | "geometry": { 42 | "type": "Polygon", 43 | "coordinates": [ 44 | [ 45 | [15226, 21919], 46 | [15179, 21952], 47 | [15154, 22026], 48 | [15166, 22114], 49 | [15218, 22147], 50 | [15282, 22156], 51 | [15333, 22159], 52 | [15402, 22125], 53 | [15389, 22057], 54 | [15387, 22057], 55 | [15406, 22024], 56 | [15407, 21968], 57 | [15371, 21928], 58 | [15369, 21928], 59 | [15294, 21921], 60 | [15294, 21920], 61 | [15289, 21920], 62 | [15226, 21919] 63 | ] 64 | ] 65 | }, 66 | "properties": { 67 | "classification": { 68 | "name": "FC", 69 | "colorRGB": -1296326 70 | }, 71 | "isLocked": false, 72 | "measurements": [] 73 | } 74 | }, 75 | { 76 | "type": "Feature", 77 | "id": "PathAnnotationObject", 78 | "geometry": { 79 | "type": "Polygon", 80 | "coordinates": [ 81 | [ 82 | [22258, 16856], 83 | [22172, 16889], 84 | [22123, 16943], 85 | [22126, 16943], 86 | [22081, 17000], 87 | [22105, 17094], 88 | [22154, 17143], 89 | [22155, 17143], 90 | [22232, 17151], 91 | [22240, 17151], 92 | [22241, 17150], 93 | [22261, 17137], 94 | [22262, 17137], 95 | [22278, 17132], 96 | [22278, 17131], 97 | [22278, 17129], 98 | [22278, 17128], 99 | [22278, 17127], 100 | [22300, 17109], 101 | [22379, 17083], 102 | [22416, 17018], 103 | [22400, 16919], 104 | [22360, 16882], 105 | [22258, 16856] 106 | ] 107 | ] 108 | }, 109 | "properties": { 110 | "classification": { 111 | "name": "G", 112 | "colorRGB": -4596839 113 | }, 114 | "isLocked": false, 115 | "measurements": [] 116 | } 117 | }, 118 | { 119 | "type": "Feature", 120 | "id": "PathAnnotationObject", 121 | "geometry": { 122 | "type": "MultiPolygon", 123 | "coordinates": [ 124 | [ 125 | [ 126 | [18133, 21416], 127 | [18057, 21422], 128 | [18030, 21474], 129 | [18028, 21474], 130 | [18027, 21475], 131 | [18025, 21545], 132 | [18057, 21588], 133 | [18105, 21608], 134 | [18152.31, 21615.88], 135 | [18195, 21600], 136 | [18237, 21575], 137 | [18260, 21517], 138 | [18239, 21464], 139 | [18238, 21464], 140 | [18238, 21463], 141 | [18235, 21463], 142 | [18204, 21426], 143 | [18133, 21417], 144 | [18133, 21416] 145 | ] 146 | ], 147 | [ 148 | [ 149 | [18152.31, 21615.88], 150 | [18152, 21616], 151 | [18153, 21616], 152 | [18152.31, 21615.88] 153 | ] 154 | ] 155 | ] 156 | }, 157 | "properties": { 158 | 
"classification": { 159 | "name": "FC", 160 | "colorRGB": -1296326 161 | }, 162 | "isLocked": false, 163 | "measurements": [] 164 | } 165 | } 166 | ] -------------------------------------------------------------------------------- /input/annotation_v3/1e2425f28.tiff - default.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/1e2425f28.tiff - default.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/2f6ecfcdf.tiff - VAN0014-LK-207-2-PAS_FFPE_pyr.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/2f6ecfcdf.tiff - VAN0014-LK-207-2-PAS_FFPE_pyr.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/4ef6695ce.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "type": "Feature", 4 | "id": "PathAnnotationObject", 5 | "geometry": { 6 | "type": "Polygon", 7 | "coordinates": [ 8 | [ 9 | [32062, 35189], 10 | [32009, 35191], 11 | [31971, 35230], 12 | [31962, 35263], 13 | [31962, 35264], 14 | [31961, 35264], 15 | [31963, 35322], 16 | [31961, 35322], 17 | [31980, 35379], 18 | [31978, 35379], 19 | [31997, 35399], 20 | [32014, 35414], 21 | [32035, 35440], 22 | [32082, 35463], 23 | [32126, 35454], 24 | [32158, 35421], 25 | [32183, 35366], 26 | [32190, 35311], 27 | [32192, 35261], 28 | [32162, 35224], 29 | [32111, 35205], 30 | [32062, 35189] 31 | ] 32 | ] 33 | }, 34 | "properties": { 35 | "classification": { 36 | "name": "G", 37 | "colorRGB": -4596839 38 | }, 39 | "isLocked": false, 40 | "measurements": [] 41 | } 42 | } 43 | ] -------------------------------------------------------------------------------- /input/annotation_v3/4ef6695ce.tiff - default.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/4ef6695ce.tiff - default.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/54f2eec69.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "type": "Feature", 4 | "id": "PathAnnotationObject", 5 | "geometry": { 6 | "type": "Polygon", 7 | "coordinates": [ 8 | [ 9 | [12081, 20379], 10 | [11970, 20430], 11 | [11941, 20489], 12 | [11961, 20539], 13 | [12019, 20618], 14 | [12104, 20653], 15 | [12106, 20653], 16 | [12170, 20682], 17 | [12253, 20671], 18 | [12252, 20671], 19 | [12252, 20670], 20 | [12328, 20649], 21 | [12359, 20608], 22 | [12396, 20560], 23 | [12369, 20520], 24 | [12306, 20475], 25 | [12241, 20432], 26 | [12205, 20382], 27 | [12081, 20379] 28 | ] 29 | ] 30 | }, 31 | "properties": { 32 | "classification": { 33 | "name": "G", 34 | "colorRGB": -4596839 35 | }, 36 | "isLocked": false, 37 | "measurements": [] 38 | } 39 | }, 40 | { 41 | "type": "Feature", 42 | "id": "PathAnnotationObject", 43 | "geometry": { 44 | "type": "Polygon", 45 | "coordinates": [ 46 | [ 47 | [15848, 17526], 48 | [15793, 17527], 49 | [15729, 17585], 50 | [15696, 17655], 51 | [15717, 17703], 52 | [15759, 17738], 53 | [15798, 17747], 54 | [15848, 17733], 55 | [15895, 17706], 56 | [15904, 17669], 57 | [15911, 17634], 58 | [15894, 
17579], 59 | [15892, 17579], 60 | [15849, 17526], 61 | [15848, 17526] 62 | ] 63 | ] 64 | }, 65 | "properties": { 66 | "classification": { 67 | "name": "FC", 68 | "colorRGB": -1296326 69 | }, 70 | "isLocked": false, 71 | "measurements": [] 72 | } 73 | } 74 | ] -------------------------------------------------------------------------------- /input/annotation_v3/54f2eec69.tiff - default.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/54f2eec69.tiff - default.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/8242609fa.tiff - VAN0016-LK-208-2-PAS_FFPE_pyr.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/8242609fa.tiff - VAN0016-LK-208-2-PAS_FFPE_pyr.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/PAS_I_4_S_1_ROI_3.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "type": "Feature", 4 | "id": "PathAnnotationObject", 5 | "geometry": { 6 | "type": "MultiPolygon", 7 | "coordinates": [ 8 | [ 9 | [ 10 | [9441, 4089], 11 | [9441, 4089.51], 12 | [9443, 4089], 13 | [9441, 4089] 14 | ] 15 | ], 16 | [ 17 | [ 18 | [9441, 4089.51], 19 | [9376, 4106], 20 | [9338, 4117], 21 | [9339, 4117], 22 | [9298, 4156], 23 | [9253, 4181], 24 | [9231, 4229], 25 | [9190, 4262], 26 | [9177, 4317], 27 | [9182, 4380], 28 | [9216, 4411], 29 | [9228, 4412], 30 | [9574, 4409], 31 | [9573, 4096], 32 | [9573, 4095], 33 | [9574, 4095], 34 | [9574, 4094], 35 | [9557, 4093], 36 | [9521, 4081], 37 | [9477, 4084], 38 | [9441, 4091], 39 | [9441, 4089.51] 40 | ] 41 | ] 42 | ] 43 | }, 44 | "properties": { 45 | "classification": { 46 | "name": "G", 47 | "colorRGB": -4596839 48 | }, 49 | "isLocked": false, 50 | "measurements": [] 51 | } 52 | }, 53 | { 54 | "type": "Feature", 55 | "id": "PathAnnotationObject", 56 | "geometry": { 57 | "type": "Polygon", 58 | "coordinates": [ 59 | [ 60 | [7366, 4200], 61 | [7295, 4212], 62 | [7237, 4268], 63 | [7187, 4328], 64 | [7162, 4377], 65 | [7144, 4412], 66 | [7597, 4412], 67 | [7580, 4385], 68 | [7565, 4337], 69 | [7555, 4291], 70 | [7556, 4291], 71 | [7556, 4290], 72 | [7555, 4290], 73 | [7555, 4289], 74 | [7525, 4256], 75 | [7484, 4250], 76 | [7483, 4250], 77 | [7440, 4230], 78 | [7439, 4230], 79 | [7366, 4200] 80 | ] 81 | ] 82 | }, 83 | "properties": { 84 | "classification": { 85 | "name": "G", 86 | "colorRGB": -4596839 87 | }, 88 | "isLocked": false, 89 | "measurements": [] 90 | } 91 | }, 92 | { 93 | "type": "Feature", 94 | "id": "PathAnnotationObject", 95 | "geometry": { 96 | "type": "Polygon", 97 | "coordinates": [ 98 | [ 99 | [9451, 3481], 100 | [9409, 3514], 101 | [9379, 3540], 102 | [9376, 3583], 103 | [9382, 3617], 104 | [9401, 3667], 105 | [9423, 3714], 106 | [9449, 3748], 107 | [9490, 3777], 108 | [9492, 3777], 109 | [9492, 3778], 110 | [9494, 3778], 111 | [9545, 3792], 112 | [9573, 3788], 113 | [9574, 3548], 114 | [9561, 3542], 115 | [9526, 3502], 116 | [9502, 3489], 117 | [9451, 3481] 118 | ] 119 | ] 120 | }, 121 | "properties": { 122 | "classification": { 123 | "name": "G", 124 | "colorRGB": -4596839 125 | }, 126 | "isLocked": false, 127 | "measurements": [] 128 | } 129 | } 130 | ] 
-------------------------------------------------------------------------------- /input/annotation_v3/PAS_I_4_S_1_ROI_3.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/PAS_I_4_S_1_ROI_3.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/PAS_I_4_S_2_ROI_1.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "type": "Feature", 4 | "id": "PathAnnotationObject", 5 | "geometry": { 6 | "type": "Polygon", 7 | "coordinates": [ 8 | [ 9 | [6018, 3046], 10 | [5991, 3057], 11 | [5989, 3057], 12 | [5946, 3094], 13 | [5910, 3117], 14 | [5874, 3142], 15 | [5852, 3184], 16 | [5850, 3213], 17 | [5844, 3256], 18 | [5863, 3277], 19 | [5887, 3302], 20 | [5922, 3309], 21 | [5924, 3309], 22 | [5947, 3313], 23 | [5949, 3313], 24 | [5990, 3303], 25 | [6024, 3287], 26 | [6044, 3256], 27 | [6044, 3093], 28 | [6041, 3062], 29 | [6040, 3062], 30 | [6018, 3046] 31 | ] 32 | ] 33 | }, 34 | "properties": { 35 | "classification": { 36 | "name": "G", 37 | "colorRGB": -4596839 38 | }, 39 | "isLocked": false, 40 | "measurements": [] 41 | } 42 | } 43 | ] -------------------------------------------------------------------------------- /input/annotation_v3/PAS_I_4_S_2_ROI_1.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/PAS_I_4_S_2_ROI_1.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/PAS_I_4_S_2_ROI_3.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "type": "Feature", 4 | "id": "PathAnnotationObject", 5 | "geometry": { 6 | "type": "Polygon", 7 | "coordinates": [ 8 | [ 9 | [5495, 3046], 10 | [5495, 3047], 11 | [5458, 3055], 12 | [5457, 3055], 13 | [5457, 3056], 14 | [5427, 3063], 15 | [5391, 3082], 16 | [5389, 3082], 17 | [5362, 3110], 18 | [5358, 3138], 19 | [5382, 3184], 20 | [5419, 3215], 21 | [5423, 3215], 22 | [5469, 3226], 23 | [5504, 3247], 24 | [5502, 3046], 25 | [5495, 3046] 26 | ] 27 | ] 28 | }, 29 | "properties": { 30 | "classification": { 31 | "name": "G", 32 | "colorRGB": -4596839 33 | }, 34 | "isLocked": false, 35 | "measurements": [] 36 | } 37 | } 38 | ] -------------------------------------------------------------------------------- /input/annotation_v3/PAS_I_4_S_2_ROI_3.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/PAS_I_4_S_2_ROI_3.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/PAS_I_4_S_3_ROI_1.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "type": "Feature", 4 | "id": "PathAnnotationObject", 5 | "geometry": { 6 | "type": "Polygon", 7 | "coordinates": [ 8 | [ 9 | [4835, 4218], 10 | [4773, 4249], 11 | [4716, 4284], 12 | [4671, 4325], 13 | [4635, 4373], 14 | [5000, 4372], 15 | [4986, 4327], 16 | [4963, 4276], 17 | [4908, 4233], 18 | [4836, 4218], 19 | [4835, 4218] 20 | ] 21 | ] 22 | }, 23 | "properties": { 24 | "classification": { 25 | "name": "G", 26 | "colorRGB": -4596839 27 | }, 28 | "isLocked": false, 29 | "measurements": [] 
30 | } 31 | } 32 | ] -------------------------------------------------------------------------------- /input/annotation_v3/PAS_I_4_S_3_ROI_1.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/PAS_I_4_S_3_ROI_1.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/PAS_I_4_S_3_ROI_2.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "type": "Feature", 4 | "id": "PathAnnotationObject", 5 | "geometry": { 6 | "type": "Polygon", 7 | "coordinates": [ 8 | [ 9 | [5167, 1438], 10 | [5160, 1441], 11 | [5099, 1466], 12 | [5043, 1480], 13 | [5042, 1480], 14 | [4974, 1514], 15 | [4947, 1536], 16 | [4947, 1594], 17 | [4995, 1648], 18 | [5049, 1691], 19 | [5097, 1725], 20 | [5101, 1725], 21 | [5167, 1763], 22 | [5167, 1438] 23 | ] 24 | ] 25 | }, 26 | "properties": { 27 | "isLocked": false, 28 | "measurements": [] 29 | } 30 | }, 31 | { 32 | "type": "Feature", 33 | "id": "PathAnnotationObject", 34 | "geometry": { 35 | "type": "Polygon", 36 | "coordinates": [ 37 | [ 38 | [2138, 4645], 39 | [2105, 4662], 40 | [2072, 4701], 41 | [2057, 4727], 42 | [2058, 4756], 43 | [2273, 4756], 44 | [2272, 4752], 45 | [2237, 4683], 46 | [2207, 4668], 47 | [2206, 4668], 48 | [2167, 4653], 49 | [2138, 4645] 50 | ] 51 | ] 52 | }, 53 | "properties": { 54 | "classification": { 55 | "name": "G", 56 | "colorRGB": -4596839 57 | }, 58 | "isLocked": false, 59 | "measurements": [] 60 | } 61 | }, 62 | { 63 | "type": "Feature", 64 | "id": "PathAnnotationObject", 65 | "geometry": { 66 | "type": "Polygon", 67 | "coordinates": [ 68 | [ 69 | [5167, 2927], 70 | [5108, 2961], 71 | [5067, 3017], 72 | [5056, 3076], 73 | [5056, 3077], 74 | [5040, 3155], 75 | [5039, 3155], 76 | [5045, 3224], 77 | [5045, 3225], 78 | [5073, 3281], 79 | [5099, 3308], 80 | [5118, 3322], 81 | [5167, 3324], 82 | [5167, 2927] 83 | ] 84 | ] 85 | }, 86 | "properties": { 87 | "classification": { 88 | "name": "G", 89 | "colorRGB": -4596839 90 | }, 91 | "isLocked": false, 92 | "measurements": [] 93 | } 94 | } 95 | ] -------------------------------------------------------------------------------- /input/annotation_v3/PAS_I_4_S_3_ROI_2.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/PAS_I_4_S_3_ROI_2.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/PAS_I_4_S_4_ROI_1.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "type": "Feature", 4 | "id": "PathAnnotationObject", 5 | "geometry": { 6 | "type": "Polygon", 7 | "coordinates": [ 8 | [ 9 | [3094, 4225], 10 | [3056, 4243], 11 | [3010, 4279], 12 | [2988, 4339], 13 | [3012, 4383], 14 | [3062, 4404], 15 | [3107, 4401], 16 | [3131, 4392], 17 | [3165, 4373], 18 | [3187, 4344], 19 | [3178, 4291], 20 | [3175, 4291], 21 | [3145, 4245], 22 | [3094, 4225] 23 | ] 24 | ] 25 | }, 26 | "properties": { 27 | "classification": { 28 | "name": "G", 29 | "colorRGB": -4596839 30 | }, 31 | "isLocked": false, 32 | "measurements": [] 33 | } 34 | } 35 | ] -------------------------------------------------------------------------------- /input/annotation_v3/PAS_I_4_S_4_ROI_1.qpdata: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/PAS_I_4_S_4_ROI_1.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/PAS_I_4_S_4_ROI_3.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "type": "Feature", 4 | "id": "PathAnnotationObject", 5 | "geometry": { 6 | "type": "Polygon", 7 | "coordinates": [ 8 | [ 9 | [0, 3848], 10 | [0, 4248], 11 | [22, 4231], 12 | [55, 4191], 13 | [76, 4171], 14 | [96, 4140], 15 | [111, 4095], 16 | [114, 4058], 17 | [100, 4015], 18 | [88, 3972], 19 | [75, 3938], 20 | [63, 3910], 21 | [42, 3879], 22 | [18, 3857], 23 | [0, 3848] 24 | ] 25 | ] 26 | }, 27 | "properties": { 28 | "classification": { 29 | "name": "G", 30 | "colorRGB": -4596839 31 | }, 32 | "isLocked": false, 33 | "measurements": [] 34 | } 35 | }, 36 | { 37 | "type": "Feature", 38 | "id": "PathAnnotationObject", 39 | "geometry": { 40 | "type": "Polygon", 41 | "coordinates": [ 42 | [ 43 | [10367, 634], 44 | [10320, 635], 45 | [10281, 660], 46 | [10256, 708], 47 | [10230, 757], 48 | [10228, 798], 49 | [10242, 869], 50 | [10257, 921], 51 | [10275, 963], 52 | [10311, 1002], 53 | [10347, 1027], 54 | [10367, 1045], 55 | [10367, 634] 56 | ] 57 | ] 58 | }, 59 | "properties": { 60 | "classification": { 61 | "name": "G", 62 | "colorRGB": -4596839 63 | }, 64 | "isLocked": false, 65 | "measurements": [] 66 | } 67 | }, 68 | { 69 | "type": "Feature", 70 | "id": "PathAnnotationObject", 71 | "geometry": { 72 | "type": "Polygon", 73 | "coordinates": [ 74 | [ 75 | [9622, 0], 76 | [9624, 41], 77 | [9642, 102], 78 | [9670, 130], 79 | [9717, 165], 80 | [9765, 176], 81 | [9797, 160], 82 | [9839, 149], 83 | [9840, 149], 84 | [9872, 125], 85 | [9885, 94], 86 | [9885, 93], 87 | [9887, 93], 88 | [9887, 92], 89 | [9901, 53], 90 | [9911, 15], 91 | [9916, 1], 92 | [9622, 0] 93 | ] 94 | ] 95 | }, 96 | "properties": { 97 | "classification": { 98 | "name": "G", 99 | "colorRGB": -4596839 100 | }, 101 | "isLocked": false, 102 | "measurements": [] 103 | } 104 | }, 105 | { 106 | "type": "Feature", 107 | "id": "PathAnnotationObject", 108 | "geometry": { 109 | "type": "Polygon", 110 | "coordinates": [ 111 | [ 112 | [4694, 5102], 113 | [4638, 5120], 114 | [4586, 5158], 115 | [4554, 5196], 116 | [4875, 5196], 117 | [4869, 5186], 118 | [4831, 5141], 119 | [4830, 5141], 120 | [4781, 5115], 121 | [4780, 5115], 122 | [4695, 5102], 123 | [4694, 5102] 124 | ] 125 | ] 126 | }, 127 | "properties": { 128 | "classification": { 129 | "name": "G", 130 | "colorRGB": -4596839 131 | }, 132 | "isLocked": false, 133 | "measurements": [] 134 | } 135 | } 136 | ] -------------------------------------------------------------------------------- /input/annotation_v3/PAS_I_4_S_4_ROI_3.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/PAS_I_4_S_4_ROI_3.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/PAS_I_4_S_4_ROI_4.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "type": "Feature", 4 | "id": "PathAnnotationObject", 5 | "geometry": { 6 | "type": "Polygon", 7 | "coordinates": [ 8 | [ 9 | [1885, 3870], 10 | [1824, 3874], 11 | [1781, 3904], 12 | [1781, 3905], 13 
| [1772, 3965], 14 | [1784, 4029], 15 | [1826, 4057], 16 | [1855, 4077], 17 | [1904, 4107], 18 | [1941, 4106], 19 | [1980, 4088], 20 | [2016, 4051], 21 | [2043, 4010], 22 | [2048, 3959], 23 | [1996, 3896], 24 | [1942, 3878], 25 | [1885, 3870] 26 | ] 27 | ] 28 | }, 29 | "properties": { 30 | "classification": { 31 | "name": "FC", 32 | "colorRGB": -1296326 33 | }, 34 | "isLocked": false, 35 | "measurements": [] 36 | } 37 | }, 38 | { 39 | "type": "Feature", 40 | "id": "PathAnnotationObject", 41 | "geometry": { 42 | "type": "Polygon", 43 | "coordinates": [ 44 | [ 45 | [9550, 0], 46 | [9575, 63], 47 | [9628, 121], 48 | [9693, 144], 49 | [9771, 131], 50 | [9821, 93], 51 | [9868, 60], 52 | [9898, 0], 53 | [9550, 0] 54 | ] 55 | ] 56 | }, 57 | "properties": { 58 | "classification": { 59 | "name": "G", 60 | "colorRGB": -4596839 61 | }, 62 | "isLocked": false, 63 | "measurements": [] 64 | } 65 | }, 66 | { 67 | "type": "Feature", 68 | "id": "PathAnnotationObject", 69 | "geometry": { 70 | "type": "Polygon", 71 | "coordinates": [ 72 | [ 73 | [10459, 5974], 74 | [10432, 6015], 75 | [10414, 6053], 76 | [10403, 6105], 77 | [10402, 6158], 78 | [10421, 6207], 79 | [10425, 6207], 80 | [10459, 6241], 81 | [10459, 5974] 82 | ] 83 | ] 84 | }, 85 | "properties": { 86 | "classification": { 87 | "name": "G", 88 | "colorRGB": -4596839 89 | }, 90 | "isLocked": false, 91 | "measurements": [] 92 | } 93 | }, 94 | { 95 | "type": "Feature", 96 | "id": "PathAnnotationObject", 97 | "geometry": { 98 | "type": "Polygon", 99 | "coordinates": [ 100 | [ 101 | [10145, 0], 102 | [10153, 37], 103 | [10172, 84], 104 | [10206, 118], 105 | [10259, 145], 106 | [10319, 154], 107 | [10360, 138], 108 | [10407, 100], 109 | [10460, 74], 110 | [10460, 0], 111 | [10145, 0] 112 | ] 113 | ] 114 | }, 115 | "properties": { 116 | "classification": { 117 | "name": "G", 118 | "colorRGB": -4596839 119 | }, 120 | "isLocked": false, 121 | "measurements": [] 122 | } 123 | } 124 | ] -------------------------------------------------------------------------------- /input/annotation_v3/PAS_I_4_S_4_ROI_4.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/PAS_I_4_S_4_ROI_4.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/PAS_I_4_S_5_ROI_1.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "type": "Feature", 4 | "id": "PathAnnotationObject", 5 | "geometry": { 6 | "type": "Polygon", 7 | "coordinates": [ 8 | [ 9 | [3687, 4359], 10 | [3653, 4361], 11 | [3617, 4361], 12 | [3587, 4377], 13 | [3564, 4401], 14 | [3550, 4430], 15 | [3550, 4454], 16 | [3560, 4491], 17 | [3576, 4517], 18 | [3601, 4521], 19 | [3632, 4518], 20 | [3662, 4495], 21 | [3701, 4453], 22 | [3720, 4437], 23 | [3732, 4407], 24 | [3731, 4382], 25 | [3707, 4363], 26 | [3687, 4360], 27 | [3687, 4359] 28 | ] 29 | ] 30 | }, 31 | "properties": { 32 | "classification": { 33 | "name": "FC", 34 | "colorRGB": -1296326 35 | }, 36 | "isLocked": false, 37 | "measurements": [] 38 | } 39 | }, 40 | { 41 | "type": "Feature", 42 | "id": "PathAnnotationObject", 43 | "geometry": { 44 | "type": "Polygon", 45 | "coordinates": [ 46 | [ 47 | [4499, 4095], 48 | [4461, 4097], 49 | [4461, 4096], 50 | [4460, 4096], 51 | [4430, 4108], 52 | [4429, 4108], 53 | [4410, 4144], 54 | [4401, 4185], 55 | [4410, 4225], 56 | [4438, 4259], 57 | [4460, 4285], 58 | [4486, 4289], 59 | 
[4519, 4284], 60 | [4546, 4257], 61 | [4566, 4212], 62 | [4566, 4154], 63 | [4538, 4110], 64 | [4499, 4095] 65 | ] 66 | ] 67 | }, 68 | "properties": { 69 | "classification": { 70 | "name": "G", 71 | "colorRGB": -4596839 72 | }, 73 | "isLocked": false, 74 | "measurements": [] 75 | } 76 | } 77 | ] -------------------------------------------------------------------------------- /input/annotation_v3/PAS_I_4_S_5_ROI_1.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/PAS_I_4_S_5_ROI_1.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/PAS_I_4_S_5_ROI_4.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "type": "Feature", 4 | "id": "PathAnnotationObject", 5 | "geometry": { 6 | "type": "Polygon", 7 | "coordinates": [ 8 | [ 9 | [2622, 4429], 10 | [2596, 4445], 11 | [2562, 4500], 12 | [2563, 4500], 13 | [2536, 4558], 14 | [2537, 4558], 15 | [2537, 4559], 16 | [2529, 4619], 17 | [2530, 4619], 18 | [2565, 4667], 19 | [2570, 4714], 20 | [2575, 4768], 21 | [2587, 4777], 22 | [2842, 4777], 23 | [2860, 4743], 24 | [2843, 4685], 25 | [2798, 4660], 26 | [2767, 4634], 27 | [2767, 4633], 28 | [2766, 4633], 29 | [2728, 4599], 30 | [2728, 4598], 31 | [2699, 4551], 32 | [2701, 4551], 33 | [2701, 4550], 34 | [2703, 4550], 35 | [2678, 4493], 36 | [2647, 4462], 37 | [2622, 4429] 38 | ] 39 | ] 40 | }, 41 | "properties": { 42 | "classification": { 43 | "name": "G", 44 | "colorRGB": -4596839 45 | }, 46 | "isLocked": false, 47 | "measurements": [] 48 | } 49 | }, 50 | { 51 | "type": "Feature", 52 | "id": "PathAnnotationObject", 53 | "geometry": { 54 | "type": "Polygon", 55 | "coordinates": [ 56 | [ 57 | [1463, 935], 58 | [1420, 954], 59 | [1386, 976], 60 | [1373, 1001], 61 | [1373, 1036], 62 | [1372, 1037], 63 | [1372, 1079], 64 | [1381, 1113], 65 | [1406, 1144], 66 | [1405, 1144], 67 | [1439, 1159], 68 | [1487, 1156], 69 | [1529, 1145], 70 | [1567, 1124], 71 | [1595, 1072], 72 | [1587, 1019], 73 | [1561, 980], 74 | [1537, 952], 75 | [1507, 938], 76 | [1463, 935] 77 | ] 78 | ] 79 | }, 80 | "properties": { 81 | "classification": { 82 | "name": "FC", 83 | "colorRGB": -1296326 84 | }, 85 | "isLocked": false, 86 | "measurements": [] 87 | } 88 | } 89 | ] -------------------------------------------------------------------------------- /input/annotation_v3/PAS_I_4_S_5_ROI_4.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/PAS_I_4_S_5_ROI_4.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/PAS_I_4_S_6_ROI_3.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "type": "Feature", 4 | "id": "PathAnnotationObject", 5 | "geometry": { 6 | "type": "Polygon", 7 | "coordinates": [ 8 | [ 9 | [1260, 3077], 10 | [1211, 3081], 11 | [1166, 3089], 12 | [1128, 3126], 13 | [1132, 3160], 14 | [1150, 3181], 15 | [1166, 3208], 16 | [1173, 3239], 17 | [1204, 3253], 18 | [1236, 3258], 19 | [1271, 3256], 20 | [1317, 3238], 21 | [1349, 3212], 22 | [1350, 3155], 23 | [1320, 3112], 24 | [1260, 3078], 25 | [1260, 3077] 26 | ] 27 | ] 28 | }, 29 | "properties": { 30 | "classification": { 31 | "name": "FC", 32 | "colorRGB": -1296326 33 | }, 34 | 
"isLocked": false, 35 | "measurements": [] 36 | } 37 | } 38 | ] -------------------------------------------------------------------------------- /input/annotation_v3/PAS_I_4_S_6_ROI_3.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/PAS_I_4_S_6_ROI_3.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/PAS_I_4_S_6_ROI_4.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "type": "Feature", 4 | "id": "PathAnnotationObject", 5 | "geometry": { 6 | "type": "Polygon", 7 | "coordinates": [ 8 | [ 9 | [2825, 4689], 10 | [2726, 4715], 11 | [2725, 4715], 12 | [2673, 4765], 13 | [2670, 4765], 14 | [2670, 4766], 15 | [2669, 4766], 16 | [2609, 4813], 17 | [2608, 4813], 18 | [2582, 4859], 19 | [3000, 4859], 20 | [2987, 4820], 21 | [2957, 4755], 22 | [2935, 4708], 23 | [2897, 4697], 24 | [2896, 4697], 25 | [2825, 4689] 26 | ] 27 | ] 28 | }, 29 | "properties": { 30 | "classification": { 31 | "name": "G", 32 | "colorRGB": -4596839 33 | }, 34 | "isLocked": false, 35 | "measurements": [] 36 | } 37 | }, 38 | { 39 | "type": "Feature", 40 | "id": "PathAnnotationObject", 41 | "geometry": { 42 | "type": "Polygon", 43 | "coordinates": [ 44 | [ 45 | [424, 4729], 46 | [424, 4730], 47 | [385, 4740], 48 | [385, 4741], 49 | [349, 4770], 50 | [317, 4808], 51 | [300, 4859], 52 | [590, 4858], 53 | [592, 4858], 54 | [580, 4827], 55 | [580, 4828], 56 | [561, 4795], 57 | [560, 4795], 58 | [529, 4764], 59 | [528, 4764], 60 | [471, 4739], 61 | [424, 4729] 62 | ] 63 | ] 64 | }, 65 | "properties": { 66 | "classification": { 67 | "name": "FC", 68 | "colorRGB": -1296326 69 | }, 70 | "isLocked": false, 71 | "measurements": [] 72 | } 73 | }, 74 | { 75 | "type": "Feature", 76 | "id": "PathAnnotationObject", 77 | "geometry": { 78 | "type": "Polygon", 79 | "coordinates": [ 80 | [ 81 | [306, 1594], 82 | [251, 1603], 83 | [216, 1635], 84 | [179, 1660], 85 | [182, 1711], 86 | [191, 1757], 87 | [253, 1793], 88 | [303, 1783], 89 | [353, 1739], 90 | [368, 1693], 91 | [367, 1693], 92 | [370, 1648], 93 | [355, 1620], 94 | [355, 1619], 95 | [352, 1619], 96 | [306, 1594] 97 | ] 98 | ] 99 | }, 100 | "properties": { 101 | "classification": { 102 | "name": "G", 103 | "colorRGB": -4596839 104 | }, 105 | "isLocked": false, 106 | "measurements": [] 107 | } 108 | }, 109 | { 110 | "type": "Feature", 111 | "id": "PathAnnotationObject", 112 | "geometry": { 113 | "type": "Polygon", 114 | "coordinates": [ 115 | [ 116 | [6669, 0], 117 | [6713, 23], 118 | [6714, 23], 119 | [6772, 62], 120 | [6773, 62], 121 | [6773, 63], 122 | [6774, 63], 123 | [6844, 77], 124 | [6933, 76], 125 | [6980, 70], 126 | [7024, 45], 127 | [7063, 31], 128 | [7082, 23], 129 | [7092, 0], 130 | [6669, 0] 131 | ] 132 | ] 133 | }, 134 | "properties": { 135 | "classification": { 136 | "name": "G", 137 | "colorRGB": -4596839 138 | }, 139 | "isLocked": false, 140 | "measurements": [] 141 | } 142 | }, 143 | { 144 | "type": "Feature", 145 | "id": "PathAnnotationObject", 146 | "geometry": { 147 | "type": "Polygon", 148 | "coordinates": [ 149 | [ 150 | [1966, 4584], 151 | [1930, 4605], 152 | [1876, 4638], 153 | [1850, 4670], 154 | [1856, 4702], 155 | [1885, 4745], 156 | [1926, 4760], 157 | [1927, 4761], 158 | [1973, 4763], 159 | [2022, 4751], 160 | [2054, 4730], 161 | [2054, 4729], 162 | [2061, 4702], 163 | [2064, 4665], 164 | [2064, 4664], 165 
| [2063, 4664], 166 | [2052, 4612], 167 | [2052, 4611], 168 | [2053, 4611], 169 | [2053, 4610], 170 | [2039, 4587], 171 | [2010, 4584], 172 | [1966, 4584] 173 | ] 174 | ] 175 | }, 176 | "properties": { 177 | "classification": { 178 | "name": "FC", 179 | "colorRGB": -1296326 180 | }, 181 | "isLocked": false, 182 | "measurements": [] 183 | } 184 | } 185 | ] -------------------------------------------------------------------------------- /input/annotation_v3/PAS_I_4_S_6_ROI_4.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/PAS_I_4_S_6_ROI_4.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/PAS_I_4_S_6_ROI_6.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "type": "Feature", 4 | "id": "PathAnnotationObject", 5 | "geometry": { 6 | "type": "Polygon", 7 | "coordinates": [ 8 | [ 9 | [4082, 4340], 10 | [4082, 4341], 11 | [4044, 4361], 12 | [4044, 4362], 13 | [4043, 4362], 14 | [4004, 4380], 15 | [3983, 4400], 16 | [3965, 4429], 17 | [3947, 4452], 18 | [4239, 4452], 19 | [4231, 4441], 20 | [4212, 4405], 21 | [4212, 4404], 22 | [4162, 4357], 23 | [4124, 4340], 24 | [4082, 4340] 25 | ] 26 | ] 27 | }, 28 | "properties": { 29 | "isLocked": false, 30 | "measurements": [] 31 | } 32 | } 33 | ] -------------------------------------------------------------------------------- /input/annotation_v3/PAS_I_4_S_6_ROI_6.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/PAS_I_4_S_6_ROI_6.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/SAS_21883_001.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/SAS_21883_001.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/SAS_21891_001.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/SAS_21891_001.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/SAS_21896_001.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/SAS_21896_001.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/SAS_21904_001.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/SAS_21904_001.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/SAS_21908_001.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "type": "Feature", 4 | "id": "PathAnnotationObject", 5 | "geometry": { 6 | "type": "MultiPolygon", 7 | "coordinates": [ 8 | [ 9 | [ 10 | [2293, 21453], 11 | [2223, 21488], 12 | [2191, 
21533], 13 | [2195, 21587], 14 | [2235, 21648], 15 | [2293, 21684], 16 | [2325, 21727], 17 | [2360, 21763], 18 | [2400, 21818], 19 | [2432, 21871], 20 | [2433, 21895], 21 | [2445.92, 21915.88], 22 | [2497, 21909], 23 | [2538, 21862], 24 | [2559, 21815], 25 | [2565, 21758], 26 | [2564, 21691], 27 | [2545, 21616], 28 | [2493, 21554], 29 | [2435, 21506], 30 | [2352, 21463], 31 | [2293, 21453] 32 | ] 33 | ], 34 | [ 35 | [ 36 | [2445.92, 21915.88], 37 | [2445, 21916], 38 | [2446, 21916], 39 | [2445.92, 21915.88] 40 | ] 41 | ] 42 | ] 43 | }, 44 | "properties": { 45 | "classification": { 46 | "name": "G", 47 | "colorRGB": -4596839 48 | }, 49 | "isLocked": false, 50 | "measurements": [] 51 | } 52 | }, 53 | { 54 | "type": "Feature", 55 | "id": "PathAnnotationObject", 56 | "geometry": { 57 | "type": "Polygon", 58 | "coordinates": [ 59 | [ 60 | [2752, 10854], 61 | [2638, 10871], 62 | [2566, 10903], 63 | [2528, 10916], 64 | [2488, 10980], 65 | [2488, 10982], 66 | [2449, 11042], 67 | [2427, 11119], 68 | [2441, 11166], 69 | [2533, 11157], 70 | [2537, 11157], 71 | [2701, 11161], 72 | [2795, 11121], 73 | [2869, 11056], 74 | [2890, 11016], 75 | [2887, 10930], 76 | [2846, 10874], 77 | [2752, 10854] 78 | ] 79 | ] 80 | }, 81 | "properties": { 82 | "classification": { 83 | "name": "G", 84 | "colorRGB": -4596839 85 | }, 86 | "isLocked": false, 87 | "measurements": [] 88 | } 89 | }, 90 | { 91 | "type": "Feature", 92 | "id": "PathAnnotationObject", 93 | "geometry": { 94 | "type": "Polygon", 95 | "coordinates": [ 96 | [ 97 | [2992, 25618], 98 | [2933, 25642], 99 | [2885, 25708], 100 | [2866, 25784], 101 | [2884, 25861], 102 | [2929, 25913], 103 | [3003, 25924], 104 | [3060, 25897], 105 | [3107, 25859], 106 | [3143, 25806], 107 | [3140, 25734], 108 | [3106, 25673], 109 | [3104, 25673], 110 | [3058, 25624], 111 | [3056, 25624], 112 | [2992, 25618] 113 | ] 114 | ] 115 | }, 116 | "properties": { 117 | "classification": { 118 | "name": "G", 119 | "colorRGB": -4596839 120 | }, 121 | "isLocked": false, 122 | "measurements": [] 123 | } 124 | }, 125 | { 126 | "type": "Feature", 127 | "id": "PathAnnotationObject", 128 | "geometry": { 129 | "type": "MultiPolygon", 130 | "coordinates": [ 131 | [ 132 | [ 133 | [2721, 18452], 134 | [2632, 18490], 135 | [2632, 18492], 136 | [2625, 18492], 137 | [2574, 18553], 138 | [2544, 18628], 139 | [2534, 18699], 140 | [2571, 18783], 141 | [2609, 18849], 142 | [2645, 18885], 143 | [2685, 18917], 144 | [2741, 18945], 145 | [2805, 18962], 146 | [2887, 18958], 147 | [2965, 18934], 148 | [3006, 18897], 149 | [3044, 18828], 150 | [3040, 18771], 151 | [3049, 18717], 152 | [3051, 18665], 153 | [3051, 18664], 154 | [3030, 18621], 155 | [3002, 18575], 156 | [2977, 18549], 157 | [2912, 18509], 158 | [2848, 18483], 159 | [2813.29, 18471.07], 160 | [2721, 18452] 161 | ] 162 | ], 163 | [ 164 | [ 165 | [2813.29, 18471.07], 166 | [2842, 18477], 167 | [2843, 18477], 168 | [2784, 18461], 169 | [2813.29, 18471.07] 170 | ] 171 | ] 172 | ] 173 | }, 174 | "properties": { 175 | "classification": { 176 | "name": "G", 177 | "colorRGB": -4596839 178 | }, 179 | "isLocked": false, 180 | "measurements": [] 181 | } 182 | }, 183 | { 184 | "type": "Feature", 185 | "id": "PathAnnotationObject", 186 | "geometry": { 187 | "type": "Polygon", 188 | "coordinates": [ 189 | [ 190 | [1500, 14878], 191 | [1465, 14910], 192 | [1420, 14969], 193 | [1375, 15050], 194 | [1361, 15095], 195 | [1362, 15147], 196 | [1372, 15194], 197 | [1369, 15226], 198 | [1362, 15252], 199 | [1418, 15232], 200 | [1474, 15232], 201 | [1529, 15212], 202 | 
[1578, 15200], 203 | [1579, 15200], 204 | [1610, 15207], 205 | [1643, 15198], 206 | [1656, 15157], 207 | [1665, 15105], 208 | [1662, 15041], 209 | [1656, 14985], 210 | [1659, 14985], 211 | [1646, 14943], 212 | [1610, 14918], 213 | [1549, 14891], 214 | [1500, 14878] 215 | ] 216 | ] 217 | }, 218 | "properties": { 219 | "classification": { 220 | "name": "G", 221 | "colorRGB": -4596839 222 | }, 223 | "isLocked": false, 224 | "measurements": [] 225 | } 226 | }, 227 | { 228 | "type": "Feature", 229 | "id": "PathAnnotationObject", 230 | "geometry": { 231 | "type": "Polygon", 232 | "coordinates": [ 233 | [ 234 | [1588, 13849], 235 | [1522, 13886], 236 | [1463, 13934], 237 | [1449, 14006], 238 | [1441, 14105], 239 | [1451, 14180], 240 | [1466, 14225], 241 | [1466, 14226], 242 | [1463, 14226], 243 | [1501, 14249], 244 | [1560, 14257], 245 | [1617, 14281], 246 | [1619, 14281], 247 | [1673, 14283], 248 | [1715, 14251], 249 | [1753, 14193], 250 | [1776, 14123], 251 | [1793, 14059], 252 | [1778, 13989], 253 | [1736, 13918], 254 | [1680, 13872], 255 | [1679, 13872], 256 | [1679, 13871], 257 | [1676, 13871], 258 | [1588, 13849] 259 | ] 260 | ] 261 | }, 262 | "properties": { 263 | "classification": { 264 | "name": "G", 265 | "colorRGB": -4596839 266 | }, 267 | "isLocked": false, 268 | "measurements": [] 269 | } 270 | }, 271 | { 272 | "type": "Feature", 273 | "id": "PathAnnotationObject", 274 | "geometry": { 275 | "type": "Polygon", 276 | "coordinates": [ 277 | [ 278 | [2526, 17515], 279 | [2480, 17543], 280 | [2435, 17645], 281 | [2418, 17692], 282 | [2407, 17740], 283 | [2375, 17834], 284 | [2375, 17944], 285 | [2430, 18024], 286 | [2428, 18024], 287 | [2437, 18056], 288 | [2441, 18056], 289 | [2496, 18045], 290 | [2555, 18031], 291 | [2620, 18021], 292 | [2680, 18029], 293 | [2728, 18001], 294 | [2776, 17980], 295 | [2765, 17900], 296 | [2766, 17822], 297 | [2749, 17747], 298 | [2719, 17676], 299 | [2687, 17622], 300 | [2664, 17566], 301 | [2644, 17552], 302 | [2592, 17534], 303 | [2526, 17515] 304 | ] 305 | ] 306 | }, 307 | "properties": { 308 | "classification": { 309 | "name": "G", 310 | "colorRGB": -4596839 311 | }, 312 | "isLocked": false, 313 | "measurements": [] 314 | } 315 | } 316 | ] -------------------------------------------------------------------------------- /input/annotation_v3/SAS_21908_001.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/SAS_21908_001.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/SAS_21915_001.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/SAS_21915_001.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/SAS_21924_001.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "type": "Feature", 4 | "id": "PathAnnotationObject", 5 | "geometry": { 6 | "type": "Polygon", 7 | "coordinates": [ 8 | [ 9 | [3087, 8516], 10 | [3051, 8526], 11 | [3052, 8526], 12 | [3014, 8551], 13 | [2974, 8579], 14 | [2901, 8586], 15 | [2855, 8582], 16 | [2811, 8618], 17 | [2787, 8666], 18 | [2803, 8743], 19 | [2803, 8744], 20 | [2804, 8744], 21 | [2834, 8808], 22 | [2889, 8846], 23 | [2946, 8887], 24 | [2946, 8888], 25 | [3019, 8925], 26 
| [3021, 8925], 27 | [3117, 8919], 28 | [3121, 8919], 29 | [3211, 8897], 30 | [3286, 8872], 31 | [3317, 8838], 32 | [3324, 8790], 33 | [3308, 8729], 34 | [3308, 8728], 35 | [3281, 8680], 36 | [3281, 8679], 37 | [3241, 8634], 38 | [3241, 8633], 39 | [3220, 8606], 40 | [3170, 8579], 41 | [3171, 8578], 42 | [3171, 8577], 43 | [3173, 8577], 44 | [3173, 8575], 45 | [3138, 8531], 46 | [3138, 8529], 47 | [3087, 8516] 48 | ] 49 | ] 50 | }, 51 | "properties": { 52 | "classification": { 53 | "name": "G", 54 | "colorRGB": -4596839 55 | }, 56 | "isLocked": false, 57 | "measurements": [] 58 | } 59 | }, 60 | { 61 | "type": "Feature", 62 | "id": "PathAnnotationObject", 63 | "geometry": { 64 | "type": "MultiPolygon", 65 | "coordinates": [ 66 | [ 67 | [ 68 | [8714, 5477], 69 | [8651, 5488], 70 | [8651, 5487], 71 | [8591, 5502], 72 | [8551, 5543], 73 | [8515, 5583], 74 | [8496, 5624], 75 | [8479, 5664], 76 | [8480, 5714], 77 | [8516, 5751], 78 | [8538, 5816], 79 | [8579, 5853], 80 | [8600, 5879], 81 | [8631, 5895], 82 | [8707, 5882], 83 | [8740, 5849], 84 | [8781, 5834], 85 | [8836, 5837], 86 | [8880, 5813], 87 | [8912, 5778], 88 | [8926, 5728], 89 | [8926, 5727], 90 | [8924, 5727], 91 | [8944.86, 5672.38], 92 | [8938, 5643], 93 | [8937, 5643], 94 | [8922, 5619], 95 | [8908, 5590], 96 | [8889, 5544], 97 | [8876, 5519], 98 | [8833, 5502], 99 | [8781, 5498], 100 | [8758, 5481], 101 | [8714, 5478], 102 | [8714, 5477] 103 | ] 104 | ], 105 | [ 106 | [ 107 | [8944.86, 5672.38], 108 | [8945, 5673], 109 | [8945, 5672], 110 | [8944.86, 5672.38] 111 | ] 112 | ] 113 | ] 114 | }, 115 | "properties": { 116 | "classification": { 117 | "name": "G", 118 | "colorRGB": -4596839 119 | }, 120 | "isLocked": false, 121 | "measurements": [] 122 | } 123 | }, 124 | { 125 | "type": "Feature", 126 | "id": "PathAnnotationObject", 127 | "geometry": { 128 | "type": "Polygon", 129 | "coordinates": [ 130 | [ 131 | [7943, 5961], 132 | [7847, 5991], 133 | [7789, 6031], 134 | [7711, 6118], 135 | [7698, 6193], 136 | [7709, 6272], 137 | [7727, 6363], 138 | [7793, 6411], 139 | [7884, 6442], 140 | [7990, 6431], 141 | [7996, 6431], 142 | [8082, 6379], 143 | [8158, 6309], 144 | [8175, 6225], 145 | [8156, 6146], 146 | [8123, 6071], 147 | [8077, 6013], 148 | [8009, 5966], 149 | [7943, 5961] 150 | ] 151 | ] 152 | }, 153 | "properties": { 154 | "classification": { 155 | "name": "FC", 156 | "colorRGB": -1296326 157 | }, 158 | "isLocked": false, 159 | "measurements": [] 160 | } 161 | }, 162 | { 163 | "type": "Feature", 164 | "id": "PathAnnotationObject", 165 | "geometry": { 166 | "type": "Polygon", 167 | "coordinates": [ 168 | [ 169 | [8518, 6022], 170 | [8443, 6060], 171 | [8399, 6107], 172 | [8366, 6129], 173 | [8365, 6130], 174 | [8338, 6167], 175 | [8305, 6219], 176 | [8297, 6265], 177 | [8301, 6303], 178 | [8312, 6353], 179 | [8333, 6382], 180 | [8364, 6395], 181 | [8417, 6395], 182 | [8459, 6409], 183 | [8518, 6408], 184 | [8519, 6408], 185 | [8542, 6376], 186 | [8565, 6350], 187 | [8595, 6363], 188 | [8633, 6358], 189 | [8672, 6297], 190 | [8692, 6245], 191 | [8721, 6206], 192 | [8722, 6206], 193 | [8731, 6147], 194 | [8687, 6105], 195 | [8623, 6032], 196 | [8623, 6030], 197 | [8520, 6022], 198 | [8518, 6022] 199 | ] 200 | ] 201 | }, 202 | "properties": { 203 | "classification": { 204 | "name": "FC", 205 | "colorRGB": -1296326 206 | }, 207 | "isLocked": false, 208 | "measurements": [] 209 | } 210 | }, 211 | { 212 | "type": "Feature", 213 | "id": "PathAnnotationObject", 214 | "geometry": { 215 | "type": "Polygon", 216 | "coordinates": [ 217 | [ 
218 | [12638, 4633], 219 | [12588, 4641], 220 | [12542, 4658], 221 | [12543, 4658], 222 | [12532, 4683], 223 | [12498, 4693], 224 | [12467, 4702], 225 | [12435, 4727], 226 | [12406, 4765], 227 | [12389, 4840], 228 | [12389, 4841], 229 | [12389, 4842], 230 | [12385, 4896], 231 | [12385, 4897], 232 | [12385, 4898], 233 | [12375, 4938], 234 | [12374, 4938], 235 | [12378, 4971], 236 | [12409, 5007], 237 | [12432, 5036], 238 | [12484, 5039], 239 | [12541, 5018], 240 | [12609, 4989], 241 | [12610, 4989], 242 | [12683, 4971], 243 | [12706, 4962], 244 | [12720, 4939], 245 | [12764, 4938], 246 | [12789, 4915], 247 | [12784, 4878], 248 | [12778, 4851], 249 | [12778, 4850], 250 | [12759, 4811], 251 | [12746, 4786], 252 | [12734, 4745], 253 | [12722, 4707], 254 | [12706, 4680], 255 | [12706, 4679], 256 | [12689, 4651], 257 | [12638, 4633] 258 | ] 259 | ] 260 | }, 261 | "properties": { 262 | "classification": { 263 | "name": "FC", 264 | "colorRGB": -1296326 265 | }, 266 | "isLocked": false, 267 | "measurements": [] 268 | } 269 | }, 270 | { 271 | "type": "Feature", 272 | "id": "PathAnnotationObject", 273 | "geometry": { 274 | "type": "Polygon", 275 | "coordinates": [ 276 | [ 277 | [11735, 4542], 278 | [11668, 4553], 279 | [11660, 4553], 280 | [11595, 4582], 281 | [11594, 4582], 282 | [11593, 4584], 283 | [11516, 4650], 284 | [11516, 4651], 285 | [11513, 4651], 286 | [11513, 4653], 287 | [11513, 4654], 288 | [11512, 4654], 289 | [11512, 4656], 290 | [11511, 4657], 291 | [11490, 4709], 292 | [11489, 4710], 293 | [11520, 4760], 294 | [11560, 4812], 295 | [11592, 4853], 296 | [11622, 4881], 297 | [11667, 4872], 298 | [11707, 4889], 299 | [11707, 4890], 300 | [11709, 4890], 301 | [11751, 4908], 302 | [11816, 4899], 303 | [11890, 4878], 304 | [11936, 4842], 305 | [11946, 4794], 306 | [11962, 4738], 307 | [11962, 4737], 308 | [11964, 4679], 309 | [11964, 4678], 310 | [11955, 4656], 311 | [11904, 4636], 312 | [11858, 4623], 313 | [11840, 4600], 314 | [11802, 4559], 315 | [11735, 4542] 316 | ] 317 | ] 318 | }, 319 | "properties": { 320 | "classification": { 321 | "name": "G", 322 | "colorRGB": -4596839 323 | }, 324 | "isLocked": false, 325 | "measurements": [] 326 | } 327 | } 328 | ] -------------------------------------------------------------------------------- /input/annotation_v3/SAS_21924_001.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/SAS_21924_001.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/SAS_21930_001.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "type": "Feature", 4 | "id": "PathAnnotationObject", 5 | "geometry": { 6 | "type": "Polygon", 7 | "coordinates": [ 8 | [ 9 | [12548, 2200], 10 | [12497, 2212], 11 | [12461, 2245], 12 | [12436, 2292], 13 | [12436, 2358], 14 | [12446, 2428], 15 | [12474, 2507], 16 | [12501, 2548], 17 | [12576, 2577], 18 | [12651, 2567], 19 | [12716, 2520], 20 | [12757, 2414], 21 | [12739, 2330], 22 | [12697, 2268], 23 | [12648, 2212], 24 | [12602, 2204], 25 | [12548, 2200] 26 | ] 27 | ] 28 | }, 29 | "properties": { 30 | "classification": { 31 | "name": "G", 32 | "colorRGB": -4596839 33 | }, 34 | "isLocked": false, 35 | "measurements": [] 36 | } 37 | }, 38 | { 39 | "type": "Feature", 40 | "id": "PathAnnotationObject", 41 | "geometry": { 42 | "type": "Polygon", 43 | "coordinates": [ 44 | [ 45 | [10726, 2257], 
46 | [10657, 2291], 47 | [10590, 2327], 48 | [10547, 2385], 49 | [10526, 2437], 50 | [10535, 2512], 51 | [10570, 2559], 52 | [10589, 2616], 53 | [10641, 2702], 54 | [10668, 2777], 55 | [10720, 2803], 56 | [10791, 2809], 57 | [10856, 2792], 58 | [10857, 2791], 59 | [10883, 2753], 60 | [10883, 2752], 61 | [10881, 2682], 62 | [10882, 2681], 63 | [10919, 2666], 64 | [10913, 2572], 65 | [10913, 2571], 66 | [10900, 2431], 67 | [10895, 2359], 68 | [10895, 2358], 69 | [10897, 2317], 70 | [10897, 2316], 71 | [10896, 2314], 72 | [10895, 2314], 73 | [10840, 2272], 74 | [10728, 2257], 75 | [10726, 2257] 76 | ] 77 | ] 78 | }, 79 | "properties": { 80 | "classification": { 81 | "name": "G", 82 | "colorRGB": -4596839 83 | }, 84 | "isLocked": false, 85 | "measurements": [] 86 | } 87 | }, 88 | { 89 | "type": "Feature", 90 | "id": "PathAnnotationObject", 91 | "geometry": { 92 | "type": "Polygon", 93 | "coordinates": [ 94 | [ 95 | [11944, 2025], 96 | [11877, 2057], 97 | [11845, 2110], 98 | [11852, 2196], 99 | [11871, 2292], 100 | [11897, 2345], 101 | [11957, 2399], 102 | [12013, 2461], 103 | [12065, 2477], 104 | [12094, 2479], 105 | [12163, 2474], 106 | [12242, 2453], 107 | [12325, 2406], 108 | [12315, 2341], 109 | [12351, 2252], 110 | [12376, 2190], 111 | [12345, 2159], 112 | [12345, 2157], 113 | [12269, 2108], 114 | [12183, 2064], 115 | [12051, 2025], 116 | [11944, 2025] 117 | ] 118 | ] 119 | }, 120 | "properties": { 121 | "classification": { 122 | "name": "G", 123 | "colorRGB": -4596839 124 | }, 125 | "isLocked": false, 126 | "measurements": [] 127 | } 128 | }, 129 | { 130 | "type": "Feature", 131 | "id": "PathAnnotationObject", 132 | "geometry": { 133 | "type": "Polygon", 134 | "coordinates": [ 135 | [ 136 | [11677, 2250], 137 | [11613, 2253], 138 | [11553, 2279], 139 | [11495, 2344], 140 | [11464, 2414], 141 | [11463, 2414], 142 | [11460, 2452], 143 | [11468, 2497], 144 | [11476, 2536], 145 | [11474, 2567], 146 | [11482, 2610], 147 | [11499, 2656], 148 | [11535, 2678], 149 | [11571, 2686], 150 | [11626, 2683], 151 | [11674, 2657], 152 | [11717, 2636], 153 | [11771, 2607], 154 | [11807, 2581], 155 | [11807, 2582], 156 | [11844, 2566], 157 | [11866, 2535], 158 | [11877, 2492], 159 | [11878, 2439], 160 | [11880, 2439], 161 | [11846, 2378], 162 | [11824, 2339], 163 | [11824, 2338], 164 | [11794, 2300], 165 | [11736, 2264], 166 | [11677, 2250] 167 | ] 168 | ] 169 | }, 170 | "properties": { 171 | "classification": { 172 | "name": "G", 173 | "colorRGB": -4596839 174 | }, 175 | "isLocked": false, 176 | "measurements": [] 177 | } 178 | }, 179 | { 180 | "type": "Feature", 181 | "id": "PathAnnotationObject", 182 | "geometry": { 183 | "type": "Polygon", 184 | "coordinates": [ 185 | [ 186 | [8917, 2809], 187 | [8800, 2827], 188 | [8727, 2866], 189 | [8727, 2867], 190 | [8725, 2867], 191 | [8666, 2920], 192 | [8688, 2969], 193 | [8728, 3009], 194 | [8786, 3061], 195 | [8813, 3105], 196 | [8869, 3168], 197 | [8847, 3205], 198 | [8856, 3256], 199 | [8897, 3318], 200 | [8999, 3312], 201 | [9000, 3312], 202 | [9053, 3283], 203 | [9067, 3263], 204 | [9111, 3247], 205 | [9110, 3247], 206 | [9175, 3192], 207 | [9205, 3109], 208 | [9236, 3009], 209 | [9196, 2898], 210 | [9111, 2853], 211 | [9109, 2853], 212 | [9033, 2820], 213 | [9030, 2820], 214 | [8917, 2811], 215 | [8917, 2809] 216 | ] 217 | ] 218 | }, 219 | "properties": { 220 | "classification": { 221 | "name": "G", 222 | "colorRGB": -4596839 223 | }, 224 | "isLocked": false, 225 | "measurements": [] 226 | } 227 | } 228 | ] 
-------------------------------------------------------------------------------- /input/annotation_v3/SAS_21930_001.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/SAS_21930_001.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/SAS_21937_001.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/SAS_21937_001.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/SAS_21942_001.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/SAS_21942_001.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/SESCAM_102.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/SESCAM_102.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/SESCAM_1_0.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/SESCAM_1_0.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/SESCAM_2_0.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/SESCAM_2_0.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/SESCAM_3_0.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/SESCAM_3_0.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/SESCAM_4_0.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/SESCAM_4_0.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/SESCAM_5_0.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/SESCAM_5_0.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/SESCAM_6_0.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/SESCAM_6_0.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/SESCAM_7_0.qpdata: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/SESCAM_7_0.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/SESCAM_8_0.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/SESCAM_8_0.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/SESCAM_9_0.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/SESCAM_9_0.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/VUHSK_1272.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/VUHSK_1272.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/VUHSK_1352.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/VUHSK_1352.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/VUHSK_1432.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/VUHSK_1432.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/VUHSK_1502.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/VUHSK_1502.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/VUHSK_1622.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/VUHSK_1622.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/VUHSK_1702.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/VUHSK_1702.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/VUHSK_1762.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/VUHSK_1762.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/VUHSK_1832.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/VUHSK_1832.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/VUHSK_1912.qpdata: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/VUHSK_1912.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/VUHSK_1992.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/VUHSK_1992.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/VUHSK_2072.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/VUHSK_2072.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/aaa6a05cc.tiff - VAN0003-LK-33-2-PAS_FFPE_pyr.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/aaa6a05cc.tiff - VAN0003-LK-33-2-PAS_FFPE_pyr.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/afa5e8098.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/afa5e8098.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/b2dc8411c.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "type": "Feature", 4 | "id": "PathAnnotationObject", 5 | "geometry": { 6 | "type": "Polygon", 7 | "coordinates": [ 8 | [ 9 | [11297, 10509], 10 | [11269, 10510], 11 | [11248, 10522], 12 | [11229, 10536], 13 | [11221, 10554], 14 | [11218, 10575], 15 | [11220, 10594], 16 | [11229, 10618], 17 | [11244, 10626], 18 | [11269, 10629], 19 | [11293, 10649], 20 | [11316, 10661], 21 | [11337, 10652], 22 | [11354, 10635], 23 | [11356, 10615], 24 | [11366, 10591], 25 | [11373, 10563], 26 | [11368, 10542], 27 | [11354, 10523], 28 | [11323, 10513], 29 | [11297, 10509] 30 | ] 31 | ] 32 | }, 33 | "properties": { 34 | "classification": { 35 | "name": "FC", 36 | "colorRGB": -1296326 37 | }, 38 | "isLocked": false, 39 | "measurements": [] 40 | } 41 | }, 42 | { 43 | "type": "Feature", 44 | "id": "PathAnnotationObject", 45 | "geometry": { 46 | "type": "Polygon", 47 | "coordinates": [ 48 | [ 49 | [10897, 5009], 50 | [10852, 5014], 51 | [10854, 5014], 52 | [10825, 5036], 53 | [10811, 5059], 54 | [10807, 5091], 55 | [10819, 5114], 56 | [10840, 5125], 57 | [10841, 5125], 58 | [10887, 5127], 59 | [10922, 5114], 60 | [10946, 5090], 61 | [10953, 5071], 62 | [10950, 5052], 63 | [10921, 5022], 64 | [10897, 5009] 65 | ] 66 | ] 67 | }, 68 | "properties": { 69 | "classification": { 70 | "name": "FC", 71 | "colorRGB": -1296326 72 | }, 73 | "isLocked": false, 74 | "measurements": [] 75 | } 76 | }, 77 | { 78 | "type": "Feature", 79 | "id": "PathAnnotationObject", 80 | "geometry": { 81 | "type": "Polygon", 82 | "coordinates": [ 83 | [ 84 | [12284, 6654], 85 | [12270, 6665], 86 | [12245, 6683], 87 | [12235, 6708], 88 | [12234, 6737], 89 | [12248, 6763], 90 | [12263, 6780], 91 | [12288, 6781], 92 | [12321, 6786], 93 | [12344, 6777], 94 | [12361, 6759], 
95 | [12362, 6759], 96 | [12374, 6741], 97 | [12373, 6713], 98 | [12376, 6694], 99 | [12364, 6684], 100 | [12346, 6671], 101 | [12333, 6659], 102 | [12312, 6656], 103 | [12284, 6654] 104 | ] 105 | ] 106 | }, 107 | "properties": { 108 | "classification": { 109 | "name": "FC", 110 | "colorRGB": -1296326 111 | }, 112 | "isLocked": false, 113 | "measurements": [] 114 | } 115 | }, 116 | { 117 | "type": "Feature", 118 | "id": "PathAnnotationObject", 119 | "geometry": { 120 | "type": "Polygon", 121 | "coordinates": [ 122 | [ 123 | [5637, 7176], 124 | [5611, 7189], 125 | [5611, 7190], 126 | [5594, 7211], 127 | [5594, 7237], 128 | [5628, 7288], 129 | [5659, 7315], 130 | [5696, 7313], 131 | [5723, 7284], 132 | [5728, 7242], 133 | [5722, 7218], 134 | [5696, 7196], 135 | [5677, 7177], 136 | [5637, 7176] 137 | ] 138 | ] 139 | }, 140 | "properties": { 141 | "classification": { 142 | "name": "FC", 143 | "colorRGB": -1296326 144 | }, 145 | "isLocked": false, 146 | "measurements": [] 147 | } 148 | }, 149 | { 150 | "type": "Feature", 151 | "id": "PathAnnotationObject", 152 | "geometry": { 153 | "type": "Polygon", 154 | "coordinates": [ 155 | [ 156 | [11176, 4798], 157 | [11159, 4803], 158 | [11148, 4810], 159 | [11143, 4819], 160 | [11138, 4834], 161 | [11130, 4845], 162 | [11128, 4864], 163 | [11128, 4865], 164 | [11128, 4866], 165 | [11132, 4885], 166 | [11137, 4901], 167 | [11144, 4916], 168 | [11157, 4929], 169 | [11173, 4945], 170 | [11202, 4950], 171 | [11231, 4943], 172 | [11250, 4933], 173 | [11263, 4922], 174 | [11266, 4902], 175 | [11268, 4880], 176 | [11265, 4858], 177 | [11259, 4841], 178 | [11252, 4826], 179 | [11238, 4816], 180 | [11224, 4803], 181 | [11210, 4800], 182 | [11194, 4798], 183 | [11176, 4798] 184 | ] 185 | ] 186 | }, 187 | "properties": { 188 | "classification": { 189 | "name": "FC", 190 | "colorRGB": -1296326 191 | }, 192 | "isLocked": false, 193 | "measurements": [] 194 | } 195 | }, 196 | { 197 | "type": "Feature", 198 | "id": "PathAnnotationObject", 199 | "geometry": { 200 | "type": "Polygon", 201 | "coordinates": [ 202 | [ 203 | [6077, 8529], 204 | [6061, 8539], 205 | [6026, 8559], 206 | [6012, 8582], 207 | [6005, 8614], 208 | [6005, 8650], 209 | [6014, 8673], 210 | [6015, 8673], 211 | [6042, 8699], 212 | [6042, 8698], 213 | [6045, 8698], 214 | [6049, 8724], 215 | [6060, 8748], 216 | [6087, 8765], 217 | [6085, 8765], 218 | [6112, 8775], 219 | [6141, 8785], 220 | [6184, 8788], 221 | [6186, 8788], 222 | [6218, 8776], 223 | [6237, 8750], 224 | [6246, 8725], 225 | [6239, 8707], 226 | [6233, 8677], 227 | [6232, 8645], 228 | [6224, 8615], 229 | [6205, 8594], 230 | [6188, 8574], 231 | [6177, 8559], 232 | [6151, 8555], 233 | [6150, 8555], 234 | [6129, 8542], 235 | [6107, 8535], 236 | [6077, 8529] 237 | ] 238 | ] 239 | }, 240 | "properties": { 241 | "classification": { 242 | "name": "G", 243 | "colorRGB": -4596839 244 | }, 245 | "isLocked": false, 246 | "measurements": [] 247 | } 248 | }, 249 | { 250 | "type": "Feature", 251 | "id": "PathAnnotationObject", 252 | "geometry": { 253 | "type": "Polygon", 254 | "coordinates": [ 255 | [ 256 | [10477, 10698], 257 | [10467, 10707], 258 | [10456, 10708], 259 | [10441, 10713], 260 | [10432, 10717], 261 | [10415, 10728], 262 | [10400, 10738], 263 | [10386, 10748], 264 | [10376, 10762], 265 | [10369, 10769], 266 | [10361, 10772], 267 | [10356, 10782], 268 | [10350, 10796], 269 | [10347, 10810], 270 | [10350, 10824], 271 | [10357, 10837], 272 | [10365, 10848], 273 | [10377, 10856], 274 | [10392, 10860], 275 | [10403, 10860], 276 | [10416, 10857], 
277 | [10428, 10856], 278 | [10438, 10860], 279 | [10456, 10861], 280 | [10471, 10854], 281 | [10477, 10844], 282 | [10478, 10831], 283 | [10487, 10822], 284 | [10495, 10808], 285 | [10500, 10794], 286 | [10510, 10777], 287 | [10521, 10761], 288 | [10532, 10749], 289 | [10540, 10734], 290 | [10540, 10735], 291 | [10542, 10719], 292 | [10532, 10711], 293 | [10531, 10711], 294 | [10517, 10707], 295 | [10504, 10699], 296 | [10491, 10698], 297 | [10477, 10698] 298 | ] 299 | ] 300 | }, 301 | "properties": { 302 | "classification": { 303 | "name": "FC", 304 | "colorRGB": -1296326 305 | }, 306 | "isLocked": false, 307 | "measurements": [] 308 | } 309 | } 310 | ] -------------------------------------------------------------------------------- /input/annotation_v3/b2dc8411c.tiff - VAN0009-LK-106-2-PAS_FFPE_pyr.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/b2dc8411c.tiff - VAN0009-LK-106-2-PAS_FFPE_pyr.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/b9a3865fc.tiff - VAN0005-RK-8-2-PAS_FFPE_pyr.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/b9a3865fc.tiff - VAN0005-RK-8-2-PAS_FFPE_pyr.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/c68fe75ea.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "type": "Feature", 4 | "id": "PathAnnotationObject", 5 | "geometry": { 6 | "type": "Polygon", 7 | "coordinates": [ 8 | [ 9 | [44260, 12513], 10 | [44175, 12560], 11 | [44107, 12620], 12 | [44108, 12620], 13 | [44100, 12676], 14 | [44132, 12725], 15 | [44157, 12746], 16 | [44203, 12769], 17 | [44271, 12765], 18 | [44351, 12753], 19 | [44351, 12754], 20 | [44398, 12693], 21 | [44412, 12633], 22 | [44398, 12575], 23 | [44398, 12574], 24 | [44398, 12573], 25 | [44343, 12522], 26 | [44260, 12513] 27 | ] 28 | ] 29 | }, 30 | "properties": { 31 | "classification": { 32 | "name": "FC", 33 | "colorRGB": -1296326 34 | }, 35 | "isLocked": false, 36 | "measurements": [] 37 | } 38 | }, 39 | { 40 | "type": "Feature", 41 | "id": "PathAnnotationObject", 42 | "geometry": { 43 | "type": "Polygon", 44 | "coordinates": [ 45 | [ 46 | [42513, 15424], 47 | [42435, 15434], 48 | [42365, 15468], 49 | [42288, 15500], 50 | [42228, 15557], 51 | [42201, 15645], 52 | [42234, 15720], 53 | [42263, 15786], 54 | [42267, 15849], 55 | [42319, 15914], 56 | [42357, 15926], 57 | [42410, 15898], 58 | [42476, 15852], 59 | [42543, 15801], 60 | [42595, 15716], 61 | [42610, 15622], 62 | [42592, 15534], 63 | [42574, 15459], 64 | [42513, 15424] 65 | ] 66 | ] 67 | }, 68 | "properties": { 69 | "classification": { 70 | "name": "G", 71 | "colorRGB": -4596839 72 | }, 73 | "isLocked": false, 74 | "measurements": [] 75 | } 76 | }, 77 | { 78 | "type": "Feature", 79 | "id": "PathAnnotationObject", 80 | "geometry": { 81 | "type": "MultiPolygon", 82 | "coordinates": [ 83 | [ 84 | [ 85 | [32792.7, 24244.85], 86 | [32791, 24245], 87 | [32793, 24245], 88 | [32792.7, 24244.85] 89 | ] 90 | ], 91 | [ 92 | [ 93 | [32792.7, 24244.85], 94 | [32848, 24240], 95 | [32923, 24211], 96 | [32976, 24138], 97 | [33010, 24060], 98 | [33003, 23982], 99 | [32938, 23962], 100 | [32937, 23962], 101 | [32866, 23958], 102 | 
[32814, 23958], 103 | [32742, 24007], 104 | [32741, 24007], 105 | [32698, 24062], 106 | [32698, 24135], 107 | [32723, 24210], 108 | [32792.7, 24244.85] 109 | ] 110 | ] 111 | ] 112 | }, 113 | "properties": { 114 | "classification": { 115 | "name": "G", 116 | "colorRGB": -4596839 117 | }, 118 | "isLocked": false, 119 | "measurements": [] 120 | } 121 | }, 122 | { 123 | "type": "Feature", 124 | "id": "PathAnnotationObject", 125 | "geometry": { 126 | "type": "Polygon", 127 | "coordinates": [ 128 | [ 129 | [8680, 26499], 130 | [8667, 26520], 131 | [8666, 26520], 132 | [8653, 26556], 133 | [8640, 26608], 134 | [8637, 26642], 135 | [8637, 26643], 136 | [8650, 26685], 137 | [8676, 26725], 138 | [8721, 26739], 139 | [8775, 26727], 140 | [8826, 26698], 141 | [8831, 26646], 142 | [8826, 26601], 143 | [8806, 26550], 144 | [8806, 26549], 145 | [8789, 26524], 146 | [8755, 26511], 147 | [8726, 26505], 148 | [8680, 26499] 149 | ] 150 | ] 151 | }, 152 | "properties": { 153 | "classification": { 154 | "name": "FC", 155 | "colorRGB": -1296326 156 | }, 157 | "isLocked": false, 158 | "measurements": [] 159 | } 160 | } 161 | ] -------------------------------------------------------------------------------- /input/annotation_v3/c68fe75ea.tiff - default.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/c68fe75ea.tiff - default.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/cb2d976f4.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "type": "Feature", 4 | "id": "PathAnnotationObject", 5 | "geometry": { 6 | "type": "Polygon", 7 | "coordinates": [ 8 | [ 9 | [7453, 17329], 10 | [7387, 17348], 11 | [7387, 17349], 12 | [7341, 17414], 13 | [7309, 17498], 14 | [7259, 17583], 15 | [7259, 17584], 16 | [7257, 17584], 17 | [7256, 17585], 18 | [7213, 17673], 19 | [7200, 17748], 20 | [7216, 17803], 21 | [7291, 17819], 22 | [7370, 17808], 23 | [7437, 17780], 24 | [7498, 17712], 25 | [7499, 17712], 26 | [7499, 17711], 27 | [7503, 17708], 28 | [7517, 17665], 29 | [7517, 17664], 30 | [7540, 17568], 31 | [7540, 17567], 32 | [7537, 17567], 33 | [7537, 17566], 34 | [7532, 17480], 35 | [7532, 17479], 36 | [7528, 17374], 37 | [7528, 17373], 38 | [7455, 17329], 39 | [7453, 17329] 40 | ] 41 | ] 42 | }, 43 | "properties": { 44 | "classification": { 45 | "name": "FC", 46 | "colorRGB": -1296326 47 | }, 48 | "isLocked": false, 49 | "measurements": [] 50 | } 51 | }, 52 | { 53 | "type": "Feature", 54 | "id": "PathAnnotationObject", 55 | "geometry": { 56 | "type": "Polygon", 57 | "coordinates": [ 58 | [ 59 | [6543, 26322], 60 | [6483, 26330], 61 | [6482, 26331], 62 | [6421, 26371], 63 | [6407, 26437], 64 | [6407, 26438], 65 | [6394, 26486], 66 | [6393, 26487], 67 | [6393, 26488], 68 | [6372, 26550], 69 | [6376, 26618], 70 | [6376, 26619], 71 | [6383, 26656], 72 | [6401, 26692], 73 | [6450, 26711], 74 | [6450, 26712], 75 | [6487, 26719], 76 | [6523, 26713], 77 | [6571, 26676], 78 | [6617, 26604], 79 | [6617, 26603], 80 | [6631, 26534], 81 | [6632, 26534], 82 | [6632, 26465], 83 | [6607, 26406], 84 | [6587, 26340], 85 | [6585, 26340], 86 | [6544, 26322], 87 | [6543, 26322] 88 | ] 89 | ] 90 | }, 91 | "properties": { 92 | "classification": { 93 | "name": "G", 94 | "colorRGB": -4596839 95 | }, 96 | "isLocked": false, 97 | "measurements": [] 98 | } 99 | }, 100 | { 101 | "type": "Feature", 102 
| "id": "PathAnnotationObject", 103 | "geometry": { 104 | "type": "Polygon", 105 | "coordinates": [ 106 | [ 107 | [19664, 12851], 108 | [19621, 12876], 109 | [19573, 12912], 110 | [19546, 12948], 111 | [19542, 13002], 112 | [19556, 13048], 113 | [19561, 13055], 114 | [19569, 13061], 115 | [19608, 13091], 116 | [19641, 13091], 117 | [19676, 13077], 118 | [19721, 13045], 119 | [19749, 13002], 120 | [19762, 12925], 121 | [19732, 12858], 122 | [19664, 12851] 123 | ] 124 | ] 125 | }, 126 | "properties": { 127 | "classification": { 128 | "name": "FC", 129 | "colorRGB": -1296326 130 | }, 131 | "isLocked": false, 132 | "measurements": [] 133 | } 134 | }, 135 | { 136 | "type": "Feature", 137 | "id": "PathAnnotationObject", 138 | "geometry": { 139 | "type": "Polygon", 140 | "coordinates": [ 141 | [ 142 | [3709, 16520], 143 | [3671, 16521], 144 | [3669, 16521], 145 | [3630, 16537], 146 | [3613, 16573], 147 | [3599, 16609], 148 | [3597, 16639], 149 | [3617, 16674], 150 | [3650, 16706], 151 | [3701, 16706], 152 | [3732, 16691], 153 | [3758, 16667], 154 | [3773, 16641], 155 | [3784, 16611], 156 | [3781, 16587], 157 | [3778, 16566], 158 | [3777, 16566], 159 | [3755, 16538], 160 | [3709, 16520] 161 | ] 162 | ] 163 | }, 164 | "properties": { 165 | "classification": { 166 | "name": "FC", 167 | "colorRGB": -1296326 168 | }, 169 | "isLocked": false, 170 | "measurements": [] 171 | } 172 | } 173 | ] -------------------------------------------------------------------------------- /input/annotation_v3/cb2d976f4.tiff - VAN0006-LK-7-2-PAS_FFPE_pyr.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/cb2d976f4.tiff - VAN0006-LK-7-2-PAS_FFPE_pyr.qpdata -------------------------------------------------------------------------------- /input/annotation_v3/e79de561c.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "type": "Feature", 4 | "id": "PathAnnotationObject", 5 | "geometry": { 6 | "type": "Polygon", 7 | "coordinates": [ 8 | [ 9 | [17583, 7192], 10 | [17503, 7195], 11 | [17440, 7226], 12 | [17427, 7284], 13 | [17427, 7285], 14 | [17427, 7286], 15 | [17426, 7286], 16 | [17426, 7287], 17 | [17425, 7287], 18 | [17422, 7356], 19 | [17420, 7356], 20 | [17420, 7357], 21 | [17449, 7390], 22 | [17450, 7390], 23 | [17502, 7409], 24 | [17542, 7400], 25 | [17552, 7400], 26 | [17563, 7396], 27 | [17589, 7386], 28 | [17622, 7365], 29 | [17657, 7321], 30 | [17656, 7272], 31 | [17644, 7214], 32 | [17583, 7192] 33 | ] 34 | ] 35 | }, 36 | "properties": { 37 | "classification": { 38 | "name": "FC", 39 | "colorRGB": -1296326 40 | }, 41 | "isLocked": false, 42 | "measurements": [] 43 | } 44 | }, 45 | { 46 | "type": "Feature", 47 | "id": "PathAnnotationObject", 48 | "geometry": { 49 | "type": "Polygon", 50 | "coordinates": [ 51 | [ 52 | [18441, 4688], 53 | [18390, 4714], 54 | [18389, 4786], 55 | [18374, 4845], 56 | [18374, 4870], 57 | [18376, 4870], 58 | [18382, 4882], 59 | [18428, 4898], 60 | [18455, 4889], 61 | [18459, 4889], 62 | [18459, 4888], 63 | [18460, 4888], 64 | [18523, 4866], 65 | [18524, 4866], 66 | [18542, 4825], 67 | [18545, 4750], 68 | [18545, 4749], 69 | [18545, 4748], 70 | [18544, 4748], 71 | [18517, 4703], 72 | [18517, 4702], 73 | [18517, 4701], 74 | [18518, 4701], 75 | [18518, 4700], 76 | [18517, 4700], 77 | [18517, 4699], 78 | [18441, 4688] 79 | ] 80 | ] 81 | }, 82 | "properties": { 83 | "classification": { 84 | "name": 
"FC", 85 | "colorRGB": -1296326 86 | }, 87 | "isLocked": false, 88 | "measurements": [] 89 | } 90 | }, 91 | { 92 | "type": "Feature", 93 | "id": "PathAnnotationObject", 94 | "geometry": { 95 | "type": "Polygon", 96 | "coordinates": [ 97 | [ 98 | [10956, 4905], 99 | [10900, 4967], 100 | [10862, 5045], 101 | [10862, 5046], 102 | [10880, 5110], 103 | [10934, 5125], 104 | [10936, 5125], 105 | [11013, 5119], 106 | [11057, 5084], 107 | [11078, 5048], 108 | [11088, 4978], 109 | [11034, 4911], 110 | [11034, 4910], 111 | [10957, 4905], 112 | [10956, 4905] 113 | ] 114 | ] 115 | }, 116 | "properties": { 117 | "classification": { 118 | "name": "FC", 119 | "colorRGB": -1296326 120 | }, 121 | "isLocked": false, 122 | "measurements": [] 123 | } 124 | }, 125 | { 126 | "type": "Feature", 127 | "id": "PathAnnotationObject", 128 | "geometry": { 129 | "type": "MultiPolygon", 130 | "coordinates": [ 131 | [ 132 | [ 133 | [20218, 4579], 134 | [20162, 4631], 135 | [20175, 4700], 136 | [20218, 4731], 137 | [20275, 4753], 138 | [20276, 4753], 139 | [20276, 4753.88], 140 | [20297, 4753], 141 | [20302, 4751], 142 | [20312, 4740], 143 | [20317, 4729], 144 | [20320, 4712], 145 | [20323, 4703], 146 | [20324, 4693], 147 | [20326, 4684], 148 | [20327, 4664], 149 | [20295, 4597], 150 | [20218, 4579] 151 | ] 152 | ], 153 | [ 154 | [ 155 | [20276, 4753.88], 156 | [20273, 4754], 157 | [20276, 4754], 158 | [20276, 4753.88] 159 | ] 160 | ] 161 | ] 162 | }, 163 | "properties": { 164 | "classification": { 165 | "name": "FC", 166 | "colorRGB": -1296326 167 | }, 168 | "isLocked": false, 169 | "measurements": [] 170 | } 171 | } 172 | ] -------------------------------------------------------------------------------- /input/annotation_v3/e79de561c.tiff - default.qpdata: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/input/annotation_v3/e79de561c.tiff - default.qpdata -------------------------------------------------------------------------------- /notebooks/Image downscaling.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "**About** : This notebook is used to downscale images in the train and test set, in order to speed-up training and inference\n", 8 | " - Use the `FACTOR` parameter to specify the downscaling factor. We recommend generating data of downscaling 2 and 4.\n", 9 | " - For training data, we save extra time by also computing downscaling rles. Use the `NAME` parameter to specify which rle to downscale.\n", 10 | " - It is only require to save the downscaled images once, use the `SAVE_IMG` parameters to this extent." 
11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": null, 16 | "metadata": {}, 17 | "outputs": [], 18 | "source": [ 19 | "%load_ext autoreload\n", 20 | "%autoreload 2" 21 | ] 22 | }, 23 | { 24 | "cell_type": "markdown", 25 | "metadata": {}, 26 | "source": [ 27 | "### Imports" 28 | ] 29 | }, 30 | { 31 | "cell_type": "code", 32 | "execution_count": null, 33 | "metadata": {}, 34 | "outputs": [], 35 | "source": [ 36 | "import os\n", 37 | "import gc\n", 38 | "import cv2\n", 39 | "import sys\n", 40 | "import tifffile\n", 41 | "import numpy as np\n", 42 | "import pandas as pd\n", 43 | "\n", 44 | "from tqdm.notebook import tqdm\n", 45 | "from collections import Counter\n", 46 | "from matplotlib import pyplot as plt\n", 47 | "\n", 48 | "sys.path.append(\"../code/\")" 49 | ] 50 | }, 51 | { 52 | "cell_type": "code", 53 | "execution_count": null, 54 | "metadata": {}, 55 | "outputs": [], 56 | "source": [ 57 | "from data.dataset import load_image\n", 58 | "from utils.rle import *\n", 59 | "from params import *" 60 | ] 61 | }, 62 | { 63 | "cell_type": "code", 64 | "execution_count": null, 65 | "metadata": {}, 66 | "outputs": [], 67 | "source": [ 68 | "FACTOR = 2" 69 | ] 70 | }, 71 | { 72 | "cell_type": "markdown", 73 | "metadata": {}, 74 | "source": [ 75 | "### Train" 76 | ] 77 | }, 78 | { 79 | "cell_type": "code", 80 | "execution_count": null, 81 | "metadata": {}, 82 | "outputs": [], 83 | "source": [ 84 | "out_dir = DATA_PATH + f\"train_{FACTOR}/\"\n", 85 | "if not os.path.exists(out_dir):\n", 86 | " os.mkdir(out_dir)" 87 | ] 88 | }, 89 | { 90 | "cell_type": "code", 91 | "execution_count": null, 92 | "metadata": {}, 93 | "outputs": [], 94 | "source": [ 95 | "# NAME = \"_onlyfc\" # unhealthy class\n", 96 | "NAME = \"_fix\" # healthy class with fixed issues\n", 97 | "# NAME = \"\" # original data\n", 98 | "SAVE_IMG = False\n", 99 | "\n", 100 | "df_masks = pd.read_csv(DATA_PATH + \"train\" + NAME + \".csv\").set_index(\"id\")" 101 | ] 102 | }, 103 | { 104 | "cell_type": "code", 105 | "execution_count": null, 106 | "metadata": {}, 107 | "outputs": [], 108 | "source": [ 109 | "masks = {}\n", 110 | "\n", 111 | "for index, encs in tqdm(df_masks.iterrows(), total=len(df_masks)):\n", 112 | " # read image and generate the mask\n", 113 | " img = load_image(os.path.join(TIFF_PATH, index + \".tiff\"))\n", 114 | " mask = enc2mask(encs, (img.shape[1], img.shape[0]))\n", 115 | "\n", 116 | " if SAVE_IMG:\n", 117 | " img = cv2.resize(\n", 118 | " img,\n", 119 | " (img.shape[1] // FACTOR, img.shape[0] // FACTOR),\n", 120 | " interpolation=cv2.INTER_AREA,\n", 121 | " )\n", 122 | " tifffile.imsave(out_dir + f\"{index}.tiff\", img)\n", 123 | "\n", 124 | " mask = cv2.resize(\n", 125 | " mask,\n", 126 | " (mask.shape[1] // FACTOR, mask.shape[0] // FACTOR),\n", 127 | " interpolation=cv2.INTER_NEAREST,\n", 128 | " )\n", 129 | " \n", 130 | " rle = mask2enc(mask)\n", 131 | " \n", 132 | " masks[index] = rle\n", 133 | " \n", 134 | "# break" 135 | ] 136 | }, 137 | { 138 | "cell_type": "code", 139 | "execution_count": null, 140 | "metadata": {}, 141 | "outputs": [], 142 | "source": [ 143 | "df_masks = pd.DataFrame.from_dict(masks).T.reset_index().rename(columns={0: \"encoding\", \"index\": \"id\"})\n", 144 | "\n", 145 | "df_masks.to_csv(f\"{DATA_PATH}train_{FACTOR}{NAME}.csv\", index=False)\n", 146 | "\n", 147 | "print(f\"Saved data to {DATA_PATH}train_{FACTOR}{NAME}.csv\")" 148 | ] 149 | }, 150 | { 151 | "cell_type": "markdown", 152 | "metadata": {}, 153 | "source": [ 154 | "### Test" 155 | ] 156 | }, 157 | { 158 
| "cell_type": "code", 159 | "execution_count": null, 160 | "metadata": {}, 161 | "outputs": [], 162 | "source": [ 163 | "out_dir = DATA_PATH + f\"test_{FACTOR}/\"\n", 164 | "\n", 165 | "if not os.path.exists(out_dir):\n", 166 | " os.mkdir(out_dir)" 167 | ] 168 | }, 169 | { 170 | "cell_type": "code", 171 | "execution_count": null, 172 | "metadata": {}, 173 | "outputs": [], 174 | "source": [ 175 | "df = pd.read_csv(DATA_PATH + \"sample_submission.csv\")" 176 | ] 177 | }, 178 | { 179 | "cell_type": "code", 180 | "execution_count": null, 181 | "metadata": {}, 182 | "outputs": [], 183 | "source": [ 184 | "for index in tqdm(df['id']):\n", 185 | " # read image and generate the mask\n", 186 | " img = load_image(os.path.join(TIFF_PATH_TEST, index + \".tiff\"))\n", 187 | "\n", 188 | " img = cv2.resize(\n", 189 | " img,\n", 190 | " (img.shape[1] // FACTOR, img.shape[0] // FACTOR),\n", 191 | " interpolation=cv2.INTER_AREA,\n", 192 | " )\n", 193 | " \n", 194 | " tifffile.imsave(out_dir + f\"{index}.tiff\", img)" 195 | ] 196 | }, 197 | { 198 | "cell_type": "code", 199 | "execution_count": null, 200 | "metadata": {}, 201 | "outputs": [], 202 | "source": [ 203 | "for index in tqdm(EXTRA_IMGS):\n", 204 | " # read image and generate the mask\n", 205 | " img = load_image(os.path.join(TIFF_PATH_TEST, index + \".tiff\"))\n", 206 | "\n", 207 | " img = cv2.resize(\n", 208 | " img,\n", 209 | " (img.shape[1] // FACTOR, img.shape[0] // FACTOR),\n", 210 | " interpolation=cv2.INTER_AREA,\n", 211 | " )\n", 212 | " \n", 213 | "# tifffile.imsave(out_dir + f\"{index}.tiff\", img)" 214 | ] 215 | } 216 | ], 217 | "metadata": { 218 | "kernelspec": { 219 | "display_name": "venv", 220 | "language": "python", 221 | "name": "venv" 222 | }, 223 | "language_info": { 224 | "codemirror_mode": { 225 | "name": "ipython", 226 | "version": 3 227 | }, 228 | "file_extension": ".py", 229 | "mimetype": "text/x-python", 230 | "name": "python", 231 | "nbconvert_exporter": "python", 232 | "pygments_lexer": "ipython3", 233 | "version": "3.8.5" 234 | } 235 | }, 236 | "nbformat": 4, 237 | "nbformat_minor": 4 238 | } 239 | -------------------------------------------------------------------------------- /notebooks/Inference Test.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "**About** : This notebook performs inference on the test data. It is used to generate pseudo-labels.\n", 8 | " - Use the `log_folder` parameter to specify the experiment.\n", 9 | " - Use the `use_tta` parameter to speciy whether to use test time augmentations.\n", 10 | " - Use the `save` parameter to save predictions." 
11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": null, 16 | "metadata": {}, 17 | "outputs": [], 18 | "source": [ 19 | "# %load_ext nb_black\n", 20 | "%load_ext autoreload\n", 21 | "%autoreload 2" 22 | ] 23 | }, 24 | { 25 | "cell_type": "markdown", 26 | "metadata": {}, 27 | "source": [ 28 | "## Initialization" 29 | ] 30 | }, 31 | { 32 | "cell_type": "markdown", 33 | "metadata": {}, 34 | "source": [ 35 | "### Imports" 36 | ] 37 | }, 38 | { 39 | "cell_type": "code", 40 | "execution_count": null, 41 | "metadata": {}, 42 | "outputs": [], 43 | "source": [ 44 | "import os\n", 45 | "import sys\n", 46 | "import json\n", 47 | "import torch\n", 48 | "import warnings\n", 49 | "import numpy as np\n", 50 | "import pandas as pd\n", 51 | "\n", 52 | "from tqdm.notebook import tqdm\n", 53 | "\n", 54 | "sys.path.append(\"../code/\")\n", 55 | "# warnings.simplefilter(\"ignore\", UserWarning)" 56 | ] 57 | }, 58 | { 59 | "cell_type": "code", 60 | "execution_count": null, 61 | "metadata": {}, 62 | "outputs": [], 63 | "source": [ 64 | "from params import *\n", 65 | "from inference.main_test import k_fold_inf_test" 66 | ] 67 | }, 68 | { 69 | "cell_type": "markdown", 70 | "metadata": {}, 71 | "source": [ 72 | "### Inference" 73 | ] 74 | }, 75 | { 76 | "cell_type": "code", 77 | "execution_count": null, 78 | "metadata": {}, 79 | "outputs": [], 80 | "source": [ 81 | "log_folder = \"../logs/2021-05-01/2/\" # b1 512 2 fix\n", 82 | "# log_folder = \"../logs/2021-05-06/1/\" # b1 512 2 fix debug" 83 | ] 84 | }, 85 | { 86 | "cell_type": "code", 87 | "execution_count": null, 88 | "metadata": {}, 89 | "outputs": [], 90 | "source": [ 91 | "class Config:\n", 92 | " def __init__(self, **entries):\n", 93 | " self.__dict__.update(entries)\n", 94 | "\n", 95 | "config = json.load(open(log_folder + 'config.json', 'r'))\n", 96 | "config = Config(**config)" 97 | ] 98 | }, 99 | { 100 | "cell_type": "code", 101 | "execution_count": null, 102 | "metadata": {}, 103 | "outputs": [], 104 | "source": [ 105 | "config.overlap_factor = 1.5\n", 106 | "use_tta = False\n", 107 | "global_threshold = 0.5\n", 108 | "config.selected_folds = [0, 1, 2, 3, 4]\n", 109 | "save = False" 110 | ] 111 | }, 112 | { 113 | "cell_type": "code", 114 | "execution_count": null, 115 | "metadata": {}, 116 | "outputs": [], 117 | "source": [ 118 | "df = pd.read_csv(DATA_PATH + \"sample_submission.csv\")\n", 119 | "images = df['id'].values.tolist() + [\"VAN0003-LK-32-21-PAS_registered.ome\", \"VAN0011-RK-3-10-PAS_registered.ome\"]" 120 | ] 121 | }, 122 | { 123 | "cell_type": "code", 124 | "execution_count": null, 125 | "metadata": { 126 | "scrolled": false 127 | }, 128 | "outputs": [], 129 | "source": [ 130 | "%%time\n", 131 | "scores = k_fold_inf_test(\n", 132 | " config,\n", 133 | " images,\n", 134 | " log_folder=log_folder,\n", 135 | " use_full_size=False,\n", 136 | " global_threshold=global_threshold,\n", 137 | " use_tta=use_tta,\n", 138 | " save=save,\n", 139 | ")" 140 | ] 141 | } 142 | ], 143 | "metadata": { 144 | "kernelspec": { 145 | "display_name": "venv", 146 | "language": "python", 147 | "name": "venv" 148 | }, 149 | "language_info": { 150 | "codemirror_mode": { 151 | "name": "ipython", 152 | "version": 3 153 | }, 154 | "file_extension": ".py", 155 | "mimetype": "text/x-python", 156 | "name": "python", 157 | "nbconvert_exporter": "python", 158 | "pygments_lexer": "ipython3", 159 | "version": "3.8.5" 160 | } 161 | }, 162 | "nbformat": 4, 163 | "nbformat_minor": 4 164 | } 165 | 
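The `config.overlap_factor = 1.5` set above controls how much neighbouring tiles overlap during sliding-window inference. Assuming the convention `stride = tile_size / overlap_factor` (so 1.5 means roughly a third of each tile overlaps its neighbour — the actual tiling lives in the repo's `InferenceDataset` and may differ in details), a minimal sketch of the start positions:

```python
def tile_starts(size, tile_size, overlap_factor):
    """1-D start positions covering [0, size) with overlapping tiles."""
    stride = int(tile_size / overlap_factor)
    starts = list(range(0, max(size - tile_size, 0) + 1, stride))
    if starts[-1] + tile_size < size:      # keep the border covered by a flush tile
        starts.append(size - tile_size)
    return starts

# Example: tile_starts(2000, 512, 1.5) -> [0, 341, 682, 1023, 1364, 1488]
# Predictions are then typically averaged where tiles intersect.
```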
-------------------------------------------------------------------------------- /notebooks/Inference.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "**About** : This notebook is used to run inference on the train data and evaluate models.\n", 8 | " - Use the `log_folder` parameter to specify the experiment.\n", 9 | " - Use the `use_tta` parameter to specify whether to use test time augmentations.\n", 10 | " - Use the `save` parameter to save predictions.\n", 11 | " - Use the `save_all_tta` parameter to save predictions for each TTA.\n", 12 | " - Use the `global_threshold` parameter to tweak the threshold." 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": null, 18 | "metadata": {}, 19 | "outputs": [], 20 | "source": [ 21 | "# %load_ext nb_black\n", 22 | "%load_ext autoreload\n", 23 | "%autoreload 2" 24 | ] 25 | }, 26 | { 27 | "cell_type": "markdown", 28 | "metadata": {}, 29 | "source": [ 30 | "## Initialization" 31 | ] 32 | }, 33 | { 34 | "cell_type": "markdown", 35 | "metadata": {}, 36 | "source": [ 37 | "### Imports" 38 | ] 39 | }, 40 | { 41 | "cell_type": "code", 42 | "execution_count": null, 43 | "metadata": {}, 44 | "outputs": [], 45 | "source": [ 46 | "import os\n", 47 | "import sys\n", 48 | "import json\n", 49 | "import torch\n", 50 | "import warnings\n", 51 | "import numpy as np\n", 52 | "import pandas as pd\n", 53 | "import seaborn as sns\n", 54 | "\n", 55 | "from tqdm.notebook import tqdm\n", 56 | "from matplotlib import pyplot as plt\n", 57 | "\n", 58 | "sys.path.append(\"../code/\")\n", 59 | "# warnings.simplefilter(\"ignore\", UserWarning)" 60 | ] 61 | }, 62 | { 63 | "cell_type": "code", 64 | "execution_count": null, 65 | "metadata": {}, 66 | "outputs": [], 67 | "source": [ 68 | "from params import *\n", 69 | "\n", 70 | "from inference.main import k_fold_inf\n", 71 | "\n", 72 | "from data.dataset import InferenceDataset\n", 73 | "from data.transforms import HE_preprocess_test" 74 | ] 75 | }, 76 | { 77 | "cell_type": "markdown", 78 | "metadata": {}, 79 | "source": [ 80 | "### Main" 81 | ] 82 | }, 83 | { 84 | "cell_type": "code", 85 | "execution_count": null, 86 | "metadata": {}, 87 | "outputs": [], 88 | "source": [ 89 | "log_folder = \"../logs/2021-05-06/1/\" # 940\n", 90 | "log_folder = \"../logs/2021-05-09/2/\" # b1 last" 91 | ] 92 | }, 93 | { 94 | "cell_type": "code", 95 | "execution_count": null, 96 | "metadata": {}, 97 | "outputs": [], 98 | "source": [ 99 | "class Config:\n", 100 | " def __init__(self, **entries):\n", 101 | " self.__dict__.update(entries)\n", 102 | "\n", 103 | "config = json.load(open(log_folder + 'config.json', 'r'))\n", 104 | "config = Config(**config)" 105 | ] 106 | }, 107 | { 108 | "cell_type": "code", 109 | "execution_count": null, 110 | "metadata": { 111 | "scrolled": true 112 | }, 113 | "outputs": [], 114 | "source": [ 115 | "df = pd.read_csv(OUT_PATH + \"df_images_256_4.csv\")" 116 | ] 117 | }, 118 | { 119 | "cell_type": "code", 120 | "execution_count": null, 121 | "metadata": {}, 122 | "outputs": [], 123 | "source": [ 124 | "config.overlap_factor = 1.5\n", 125 | "use_tta = False\n", 126 | "global_threshold = 0.5\n", 127 | "config.selected_folds = [0, 1, 2, 3, 4]\n", 128 | "save = False\n", 129 | "save_all_tta = False" 130 | ] 131 | }, 132 | { 133 | "cell_type": "code", 134 | "execution_count": null, 135 | "metadata": { 136 | "scrolled": false 137 | }, 138 | "outputs": [], 139 | "source": [ 
140 | "%%time\n", 141 | "scores = k_fold_inf(\n", 142 | " config,\n", 143 | " df,\n", 144 | " log_folder=log_folder,\n", 145 | " use_full_size=False,\n", 146 | " global_threshold=global_threshold,\n", 147 | " use_tta=use_tta,\n", 148 | " save=save,\n", 149 | " save_all_tta=save_all_tta,\n", 150 | ")" 151 | ] 152 | }, 153 | { 154 | "cell_type": "code", 155 | "execution_count": null, 156 | "metadata": { 157 | "scrolled": false 158 | }, 159 | "outputs": [], 160 | "source": [ 161 | "print(f'Local CV score is {np.mean(scores):.4f} for threshold {global_threshold}') # 1.5" 162 | ] 163 | } 164 | ], 165 | "metadata": { 166 | "kernelspec": { 167 | "display_name": "venv", 168 | "language": "python", 169 | "name": "venv" 170 | }, 171 | "language_info": { 172 | "codemirror_mode": { 173 | "name": "ipython", 174 | "version": 3 175 | }, 176 | "file_extension": ".py", 177 | "mimetype": "text/x-python", 178 | "name": "python", 179 | "nbconvert_exporter": "python", 180 | "pygments_lexer": "ipython3", 181 | "version": "3.8.5" 182 | } 183 | }, 184 | "nbformat": 4, 185 | "nbformat_minor": 4 186 | } 187 | -------------------------------------------------------------------------------- /notebooks/Training.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "**About** : This notebook is used to train models.\n", 8 | " - Use the `DEBUG` parameter to launch the code in debug mode (single fold, no logging)\n", 9 | " - Specify the training parameters in the `Config` class. Feel free to experiment with the parameters, here are the main ones :\n", 10 | " - `tile_size` : Tile size\n", 11 | " - `reduce_factor` : Downscaling factor\n", 12 | " - `on_spot_sampling` : Probability to accept a random tile with in the dataset\n", 13 | " - `overlap_factor` : Tile overlapping during inference\n", 14 | " - `selected_folds` : Folds to run computations for.\n", 15 | " - `encoder` : Encoder as defined in [Segmentation Models PyTorch](https://github.com/qubvel/segmentation_models.pytorch)\n", 16 | " - `decoder` : Decoders from [Segmentation Models PyTorch](https://github.com/qubvel/segmentation_models.pytorch)\n", 17 | " - `num_classes` : Number of classes. Keep it at 2 to use the healthy and unhealthy classes\n", 18 | " - `loss` : Loss function. We use the BCE but the lovasz is also interesting\n", 19 | " - `optimizer` : Optimizer name\n", 20 | " - `batch_size` : Training batch size, adapt the `BATCH_SIZES` dictionary to your gpu\n", 21 | " - `val_bs` : Validation batch size\n", 22 | " - `epochs` : Number of training epochs\n", 23 | " - `iter_per_epoch` : Number of tiles to use per epoch\n", 24 | " - `lr` : Learning rate. 
Will be decayed linearly\n", 25 | " - `warmup_prop` : Proportion of steps to use for learning rate warmup\n", 26 | " - `mix_proba` : Probability to apply MixUp with\n", 27 | " - `mix_alpha` : Alpha parameter for MixUp\n", 28 | " - `use_pl`: Probability to sample a tile from the pseudo-labeled images\n", 29 | " - `use_external`: Probability to sample a tile from the external images\n", 30 | " - `pl_path`: Path to pseudo labels generated by `notebooks/Inference_test.ipynb`\n", 31 | " - `extra_path` : Path to extra labels generated by `notebooks/Json to Mask.ipynb`\n", 32 | " - `rle_path` : Path to train labels downscaled by `notebooks/Image downscaling.ipynb`\n" 33 | ] 34 | }, 35 | { 36 | "cell_type": "code", 37 | "execution_count": null, 38 | "metadata": {}, 39 | "outputs": [], 40 | "source": [ 41 | "# %load_ext nb_black\n", 42 | "%load_ext autoreload\n", 43 | "%autoreload 2" 44 | ] 45 | }, 46 | { 47 | "cell_type": "markdown", 48 | "metadata": {}, 49 | "source": [ 50 | "## Initialization" 51 | ] 52 | }, 53 | { 54 | "cell_type": "markdown", 55 | "metadata": {}, 56 | "source": [ 57 | "### Imports" 58 | ] 59 | }, 60 | { 61 | "cell_type": "code", 62 | "execution_count": null, 63 | "metadata": {}, 64 | "outputs": [], 65 | "source": [ 66 | "import os\n", 67 | "import sys\n", 68 | "import torch\n", 69 | "import warnings\n", 70 | "import numpy as np\n", 71 | "import pandas as pd\n", 72 | "\n", 73 | "from tqdm.notebook import tqdm\n", 74 | "\n", 75 | "sys.path.append(\"../code/\")\n", 76 | "# os.environ['CUDA_VISIBLE_DEVICES'] = \"1,0\"\n", 77 | "warnings.simplefilter(\"ignore\", UserWarning)" 78 | ] 79 | }, 80 | { 81 | "cell_type": "code", 82 | "execution_count": null, 83 | "metadata": {}, 84 | "outputs": [], 85 | "source": [ 86 | "from training.main import k_fold\n", 87 | "\n", 88 | "from utils.logger import (\n", 89 | " prepare_log_folder,\n", 90 | " save_config,\n", 91 | " create_logger,\n", 92 | " update_overall_logs,\n", 93 | ")\n", 94 | "\n", 95 | "from params import *" 96 | ] 97 | }, 98 | { 99 | "cell_type": "markdown", 100 | "metadata": {}, 101 | "source": [ 102 | "## Training" 103 | ] 104 | }, 105 | { 106 | "cell_type": "code", 107 | "execution_count": null, 108 | "metadata": {}, 109 | "outputs": [], 110 | "source": [ 111 | "BATCH_SIZES = {\n", 112 | " \"resnet18\": 64,\n", 113 | " \"resnet34\": 32,\n", 114 | " \"resnext50_32x4d\": 32,\n", 115 | " \"se_resnext50_32x4d\": 32,\n", 116 | " \"efficientnet-b0\": 32,\n", 117 | " \"efficientnet-b1\": 32,\n", 118 | " \"efficientnet-b2\": 32,\n", 119 | " \"efficientnet-b3\": 16,\n", 120 | " \"efficientnet-b4\": 16,\n", 121 | " \"efficientnet-b5\": 16,\n", 122 | " \"efficientnet-b6\": 8,\n", 123 | "}" 124 | ] 125 | }, 126 | { 127 | "cell_type": "code", 128 | "execution_count": null, 129 | "metadata": {}, 130 | "outputs": [], 131 | "source": [ 132 | "class Config:\n", 133 | " \"\"\"\n", 134 | " Parameters used for training\n", 135 | " \"\"\"\n", 136 | " # General\n", 137 | " seed = 42\n", 138 | " verbose = 1\n", 139 | "\n", 140 | " device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n", 141 | " save_weights = True\n", 142 | "\n", 143 | " # Images\n", 144 | " tile_size = 512\n", 145 | " reduce_factor = 2\n", 146 | " on_spot_sampling = 0.9\n", 147 | " overlap_factor = 1.5\n", 148 | "\n", 149 | " img_dir = DATA_PATH + f\"train_{tile_size}_red_{reduce_factor}\"\n", 150 | " mask_dir = DATA_PATH + f\"masks_{tile_size}_red_{reduce_factor}\"\n", 151 | "\n", 152 | " # k-fold\n", 153 | " cv_column = \"5fold\"\n", 154 | " random_state = 0\n", 155 | " 
selected_folds = [0, 1, 2, 3, 4]\n", 156 | "\n", 157 | " # Model\n", 158 | " encoder = \"efficientnet-b1\" # \"resnet18\", \"resnext50_32x4d\", \"resnet34\", \"efficientnet-b5\"\n", 159 | " decoder = \"Unet\" # \"Unet\", \"DeepLabV3Plus\"\n", 160 | " encoder_weights = \"imagenet\"\n", 161 | " num_classes = 2\n", 162 | "\n", 163 | " # Training\n", 164 | " loss = \"BCEWithLogitsLoss\" # \"lovasz\"\n", 165 | " activation = \"none\" if loss == \"lovasz\" else \"sigmoid\"\n", 166 | "\n", 167 | " optimizer = \"Adam\"\n", 168 | "\n", 169 | " batch_size = BATCH_SIZES[encoder]\n", 170 | "\n", 171 | " if tile_size == 512:\n", 172 | " batch_size = batch_size // 2\n", 173 | "\n", 174 | " if batch_size >= 32:\n", 175 | " epochs = 50 \n", 176 | " elif batch_size >= 16:\n", 177 | " epochs = 40\n", 178 | " elif batch_size >= 6:\n", 179 | " epochs = 30\n", 180 | " else:\n", 181 | " epochs = 25\n", 182 | " val_bs = batch_size * 2\n", 183 | " \n", 184 | " iter_per_epoch = 5000\n", 185 | " lr = 1e-3\n", 186 | " warmup_prop = 0.05\n", 187 | "\n", 188 | " first_epoch_eval = 0\n", 189 | "\n", 190 | " mix_proba = 0\n", 191 | " mix_alpha = 0.4\n", 192 | " if mix_proba > 0:\n", 193 | " epochs *= 3\n", 194 | "\n", 195 | " use_pl = 0.15\n", 196 | " use_external = 0.2\n", 197 | "\n", 198 | " pl_path = \"../logs/2021-05-01/2/\" \n", 199 | " extra_path = [\n", 200 | " \"../input/train_extra.csv\", # Class healthy\n", 201 | " \"../input/train_extra_onlyfc.csv\" # Class unhealthy\n", 202 | " ]\n", 203 | " rle_path = [\n", 204 | " f\"../input/train_{reduce_factor}_fix.csv\", # Class healthy\n", 205 | " f\"../input/train_{reduce_factor}_onlyfc.csv\", # Class unhealthy\n", 206 | " ]" 207 | ] 208 | }, 209 | { 210 | "cell_type": "code", 211 | "execution_count": null, 212 | "metadata": {}, 213 | "outputs": [], 214 | "source": [ 215 | "DEBUG = True\n", 216 | "log_folder = None" 217 | ] 218 | }, 219 | { 220 | "cell_type": "code", 221 | "execution_count": null, 222 | "metadata": { 223 | "scrolled": false 224 | }, 225 | "outputs": [], 226 | "source": [ 227 | "if not DEBUG:\n", 228 | " log_folder = prepare_log_folder(LOG_PATH)\n", 229 | " print(f\"Logging results to {log_folder}\")\n", 230 | " config_df = save_config(Config, log_folder + \"config.json\")\n", 231 | " create_logger(directory=log_folder, name=\"logs.txt\")\n", 232 | "\n", 233 | "metrics = k_fold(Config, log_folder=log_folder)" 234 | ] 235 | } 236 | ], 237 | "metadata": { 238 | "kernelspec": { 239 | "display_name": "venv", 240 | "language": "python", 241 | "name": "venv" 242 | }, 243 | "language_info": { 244 | "codemirror_mode": { 245 | "name": "ipython", 246 | "version": 3 247 | }, 248 | "file_extension": ".py", 249 | "mimetype": "text/x-python", 250 | "name": "python", 251 | "nbconvert_exporter": "python", 252 | "pygments_lexer": "ipython3", 253 | "version": "3.8.5" 254 | } 255 | }, 256 | "nbformat": 4, 257 | "nbformat_minor": 4 258 | } 259 | -------------------------------------------------------------------------------- /notebooks/Visualize Predictions.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "**About** : This notebook is used to visualize predictions on the train and test data.\n", 8 | " - It works with predictions from the two previous notebooks, as well as with a submission file.\n", 9 | " - Specify the `name`, `log_folder` and `sub` parameters according to what you want to plot."
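The notebook below imports `dice_scores_img` from `utils.metrics`, but the metrics file itself is not included in this dump. For reference, a minimal per-image Dice sketch for binary masks — the repo's exact signature and its handling of empty masks are assumptions:

```python
import numpy as np

def dice_score(pred, truth, eps=1e-8):
    """Dice = 2 * |pred & truth| / (|pred| + |truth|) for binary masks."""
    pred, truth = pred > 0, truth > 0
    inter = np.logical_and(pred, truth).sum()
    return float(2 * inter / (pred.sum() + truth.sum() + eps))
```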
10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": null, 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "%load_ext autoreload\n", 19 | "%autoreload 2" 20 | ] 21 | }, 22 | { 23 | "cell_type": "markdown", 24 | "metadata": {}, 25 | "source": [ 26 | "## Initialization" 27 | ] 28 | }, 29 | { 30 | "cell_type": "markdown", 31 | "metadata": {}, 32 | "source": [ 33 | "### Imports" 34 | ] 35 | }, 36 | { 37 | "cell_type": "code", 38 | "execution_count": null, 39 | "metadata": {}, 40 | "outputs": [], 41 | "source": [ 42 | "import os\n", 43 | "import sys\n", 44 | "import cv2\n", 45 | "import json\n", 46 | "import glob\n", 47 | "import numpy as np\n", 48 | "import pandas as pd\n", 49 | "import plotly.express as px\n", 50 | "\n", 51 | "from tqdm.notebook import tqdm\n", 52 | "\n", 53 | "sys.path.append(\"../code/\")" 54 | ] 55 | }, 56 | { 57 | "cell_type": "code", 58 | "execution_count": null, 59 | "metadata": {}, 60 | "outputs": [], 61 | "source": [ 62 | "from params import *\n", 63 | "from utils.rle import *\n", 64 | "\n", 65 | "from data.dataset import load_image\n", 66 | "\n", 67 | "from utils.metrics import dice_scores_img\n", 68 | "from utils.plots import plot_heatmap_preds, plot_contours_preds" 69 | ] 70 | }, 71 | { 72 | "cell_type": "markdown", 73 | "metadata": {}, 74 | "source": [ 75 | "### Data" 76 | ] 77 | }, 78 | { 79 | "cell_type": "code", 80 | "execution_count": null, 81 | "metadata": {}, 82 | "outputs": [], 83 | "source": [ 84 | "THRESHOLD = 0.5" 85 | ] 86 | }, 87 | { 88 | "cell_type": "code", 89 | "execution_count": null, 90 | "metadata": {}, 91 | "outputs": [], 92 | "source": [ 93 | "df_info = pd.read_csv(DATA_PATH + f\"HuBMAP-20-dataset_information.csv\")\n", 94 | "df_mask = pd.read_csv(DATA_PATH + \"train_2_fix.csv\")\n", 95 | "df_test = pd.read_csv(DATA_PATH + \"sample_submission.csv\")" 96 | ] 97 | }, 98 | { 99 | "cell_type": "markdown", 100 | "metadata": {}, 101 | "source": [ 102 | "### Experiment" 103 | ] 104 | }, 105 | { 106 | "cell_type": "code", 107 | "execution_count": null, 108 | "metadata": {}, 109 | "outputs": [], 110 | "source": [ 111 | "log_folder = \"../logs/2021-05-06/1/\" # b1" 112 | ] 113 | }, 114 | { 115 | "cell_type": "code", 116 | "execution_count": null, 117 | "metadata": {}, 118 | "outputs": [], 119 | "source": [ 120 | "class Config:\n", 121 | " def __init__(self, **entries):\n", 122 | " self.__dict__.update(entries)\n", 123 | "\n", 124 | "config = json.load(open(log_folder + 'config.json', 'r'))\n", 125 | "config = Config(**config)" 126 | ] 127 | }, 128 | { 129 | "cell_type": "code", 130 | "execution_count": null, 131 | "metadata": {}, 132 | "outputs": [], 133 | "source": [ 134 | "preds = glob.glob(log_folder + \"pred_*.npy\")" 135 | ] 136 | }, 137 | { 138 | "cell_type": "markdown", 139 | "metadata": {}, 140 | "source": [ 141 | "## Train viz" 142 | ] 143 | }, 144 | { 145 | "cell_type": "code", 146 | "execution_count": null, 147 | "metadata": {}, 148 | "outputs": [], 149 | "source": [ 150 | "mask_name = \"b2dc8411c\" " 151 | ] 152 | }, 153 | { 154 | "cell_type": "code", 155 | "execution_count": null, 156 | "metadata": {}, 157 | "outputs": [], 158 | "source": [ 159 | "idx = [i for i, path in enumerate(preds) if mask_name in path][0]\n", 160 | "probs = np.load(preds[idx]).astype(np.float32)\n", 161 | "\n", 162 | "if len(probs.shape) == 3:\n", 163 | " probs = probs.mean(0)\n", 164 | "\n", 165 | "pred = (probs > THRESHOLD).astype(np.uint8)" 166 | ] 167 | }, 168 | { 169 | "cell_type": "code", 170 | "execution_count": null, 171 | 
"metadata": {}, 172 | "outputs": [], 173 | "source": [ 174 | "img = load_image(os.path.join(TIFF_PATH_2, mask_name + \".tiff\"), full_size=False, reduce_factor=2)" 175 | ] 176 | }, 177 | { 178 | "cell_type": "code", 179 | "execution_count": null, 180 | "metadata": {}, 181 | "outputs": [], 182 | "source": [ 183 | "rle = df_mask[df_mask['id'] == mask_name]['encoding']\n", 184 | "mask = enc2mask(rle, (img.shape[1], img.shape[0]))" 185 | ] 186 | }, 187 | { 188 | "cell_type": "markdown", 189 | "metadata": {}, 190 | "source": [ 191 | "### Plot" 192 | ] 193 | }, 194 | { 195 | "cell_type": "code", 196 | "execution_count": null, 197 | "metadata": {}, 198 | "outputs": [], 199 | "source": [ 200 | "w = 1000\n", 201 | "h = int(w * mask.shape[0] / mask.shape[1])" 202 | ] 203 | }, 204 | { 205 | "cell_type": "code", 206 | "execution_count": null, 207 | "metadata": { 208 | "scrolled": false 209 | }, 210 | "outputs": [], 211 | "source": [ 212 | "# fig = plot_contours_preds(img, pred, mask, w=1, downsize=4)\n", 213 | "\n", 214 | "# fig.update_layout(\n", 215 | "# autosize=False,\n", 216 | "# width=w,\n", 217 | "# height=h,\n", 218 | "# )\n", 219 | "\n", 220 | "# fig.show()" 221 | ] 222 | }, 223 | { 224 | "cell_type": "code", 225 | "execution_count": null, 226 | "metadata": { 227 | "scrolled": false 228 | }, 229 | "outputs": [], 230 | "source": [ 231 | "# fig = plot_heatmap_preds(img, probs, mask, w=1, downsize=4)\n", 232 | "\n", 233 | "# fig.update_layout(\n", 234 | "# autosize=False,\n", 235 | "# width=w,\n", 236 | "# height=h,\n", 237 | "# )\n", 238 | "\n", 239 | "# fig.show()" 240 | ] 241 | }, 242 | { 243 | "cell_type": "markdown", 244 | "metadata": {}, 245 | "source": [ 246 | "## Test viz" 247 | ] 248 | }, 249 | { 250 | "cell_type": "markdown", 251 | "metadata": {}, 252 | "source": [ 253 | "### Load image" 254 | ] 255 | }, 256 | { 257 | "cell_type": "code", 258 | "execution_count": null, 259 | "metadata": {}, 260 | "outputs": [], 261 | "source": [ 262 | "name = 'aa05346ff'\n", 263 | "fold = 0\n", 264 | "\n", 265 | "img = load_image(f'{DATA_PATH}/test_4/{name}.tiff', full_size=False)" 266 | ] 267 | }, 268 | { 269 | "cell_type": "markdown", 270 | "metadata": {}, 271 | "source": [ 272 | "### Using the output of the test inference notebook" 273 | ] 274 | }, 275 | { 276 | "cell_type": "code", 277 | "execution_count": null, 278 | "metadata": {}, 279 | "outputs": [], 280 | "source": [ 281 | "probs = np.load(log_folder + f'pred_{name}_{fold}.npy').astype(np.float32)\n", 282 | "pred = (probs > THRESHOLD).astype(np.uint8)" 283 | ] 284 | }, 285 | { 286 | "cell_type": "markdown", 287 | "metadata": {}, 288 | "source": [ 289 | "### Using submissions" 290 | ] 291 | }, 292 | { 293 | "cell_type": "code", 294 | "execution_count": null, 295 | "metadata": { 296 | "scrolled": true 297 | }, 298 | "outputs": [], 299 | "source": [ 300 | "shape = df_info[df_info['image_file'] == name + '.tiff'][['width_pixels', 'height_pixels']].values[0]" 301 | ] 302 | }, 303 | { 304 | "cell_type": "code", 305 | "execution_count": null, 306 | "metadata": {}, 307 | "outputs": [], 308 | "source": [ 309 | "sub = pd.read_csv(OUT_PATH + \"submission_combine.csv\")\n", 310 | "rle = sub[sub['id'] == name]['predicted'].values\n", 311 | "\n", 312 | "sub_mask = enc2mask(rle, shape)\n", 313 | "\n", 314 | "sub_mask = cv2.resize(\n", 315 | " sub_mask,\n", 316 | " tuple(list(shape // 4)),\n", 317 | " interpolation=cv2.INTER_NEAREST,\n", 318 | ")" 319 | ] 320 | }, 321 | { 322 | "cell_type": "markdown", 323 | "metadata": {}, 324 | "source": [ 325 | "You can 
plot two submissions to compare them" 326 | ] 327 | }, 328 | { 329 | "cell_type": "code", 330 | "execution_count": null, 331 | "metadata": {}, 332 | "outputs": [], 333 | "source": [ 334 | "# sub = pd.read_csv(\"YOUR_SUB_NAME.csv\")\n", 335 | "# rle = sub[sub['id'] == name]['predicted'].values\n", 336 | "\n", 337 | "# sub_mask2 = enc2mask(rle, shape)\n", 338 | "\n", 339 | "# sub_mask2 = cv2.resize(\n", 340 | "# sub_mask2,\n", 341 | "# tuple(list(shape // 4)),\n", 342 | "# interpolation=cv2.INTER_NEAREST,\n", 343 | "# )" 344 | ] 345 | }, 346 | { 347 | "cell_type": "markdown", 348 | "metadata": {}, 349 | "source": [ 350 | "### Plot" 351 | ] 352 | }, 353 | { 354 | "cell_type": "code", 355 | "execution_count": null, 356 | "metadata": {}, 357 | "outputs": [], 358 | "source": [ 359 | "w = 1000\n", 360 | "h = int(w * pred.shape[0] / pred.shape[1])" 361 | ] 362 | }, 363 | { 364 | "cell_type": "code", 365 | "execution_count": null, 366 | "metadata": {}, 367 | "outputs": [], 368 | "source": [ 369 | "fig = plot_contours_preds(img, sub_mask, w=2, downsize=2) # 1 sub\n", 370 | "# fig = plot_contours_preds(img, sub_mask, mask=sub_mask2, w=2, downsize=2) # 2 subs\n", 371 | "\n", 372 | "fig.update_layout(\n", 373 | " autosize=False,\n", 374 | " width=w,\n", 375 | " height=h,\n", 376 | ")\n", 377 | "\n", 378 | "fig.show()" 379 | ] 380 | } 381 | ], 382 | "metadata": { 383 | "kernelspec": { 384 | "display_name": "venv", 385 | "language": "python", 386 | "name": "venv" 387 | }, 388 | "language_info": { 389 | "codemirror_mode": { 390 | "name": "ipython", 391 | "version": 3 392 | }, 393 | "file_extension": ".py", 394 | "mimetype": "text/x-python", 395 | "name": "python", 396 | "nbconvert_exporter": "python", 397 | "pygments_lexer": "ipython3", 398 | "version": "3.8.5" 399 | } 400 | }, 401 | "nbformat": 4, 402 | "nbformat_minor": 4 403 | } 404 | -------------------------------------------------------------------------------- /overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Optimox/HubMap/16da543e4978b50dc02c7eb9158cef7f83f82d78/overview.png --------------------------------------------------------------------------------
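One last gap worth filling: `Training.ipynb` exposes `mix_proba` and `mix_alpha`, but `code/training/mix.py` is not reproduced above. A minimal MixUp sketch for segmentation, assuming the masks are mixed with the same coefficient as the images (the repo's implementation may handle labels differently):

```python
import numpy as np
import torch

def mixup(x, y, alpha=0.4):
    """MixUp for segmentation: x (N, C, H, W) images, y (N, K, H, W) masks."""
    lam = float(np.random.beta(alpha, alpha))  # mixing coefficient ~ Beta(alpha, alpha)
    perm = torch.randperm(x.size(0))           # pair each sample with a shuffled one
    x_mix = lam * x + (1 - lam) * x[perm]
    y_mix = lam * y + (1 - lam) * y[perm]
    return x_mix, y_mix
```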