├── .gitmodules
├── .idea
├── .gitignore
├── codeStyles
│ ├── codeStyleConfig.xml
│ └── Project.xml
├── copyright
│ ├── profiles_settings.xml
│ └── MIT_license.xml
├── other.xml
├── modules.xml
├── vcs.xml
├── InnerEye-DeepLearning.iml
├── runConfigurations
│ ├── Template__Tensorboard_monitoring.xml
│ ├── Template__Cross_Validation.xml
│ ├── Template__Visualize_Patch_Sampling.xml
│ ├── Template__Plot_Cross_Validation_Results.xml
│ ├── Template__Run_ML_on_local_machine.xml
│ ├── Template__Submit_image_for_inference.xml
│ ├── pytest_all_simple_tests.xml
│ ├── Template__Run_ML_on_AzureML.xml
│ └── Template__normalize_and_visualize_dataset.xml
└── inspectionProfiles
│ └── Project_Default.xml
├── .gitconfig
├── RegressionTestResults
├── PR_HelloContainer
│ ├── OUTPUT
│ │ └── test_mse.txt
│ └── AZUREML_OUTPUT
│ │ └── final_model
│ │ └── model_inference_config.json
├── PR_BasicModel2Epochs
│ ├── OUTPUT
│ │ ├── best_validation_epoch
│ │ │ ├── Test
│ │ │ │ ├── image_channel_ids.txt
│ │ │ │ ├── ground_truth_ids.txt
│ │ │ │ ├── dataset_id.txt
│ │ │ │ ├── BaselineComparisonWilcoxonSignedRankTestResults.txt
│ │ │ │ ├── metrics.csv
│ │ │ │ ├── metrics_boxplot.png
│ │ │ │ ├── scatterplots
│ │ │ │ │ └── Single_vs_CURRENT.png
│ │ │ │ ├── thumbnails
│ │ │ │ │ ├── 005_lung_l_slice_053.png
│ │ │ │ │ ├── 005_lung_r_slice_037.png
│ │ │ │ │ └── 005_spinalcord_slice_088.png
│ │ │ │ ├── 005
│ │ │ │ │ ├── background.nii.gz
│ │ │ │ │ └── posterior_lung_l.nii.gz
│ │ │ │ ├── metrics_aggregates.csv
│ │ │ │ └── MetricsAcrossAllRuns.csv
│ │ │ └── Val
│ │ │ │ ├── metrics.csv
│ │ │ │ ├── metrics_boxplot.png
│ │ │ │ ├── thumbnails
│ │ │ │ │ ├── 002_lung_l_slice_069.png
│ │ │ │ │ ├── 002_lung_r_slice_052.png
│ │ │ │ │ └── 002_spinalcord_slice_113.png
│ │ │ │ ├── 002
│ │ │ │ │ ├── background.nii.gz
│ │ │ │ │ └── posterior_lung_r.nii.gz
│ │ │ │ └── metrics_aggregates.csv
│ │ ├── patch_sampling
│ │ │ ├── 0_sampled_patches_dim2.png
│ │ │ ├── 0_sampled_patches_dim0.png
│ │ │ └── 0_sampled_patches_dim1.png
│ │ ├── Val
│ │ │ └── epoch_metrics.csv
│ │ └── Train
│ │ │ └── epoch_metrics.csv
│ └── AZUREML_OUTPUT
│ │ └── final_model
│ │ └── model_inference_config.json
├── PR_TrainEnsemble
│ ├── AZUREML_PARENT_OUTPUT
│ │ ├── CrossValResults
│ │ │ ├── 0
│ │ │ │ └── Test
│ │ │ │ │ └── metrics.csv
│ │ │ ├── 1
│ │ │ │ └── Test
│ │ │ │ │ └── metrics.csv
│ │ │ ├── CrossValidationMannWhitneyTestResults.txt
│ │ │ ├── CrossValidationWilcoxonSignedRankTestResults.txt
│ │ │ ├── Test_outliers.txt
│ │ │ └── ENSEMBLE
│ │ │ │ └── Test
│ │ │ │ └── metrics.csv
│ │ ├── best_validation_epoch
│ │ │ └── Test
│ │ │ │ ├── metrics.csv
│ │ │ │ ├── thumbnails
│ │ │ │ │ └── 005_spinalcord_slice_088.png
│ │ │ │ └── 005
│ │ │ │ │ └── posterior_lung_r.nii.gz
│ │ └── final_ensemble_model
│ │ │ └── model_inference_config.json
│ └── OUTPUT
│ │ ├── Val
│ │ │ └── epoch_metrics.csv
│ │ └── Train
│ │ │ └── epoch_metrics.csv
├── PR_Train2Nodes
│ └── OUTPUT
│ │ ├── best_validation_epoch
│ │ │ ├── Test
│ │ │ │ ├── BaselineComparisonWilcoxonSignedRankTestResults.txt
│ │ │ │ ├── scatterplots
│ │ │ │ │ └── Single_vs_CURRENT.png
│ │ │ │ └── metrics.csv
│ │ │ └── Val
│ │ │ │ └── metrics.csv
│ │ ├── Val
│ │ │ └── epoch_metrics.csv
│ │ └── Train
│ │ │ └── epoch_metrics.csv
├── PR_GlaucomaCV
│ └── AZUREML_PARENT_OUTPUT
│ │ ├── final_ensemble_model
│ │ │ └── model_inference_config.json
│ │ └── CrossValResults
│ │ │ └── metrics_aggregates.csv
└── PR_LungRegression
│ └── OUTPUT
│ │ ├── test_dataset.csv
│ │ └── val_dataset.csv
├── .coveragerc
├── Tests
├── ML
│ ├── test_data
│ │ ├── plot_cross_validation
│ │ │ ├── HD_cfff5ceb-a227-41d6-a23c-0ebbc33b6301
│ │ │ │ └── dummy.txt
│ │ │ ├── multi_label_sequence_in_crossval
│ │ │ │ ├── 0
│ │ │ │ │ ├── Val
│ │ │ │ │ │ ├── metrics.csv
│ │ │ │ │ │ └── epoch_metrics.csv
│ │ │ │ │ └── dataset.csv
│ │ │ │ └── 1
│ │ │ │ │ └── Val
│ │ │ │ │ │ ├── metrics.csv
│ │ │ │ │ │ └── epoch_metrics.csv
│ │ │ └── main_1570466706163110
│ │ │ │ ├── 0
│ │ │ │ │ ├── Test
│ │ │ │ │ │ └── metrics.csv
│ │ │ │ │ └── Val
│ │ │ │ │ │ └── metrics.csv
│ │ │ │ ├── 1
│ │ │ │ │ ├── Test
│ │ │ │ │ │ └── metrics.csv
│ │ │ │ │ └── Val
│ │ │ │ │ │ └── metrics.csv
│ │ │ │ └── ENSEMBLE
│ │ │ │ │ └── Test
│ │ │ │ │ │ └── metrics.csv
│ │ ├── test_good.nii.gz
│ │ ├── test_img.nii.gz
│ │ ├── posterior_bladder.nii.gz
│ │ ├── test_dataset_parameters.csv
│ │ ├── full_header_csv
│ │ │ ├── ptv.nii.gz
│ │ │ └── rectum.nii.gz
│ │ ├── scale_and_unscale_image.nii.gz
│ │ ├── ResultsByMode.csv
│ │ ├── data.h5
│ │ ├── patch_sampling
│ │ │ ├── scan_small.nii.gz
│ │ │ ├── sampled_center.nii.gz
│ │ │ ├── overlay_0.png
│ │ │ ├── overlay_1.png
│ │ │ ├── overlay_2.png
│ │ │ ├── sampled_to_boundary.nii.gz
│ │ │ ├── sampling_2d.npy
│ │ │ ├── sampling_2d.png
│ │ │ ├── sampled_center.npy
│ │ │ ├── 123_sampled_patches_dim0.png
│ │ │ ├── 123_sampled_patches_dim1.png
│ │ │ ├── 123_sampled_patches_dim2.png
│ │ │ ├── sampled_to_boundary.npy
│ │ │ ├── overlay_with_aspect_dim0.png
│ │ │ ├── overlay_with_aspect_dim1.png
│ │ │ └── overlay_with_aspect_dim2.png
│ │ ├── smoothed_posterior_bladder.nii.gz
│ │ ├── 042_slice_001.png
│ │ ├── test_good.npz
│ │ ├── train_and_test_data
│ │ │ ├── id1_mask.nii.gz
│ │ │ ├── id2_mask.nii.gz
│ │ │ ├── id1_channel1.nii.gz
│ │ │ ├── id1_channel2.nii.gz
│ │ │ ├── id1_region.nii.gz
│ │ │ ├── id2_channel1.nii.gz
│ │ │ ├── id2_channel2.nii.gz
│ │ │ ├── id2_region.nii.gz
│ │ │ ├── checkpoints
│ │ │ │ └── 1_checkpoint.pth.tar
│ │ │ ├── metrics.csv
│ │ │ ├── metrics_aggregates.csv
│ │ │ ├── scalar_epoch_metrics.csv
│ │ │ └── scalar_prediction_target_metrics.csv
│ │ ├── classification_data
│ │ │ ├── id1_channel1.nii.gz
│ │ │ ├── 4be9beed-5861-fdd2-72c2-8dd89aadc1ef.h5
│ │ │ ├── 61bc9d73-9fbb-bd7d-c06b-eeffbafabcc4.h5
│ │ │ ├── 6ceacaf8-abd2-ffec-2ade-d52afd6dd1be.h5
│ │ │ └── dataset.csv
│ │ ├── dicom_series_data
│ │ │ ├── hnsegmentation.nii.gz
│ │ │ └── HN
│ │ │ │ ├── (134).dcm
│ │ │ │ ├── (135).dcm
│ │ │ │ └── (137).dcm
│ │ ├── image_and_contour.png
│ │ ├── 042_slice_001_contour.png
│ │ ├── dice_per_epoch_3classes.png
│ │ ├── image_scaled_and_contour.png
│ │ ├── classification_data_2d
│ │ │ ├── im1.npy
│ │ │ ├── im2.npy
│ │ │ └── dataset.csv
│ │ ├── dice_per_epoch_15classes.png
│ │ ├── histo_heatmaps
│ │ │ ├── score_hist.png
│ │ │ ├── slide_0.1.png
│ │ │ ├── slide_1.2.png
│ │ │ ├── slide_2.4.png
│ │ │ ├── slide_3.6.png
│ │ │ ├── confusion_matrix_1.png
│ │ │ ├── confusion_matrix_3.png
│ │ │ └── heatmap_overlay.png
│ │ ├── image_and_multiple_contours.png
│ │ ├── prefix042_class1_slice_001.png
│ │ ├── prefix042_class2_slice_002.png
│ │ ├── classification_data_generated_random
│ │ │ ├── im1.npy
│ │ │ ├── im2.npy
│ │ │ ├── im3.npy
│ │ │ ├── im4.npy
│ │ │ └── dataset.csv
│ │ ├── classification_data_multiclass
│ │ │ ├── 1_blue.png
│ │ │ ├── 1_green.png
│ │ │ └── dataset.csv
│ │ ├── elastic_transformed_image_and_contour.png
│ │ ├── gamma_transformed_image_and_contour.png
│ │ ├── hdf5_data
│ │ │ ├── patient_hdf5s
│ │ │ │ ├── 4be9beed-5861-fdd2-72c2-8dd89aadc1ef.h5
│ │ │ │ ├── 61bc9d73-9fbb-bd7d-c06b-eeffbafabcc4.h5
│ │ │ │ ├── 6ceacaf8-abd2-ffec-2ade-d52afd6dd1be.h5
│ │ │ │ ├── 75eb4c55-debe-f906-fdff-a8ca3faf1ac5.h5
│ │ │ │ ├── a46ebddd-b85f-de45-d8e3-9cdb10dbaf5f.h5
│ │ │ │ ├── b3200426-1a58-bfea-4aba-cbacbe66ea5e.h5
│ │ │ │ ├── cacbf4ab-dcbf-bf1e-37bc-f1c4a5aecef1.h5
│ │ │ │ ├── d316cfe5-e62a-3c0e-afda-72c3cf5ea2d8.h5
│ │ │ │ ├── e7a8c8c6-ae38-e42c-cdc4-a035c67ea122.h5
│ │ │ │ └── fa1fcda8-bbad-d4c1-1235-1a6631abec5a.h5
│ │ │ ├── dataset_missing_values.csv
│ │ │ └── dataset.csv
│ │ ├── metrics_aggregates.csv
│ │ ├── test_aggregate_metrics_classification
│ │ │ ├── Val
│ │ │ │ ├── metrics.csv.rank0
│ │ │ │ ├── metrics.csv.rank1
│ │ │ │ └── expected_metrics.csv
│ │ │ └── Train
│ │ │ │ ├── metrics.csv.rank0
│ │ │ │ ├── metrics.csv.rank1
│ │ │ │ └── expected_metrics.csv
│ │ ├── ResultsByModeAndStructure.csv
│ │ ├── dataset_with_full_header.csv
│ │ ├── sequence_data_for_classification
│ │ │ └── dataset.csv
│ │ └── dataset.csv
│ ├── reports
│ │ ├── val_metrics_classification.csv
│ │ └── test_metrics_classification.csv
│ ├── configs
│ │ └── BasicModel2EpochsOutsidePackage.py
│ ├── visualizers
│ │ ├── test_reliability_curve.py
│ │ └── test_regression_visualization.py
│ ├── utils
│ │ ├── test_dataset_utils.py
│ │ ├── test_model_metadata_util.py
│ │ ├── test_transforms.py
│ │ └── test_layer_utils.py
│ ├── test_normalize_and_visualize.py
│ ├── pipelines
│ │ └── test_ensemble.py
│ └── models
│ │ └── losses
│ │ └── test_mixture_loss.py
├── Scripts
│ ├── script_for_tests.py
│ ├── test_move_model.py
│ └── test_run_scoring.py
├── Azure
│ ├── test_recovery_id.py
│ └── test_download_pytest.py
└── SSL
│ └── test_encoders.py
├── azure-pipelines
├── checkout.yml
├── azureml-conda-environment.yml
├── train.yaml
├── store_settings.yml
├── checkout_windows.yml
├── prepare_conda.yml
├── build_windows.yaml
├── tests_after_training.yml
├── train_template.yml
├── cancel_aml_jobs.yml
└── inner_eye_env.yml
├── docs
├── source
│ ├── rst
│ │ └── api
│ │ │ ├── ML
│ │ │ ├── SSL
│ │ │ │ ├── utils.rst
│ │ │ │ ├── lightning_containers.rst
│ │ │ │ ├── datamodules_and_datasets.rst
│ │ │ │ └── lightning_modules.rst
│ │ │ ├── models
│ │ │ │ ├── blocks.rst
│ │ │ │ ├── losses.rst
│ │ │ │ ├── layers.rst
│ │ │ │ └── architectures.rst
│ │ │ ├── core
│ │ │ │ ├── testing.rst
│ │ │ │ ├── metrics.rst
│ │ │ │ ├── training.rst
│ │ │ │ ├── common.rst
│ │ │ │ ├── visualization.rst
│ │ │ │ ├── configs.rst
│ │ │ │ └── lightning.rst
│ │ │ ├── configs
│ │ │ │ ├── other.rst
│ │ │ │ ├── regression.rst
│ │ │ │ ├── classification.rst
│ │ │ │ ├── ssl.rst
│ │ │ │ └── segmentation.rst
│ │ │ ├── models.rst
│ │ │ ├── pipelines.rst
│ │ │ ├── core.rst
│ │ │ ├── configs.rst
│ │ │ ├── SSL.rst
│ │ │ ├── ML.rst
│ │ │ ├── augmentations.rst
│ │ │ ├── dataset.rst
│ │ │ ├── visualizers.rst
│ │ │ └── utils.rst
│ │ │ ├── index.rst
│ │ │ ├── Scripts
│ │ │ ├── move_model.rst
│ │ │ ├── scripts.rst
│ │ │ ├── runner.rst
│ │ │ └── submit_for_inference.rst
│ │ │ ├── Azure
│ │ │ └── azure.rst
│ │ │ └── Common
│ │ │ └── common.rst
│ ├── images
│ │ ├── deployment.png
│ │ ├── patch_sampling_example.png
│ │ ├── hippocampus_metrics_boxplot.png
│ │ └── screenshot_azureml_patch_sampling.png
│ ├── md
│ │ ├── releases.md
│ │ ├── testing.md
│ │ └── model_diagnostics.md
│ └── index.rst
├── README.md
├── Makefile
└── make.bat
├── InnerEye
├── ML
│ ├── .amlignore
│ ├── configs
│ │ ├── segmentation
│ │ │ ├── BasicModel2Epochs1Channel.py
│ │ │ └── BasicModel2EpochsMoreData.py
│ │ ├── ssl
│ │ │ ├── cxr_linear_head_augmentations.yaml
│ │ │ ├── CIFAR_classifier_configs.py
│ │ │ ├── cxr_ssl_encoder_augmentations.yaml
│ │ │ └── CovidContainers.py
│ │ └── regression
│ │ │ └── LungRegression.py
│ ├── models
│ │ ├── layers
│ │ │ └── identity.py
│ │ └── losses
│ │ │ └── mixture.py
│ ├── utils
│ │ ├── model_metadata_util.py
│ │ └── run_recovery.py
│ ├── SSL
│ │ ├── datamodules_and_datasets
│ │ │ ├── cifar_datasets.py
│ │ │ └── dataset_cls_utils.py
│ │ └── lightning_modules
│ │ │ └── byol
│ │ │ └── byol_models.py
│ ├── model_inference_config.py
│ └── visualizers
│ │ └── regression_visualization.py
├── settings.yml
├── README.md
├── Common
│ ├── type_annotations.py
│ ├── fixed_paths_for_tests.py
│ ├── output_directories.py
│ └── spawn_subprocess.py
├── Azure
│ └── parser_util.py
└── Scripts
│ └── check_annotation_quality.py
├── .flake8
├── .github
├── ISSUE_TEMPLATE
│ ├── config.yml
│ ├── feature_request.md
│ └── documentation_issue.yml
├── workflows
│ ├── check-pr-title.yml
│ ├── issues_to_ado.yml
│ ├── main.yml
│ └── codeql-analysis.yml
└── pull_request_template.md
├── mypy.ini
├── InnerEye-DataQuality
└── README.md
├── .readthedocs.yaml
├── CODE_OF_CONDUCT.md
├── .vscode
└── settings.json
├── .gitattributes
├── .amlignore
├── .amltignore
├── .pre-commit-config.yaml
├── pytest.ini
├── LICENSE
├── GeoPol.xml
├── create_and_lock_environment.sh
├── conftest.py
└── TestSubmodule
└── test_submodule_runner.py
/.gitmodules:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 |
--------------------------------------------------------------------------------
/.gitconfig:
--------------------------------------------------------------------------------
1 | [alias]
2 | deepclean = clean -xfd --exclude=.idea/
3 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_HelloContainer/OUTPUT/test_mse.txt:
--------------------------------------------------------------------------------
1 | 0.012701375409960747
2 |
--------------------------------------------------------------------------------
/.coveragerc:
--------------------------------------------------------------------------------
1 | [run]
2 | omit =
3 | Tests/*
4 | TestsOutsidePackage/*
5 | TestSubmodule/*
6 |
--------------------------------------------------------------------------------
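Note: the [run] section above excludes the test packages from coverage measurement. A minimal sketch of exercising this configuration programmatically with the coverage package; the measured statement is a stand-in, not the repository's actual test invocation.

    # Sketch only: assumes the "coverage" package is installed and this .coveragerc
    # sits in the working directory, so the omit patterns above are picked up.
    import coverage

    cov = coverage.Coverage(config_file=".coveragerc")
    cov.start()
    total = sum(range(10))   # stand-in for the real test run, e.g. pytest.main([...])
    cov.stop()
    cov.report()             # Tests/, TestsOutsidePackage/ and TestSubmodule/ are omitted

--------------------------------------------------------------------------------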
/Tests/ML/test_data/plot_cross_validation/HD_cfff5ceb-a227-41d6-a23c-0ebbc33b6301/dummy.txt:
--------------------------------------------------------------------------------
1 | dummy
2 |
--------------------------------------------------------------------------------
/azure-pipelines/checkout.yml:
--------------------------------------------------------------------------------
1 | steps:
2 | - checkout: self
3 | lfs: true
4 | submodules: true
5 |
--------------------------------------------------------------------------------
/docs/source/rst/api/ML/SSL/utils.rst:
--------------------------------------------------------------------------------
1 | SSL Utils
2 | ==========
3 |
4 | .. automodule:: InnerEye.ML.SSL.utils
5 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_BasicModel2Epochs/OUTPUT/best_validation_epoch/Test/image_channel_ids.txt:
--------------------------------------------------------------------------------
1 | ct
2 | heart
3 |
--------------------------------------------------------------------------------
/InnerEye/ML/.amlignore:
--------------------------------------------------------------------------------
1 | .ipynb_checkpoints
2 | azureml-logs
3 | .azureml
4 | .git
5 | outputs
6 | azureml-setup
7 | docs
8 |
--------------------------------------------------------------------------------
/docs/source/rst/api/ML/models/blocks.rst:
--------------------------------------------------------------------------------
1 | Blocks
2 | ======
3 |
4 | .. automodule:: InnerEye.ML.models.blocks.residual
5 |
--------------------------------------------------------------------------------
/docs/source/rst/api/ML/core/testing.rst:
--------------------------------------------------------------------------------
1 | Testing Models
2 | ==============
3 |
4 | .. automodule:: InnerEye.ML.model_testing
5 |
--------------------------------------------------------------------------------
/docs/source/rst/api/index.rst:
--------------------------------------------------------------------------------
1 | .. toctree::
2 |
3 | ML/ML
4 | Azure/azure
5 | Scripts/scripts
6 | Common/common
7 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_BasicModel2Epochs/OUTPUT/best_validation_epoch/Test/ground_truth_ids.txt:
--------------------------------------------------------------------------------
1 | spinalcord
2 | lung_r
3 | lung_l
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/test_good.nii.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/InnerEye-DeepLearning/HEAD/Tests/ML/test_data/test_good.nii.gz
--------------------------------------------------------------------------------
/Tests/ML/test_data/test_img.nii.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/InnerEye-DeepLearning/HEAD/Tests/ML/test_data/test_img.nii.gz
--------------------------------------------------------------------------------
/Tests/ML/test_data/posterior_bladder.nii.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/InnerEye-DeepLearning/HEAD/Tests/ML/test_data/posterior_bladder.nii.gz
--------------------------------------------------------------------------------
/Tests/ML/test_data/test_dataset_parameters.csv:
--------------------------------------------------------------------------------
1 | 1,2,crop_sizes
2 | [ 37 37 37],[ 37 37 37],"[55, 55, 55]"
3 | [ 37 37 37],[ 37 37 37],"[55, 55, 55]"
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/full_header_csv/ptv.nii.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/InnerEye-DeepLearning/HEAD/Tests/ML/test_data/full_header_csv/ptv.nii.gz
--------------------------------------------------------------------------------
/Tests/ML/test_data/full_header_csv/rectum.nii.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/InnerEye-DeepLearning/HEAD/Tests/ML/test_data/full_header_csv/rectum.nii.gz
--------------------------------------------------------------------------------
/docs/source/rst/api/ML/core/metrics.rst:
--------------------------------------------------------------------------------
1 | Metrics
2 | ========
3 |
4 | .. automodule:: InnerEye.ML.metrics
5 |
6 | .. automodule:: InnerEye.ML.metrics_dict
7 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/scale_and_unscale_image.nii.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/InnerEye-DeepLearning/HEAD/Tests/ML/test_data/scale_and_unscale_image.nii.gz
--------------------------------------------------------------------------------
/RegressionTestResults/PR_BasicModel2Epochs/OUTPUT/best_validation_epoch/Test/dataset_id.txt:
--------------------------------------------------------------------------------
1 | 2339eba2-8ec5-4ccb-86ff-c170470ac6e2_geonorm_with_train_test_split_2020_05_26
2 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/ResultsByMode.csv:
--------------------------------------------------------------------------------
1 | mode,mean,50%,min,max
2 | Test,88.400,92.050,59.100,98.200
3 | Train,91.146,91.000,77.100,98.800
4 | Val,88.514,92.100,45.700,98.400
5 |
--------------------------------------------------------------------------------
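Note: ResultsByMode.csv holds per-mode summary statistics (mean, median, min, max). A minimal pandas sketch of producing a table with this shape from raw per-patient scores; the input frame and column names are assumptions for illustration, not the repository's aggregation code.

    import pandas as pd

    # Hypothetical raw scores: one value per patient and split ("mode").
    raw = pd.DataFrame({
        "mode":  ["Test", "Test", "Train", "Val"],
        "score": [88.4, 92.0, 91.1, 88.5],
    })
    summary = raw.groupby("mode")["score"].agg(["mean", "median", "min", "max"]).round(3)
    summary = summary.rename(columns={"median": "50%"}).reset_index()
    print(summary.to_csv(index=False))  # mode,mean,50%,min,max ...

--------------------------------------------------------------------------------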
/Tests/ML/test_data/data.h5:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:78b1d8a88361f66a60c1f1dceffadb3db5e66a93286b8f8a70849390eeb2baf6
3 | size 159276
4 |
--------------------------------------------------------------------------------
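Note: most binary assets in this dump are stored as Git LFS pointer files like the one above (a version line, a sha256 oid and a size) rather than the actual bytes. A minimal sketch of parsing such a pointer, assuming the three-line text format shown here; the helper name is illustrative.

    def parse_lfs_pointer(text: str) -> dict:
        """Parse a Git LFS pointer file of the form shown above into a dict."""
        fields = {}
        for line in text.strip().splitlines():
            key, _, value = line.partition(" ")
            fields[key] = value
        return fields

    pointer = """version https://git-lfs.github.com/spec/v1
    oid sha256:78b1d8a88361f66a60c1f1dceffadb3db5e66a93286b8f8a70849390eeb2baf6
    size 159276"""

    info = parse_lfs_pointer(pointer)
    print(info["oid"], int(info["size"]))  # sha256:78b1... 159276

--------------------------------------------------------------------------------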
/Tests/ML/test_data/patch_sampling/scan_small.nii.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/InnerEye-DeepLearning/HEAD/Tests/ML/test_data/patch_sampling/scan_small.nii.gz
--------------------------------------------------------------------------------
/Tests/ML/test_data/smoothed_posterior_bladder.nii.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/InnerEye-DeepLearning/HEAD/Tests/ML/test_data/smoothed_posterior_bladder.nii.gz
--------------------------------------------------------------------------------
/Tests/ML/test_data/042_slice_001.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:828770c73bd9935d53372a4d9b878da9993042fc3906f098bb695a077115e4b4
3 | size 20241
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/patch_sampling/sampled_center.nii.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/InnerEye-DeepLearning/HEAD/Tests/ML/test_data/patch_sampling/sampled_center.nii.gz
--------------------------------------------------------------------------------
/Tests/ML/test_data/test_good.npz:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:ab6a5b82bd0ad3b1ccb8d74fd41c36758f18bc156f456d651e1f75cf3e665404
3 | size 24670
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/train_and_test_data/id1_mask.nii.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/InnerEye-DeepLearning/HEAD/Tests/ML/test_data/train_and_test_data/id1_mask.nii.gz
--------------------------------------------------------------------------------
/Tests/ML/test_data/train_and_test_data/id2_mask.nii.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/InnerEye-DeepLearning/HEAD/Tests/ML/test_data/train_and_test_data/id2_mask.nii.gz
--------------------------------------------------------------------------------
/docs/source/images/deployment.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:a43b79b4d19ad261fddc59cf37bd1575b9076dcbe29b5ee5cf2bbe7989789e88
3 | size 360642
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/classification_data/id1_channel1.nii.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/InnerEye-DeepLearning/HEAD/Tests/ML/test_data/classification_data/id1_channel1.nii.gz
--------------------------------------------------------------------------------
/Tests/ML/test_data/dicom_series_data/hnsegmentation.nii.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/InnerEye-DeepLearning/HEAD/Tests/ML/test_data/dicom_series_data/hnsegmentation.nii.gz
--------------------------------------------------------------------------------
/Tests/ML/test_data/image_and_contour.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:07c781b6863180d153943ed9e34028cda55e2ef99a6d448fb981a7c718340e5f
3 | size 4668
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/train_and_test_data/id1_channel1.nii.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/InnerEye-DeepLearning/HEAD/Tests/ML/test_data/train_and_test_data/id1_channel1.nii.gz
--------------------------------------------------------------------------------
/Tests/ML/test_data/train_and_test_data/id1_channel2.nii.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/InnerEye-DeepLearning/HEAD/Tests/ML/test_data/train_and_test_data/id1_channel2.nii.gz
--------------------------------------------------------------------------------
/Tests/ML/test_data/train_and_test_data/id1_region.nii.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/InnerEye-DeepLearning/HEAD/Tests/ML/test_data/train_and_test_data/id1_region.nii.gz
--------------------------------------------------------------------------------
/Tests/ML/test_data/train_and_test_data/id2_channel1.nii.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/InnerEye-DeepLearning/HEAD/Tests/ML/test_data/train_and_test_data/id2_channel1.nii.gz
--------------------------------------------------------------------------------
/Tests/ML/test_data/train_and_test_data/id2_channel2.nii.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/InnerEye-DeepLearning/HEAD/Tests/ML/test_data/train_and_test_data/id2_channel2.nii.gz
--------------------------------------------------------------------------------
/Tests/ML/test_data/train_and_test_data/id2_region.nii.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/InnerEye-DeepLearning/HEAD/Tests/ML/test_data/train_and_test_data/id2_region.nii.gz
--------------------------------------------------------------------------------
/Tests/ML/test_data/042_slice_001_contour.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:d4450200a7b3c1f1c4b94d844a3adfd429690c698592fe9cefac429e77b6ec56
3 | size 4716
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/dice_per_epoch_3classes.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:9d22925db5b800d530965db13b47a0351b4403b55ddc1d9c00181b143306c563
3 | size 30190
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/image_scaled_and_contour.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:aade999f54b7e54823b0c3f189ee1bc580d30e89dbcedc66ab3f0deafdb9a1e2
3 | size 4659
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/patch_sampling/overlay_0.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:f91a769bbce02bd6d636b163df9f623e2ef2a09a04b69e4a88557419e6cc50b7
3 | size 9252
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/patch_sampling/overlay_1.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:3c982ca7892b08e73c231d9c2c8137f312355ae8d3e99ba096f705260a3b8b0c
3 | size 3752
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/patch_sampling/overlay_2.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:1fa353210ce0756a0cf6b8a99244ab676335acd28b5ce5f6c55e8b5bfeb60b5d
3 | size 3887
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/patch_sampling/sampled_to_boundary.nii.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/InnerEye-DeepLearning/HEAD/Tests/ML/test_data/patch_sampling/sampled_to_boundary.nii.gz
--------------------------------------------------------------------------------
/docs/source/images/patch_sampling_example.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:ed3bb4d800d269eb1b4fe8f82bd8be32f0db225d502904ef8af25a53c9f65ae8
3 | size 161947
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/classification_data_2d/im1.npy:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:6d433574775ed89d04f1aab2f0a30262f6b4b18006ab7a357424e9cf9450fb77
3 | size 268
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/classification_data_2d/im2.npy:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:0d22ed3d746fa5e5b33bfff0b19e0c9421804cd7ac5fbb78f269eb4b568ab9ad
3 | size 268
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/dice_per_epoch_15classes.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:d823d569a4958887a13b1b53373a3e998f461edd6cc6086f6d685d97a3b6db85
3 | size 93190
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/dicom_series_data/HN/(134).dcm:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:59e7b0b3fd2a68e000ceb8b571c713a1ffb02c71862250a3a73f4e6f34c02d8c
3 | size 526068
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/dicom_series_data/HN/(135).dcm:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:655a9094c560276c1b7a100fa11e2fcfc47ef87375a5de5dd723872a0da2ec6a
3 | size 526064
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/dicom_series_data/HN/(137).dcm:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:67facf10ca9dce1d640504c58b1189f6641119123e8d3697c6ed5aab62e52edd
3 | size 526068
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/histo_heatmaps/score_hist.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:6ddc430ffcade51a072e9452833143840b1e5726148fd850ad3f370f1315bb32
3 | size 20452
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/histo_heatmaps/slide_0.1.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:e6867218d67f06f482f1ce4a5c102fee662e445b34c7d09121b2aba8380eb54a
3 | size 474111
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/histo_heatmaps/slide_1.2.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:e6867218d67f06f482f1ce4a5c102fee662e445b34c7d09121b2aba8380eb54a
3 | size 474111
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/histo_heatmaps/slide_2.4.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:e6867218d67f06f482f1ce4a5c102fee662e445b34c7d09121b2aba8380eb54a
3 | size 474111
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/histo_heatmaps/slide_3.6.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:e6867218d67f06f482f1ce4a5c102fee662e445b34c7d09121b2aba8380eb54a
3 | size 474111
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/image_and_multiple_contours.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:7306c10335cc4c7e3119f4f02c5b857167d33485e9820a74141f236c93c8db7e
3 | size 7945
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/patch_sampling/sampling_2d.npy:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:95ca6f88e4ae0d5987ac716ba0ffd805ca6d48216399f5de895ce8cc54eac872
3 | size 1328
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/patch_sampling/sampling_2d.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:d42ff4d84d53070882dca524b0bddee49169f5f1dec5d1c59c56ebc51bf1786f
3 | size 6745
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/prefix042_class1_slice_001.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:de66998f2e188a9d0acb87b604ddc12126baeb1922bd1f2d43a878ea30c69d87
3 | size 9158
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/prefix042_class2_slice_002.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:2040e4884744ece0a917d1bd50bae81dff499f61bbd3abecee1ee43ad9d0c78b
3 | size 9309
4 |
--------------------------------------------------------------------------------
/docs/source/images/hippocampus_metrics_boxplot.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:3f64ad02c1a556242acf26e132a116219069a07bbf1578c21ba1881ff9b47661
3 | size 42221
4 |
--------------------------------------------------------------------------------
/docs/source/rst/api/Scripts/move_model.rst:
--------------------------------------------------------------------------------
1 | Move model
2 | ===========
3 |
4 | .. argparse::
5 | :ref: InnerEye.Scripts.move_model.get_move_model_parser
6 | :prog: move_model.py
7 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/histo_heatmaps/confusion_matrix_1.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:2c3da44ec1ac48549495229aca1d94e9e78eda6c2dc40ade44d68d90efa1dc6e
3 | size 22623
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/histo_heatmaps/confusion_matrix_3.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:2391edcf8e1fb4a2fdb17408d4526f43c7d9e836d366d2c83c08315ccd70ad8f
3 | size 35291
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/histo_heatmaps/heatmap_overlay.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:3ab9e04b3b6ff098ff0269f8c3ffbf1cad45c8c6635c68e7732a59ea9568e82f
3 | size 314086
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/patch_sampling/sampled_center.npy:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:95e42bf16200bf1ee45c0b9d87f567c08c114ca9e783d40c3b6542a5d6c429b3
3 | size 18128
4 |
--------------------------------------------------------------------------------
/docs/source/rst/api/ML/core/training.rst:
--------------------------------------------------------------------------------
1 | Training Models
2 | ================
3 |
4 | .. automodule:: InnerEye.ML.model_training
5 |
6 | .. automodule:: InnerEye.ML.photometric_normalization
7 |
--------------------------------------------------------------------------------
/.idea/codeStyles/codeStyleConfig.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_TrainEnsemble/AZUREML_PARENT_OUTPUT/CrossValResults/CrossValidationMannWhitneyTestResults.txt:
--------------------------------------------------------------------------------
1 | There were not enough data points for any statistically meaningful comparisons.
2 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/classification_data_generated_random/im1.npy:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:a0f4db9ea4b3287c145c4ecbc358488bdda91bdae75ee08c194b22e34b05a64b
3 | size 688
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/classification_data_generated_random/im2.npy:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:bd51e373a9107df4b2333312c9a48abb77fe1e44a0d3b81f3fea3fbe4f39f1ca
3 | size 688
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/classification_data_generated_random/im3.npy:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:1269649c96772a35aae6967e7a74f7cffbaef26295b66756de6859a3c52eb9e1
3 | size 688
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/classification_data_generated_random/im4.npy:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:68ceb77fb28b5b37c85f1f4f1f89897b2693772596f36d7a8160ef70e75cd2f2
3 | size 688
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/classification_data_multiclass/1_blue.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:d4450200a7b3c1f1c4b94d844a3adfd429690c698592fe9cefac429e77b6ec56
3 | size 4716
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/classification_data_multiclass/1_green.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:d4450200a7b3c1f1c4b94d844a3adfd429690c698592fe9cefac429e77b6ec56
3 | size 4716
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/elastic_transformed_image_and_contour.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:d4fb26944ad1fef4265cb52b64160865a940cd28ed4ad2bb7bf1c90458622bf6
3 | size 39050
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/gamma_transformed_image_and_contour.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:775d542598a607881a313c0ac828ffc47dd86b2567bcb4366646b7ea14a121d8
3 | size 3923
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/patch_sampling/123_sampled_patches_dim0.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:63c7b6d5cb26a79f0e781e92a0d4029447643a6ab08377056499670e22e5b556
3 | size 9548
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/patch_sampling/123_sampled_patches_dim1.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:c34400b71a03713a40f530158cfada3f39d153c6e51356a66147327ca9bd290d
3 | size 3829
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/patch_sampling/123_sampled_patches_dim2.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:069571e1f18b74321dc579581ca964c73911759f9ddb719a8376d35088028c82
3 | size 3762
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/patch_sampling/sampled_to_boundary.npy:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:ffb9909fee3bbfcb069d0a03dc64ea4a024d36b880422a13496299c989e6ffd1
3 | size 18128
4 |
--------------------------------------------------------------------------------
/azure-pipelines/azureml-conda-environment.yml:
--------------------------------------------------------------------------------
1 | name: AzureML_SDK
2 | channels:
3 | - defaults
4 | dependencies:
5 | - pip=20.1.1
6 | - python=3.7.3
7 | - pip:
8 | - azureml-sdk==1.36.0
9 |
--------------------------------------------------------------------------------
/docs/source/images/screenshot_azureml_patch_sampling.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:3d435820b637b59e1a2b8db8567749f4bf69b69cd075fece87065992ae0a364f
3 | size 288927
4 |
--------------------------------------------------------------------------------
/docs/source/rst/api/ML/configs/other.rst:
--------------------------------------------------------------------------------
1 | Other configs
2 | ==========================
3 |
4 | FastMRI Varnet
5 | ----------------
6 |
7 | .. automodule:: InnerEye.ML.configs.other.fastmri_varnet
8 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_Train2Nodes/OUTPUT/best_validation_epoch/Test/BaselineComparisonWilcoxonSignedRankTestResults.txt:
--------------------------------------------------------------------------------
1 | There were not enough data points for any statistically meaningful comparisons.
2 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/patch_sampling/overlay_with_aspect_dim0.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:3eac7b6a239f91243b00aa2b90ae3c0396cadd8128104c47eda95ec58eb257a9
3 | size 27756
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/patch_sampling/overlay_with_aspect_dim1.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:3855d6aacb25a8d3c35922cd8543344d59586d8b64db26ae250571656ba4cd43
3 | size 15490
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/patch_sampling/overlay_with_aspect_dim2.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:4051ab32986bb78bb3239f0273b5fd034310206d10f652accf75419741dd9055
3 | size 13173
4 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_BasicModel2Epochs/OUTPUT/best_validation_epoch/Test/BaselineComparisonWilcoxonSignedRankTestResults.txt:
--------------------------------------------------------------------------------
1 | There were not enough data points for any statistically meaningful comparisons.
2 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_TrainEnsemble/AZUREML_PARENT_OUTPUT/CrossValResults/CrossValidationWilcoxonSignedRankTestResults.txt:
--------------------------------------------------------------------------------
1 | There were not enough data points for any statistically meaningful comparisons.
2 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/train_and_test_data/checkpoints/1_checkpoint.pth.tar:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:ae356410e26a823f892277d6f750d84340d3b4be76d539aff1044c9d63d33ecf
3 | size 32854
4 |
--------------------------------------------------------------------------------
/docs/source/rst/api/ML/configs/regression.rst:
--------------------------------------------------------------------------------
1 | Regression Configs
2 | ===================
3 |
4 | Dummy Regression
5 | -----------------
6 |
7 | .. automodule:: InnerEye.ML.configs.regression.DummyRegression
8 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/train_and_test_data/metrics.csv:
--------------------------------------------------------------------------------
1 | Patient,Structure,Dice,HausdorffDistance_mm,MeanDistance_mm
2 | 1,region,0.666,1.732,18446742974197923840.000
3 | 2,region,0.666,2.000,18446742974197923840.000
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/classification_data/4be9beed-5861-fdd2-72c2-8dd89aadc1ef.h5:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:c985cfbaf8cc0c25fd783f4ecfc7703bbf9c7f514d644720fbc5aef5299df50b
3 | size 14104
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/classification_data/61bc9d73-9fbb-bd7d-c06b-eeffbafabcc4.h5:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:db19afc8979482a76252b4c332ac7a350148c5be9c81ea900e3f9fe2b018a4d7
3 | size 14104
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/classification_data/6ceacaf8-abd2-ffec-2ade-d52afd6dd1be.h5:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:0bcc11c3e2f7a0abb81d0f6d6aae619274417e4982303e70db4f8700e0c02e29
3 | size 14104
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/hdf5_data/patient_hdf5s/4be9beed-5861-fdd2-72c2-8dd89aadc1ef.h5:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:c985cfbaf8cc0c25fd783f4ecfc7703bbf9c7f514d644720fbc5aef5299df50b
3 | size 14104
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/hdf5_data/patient_hdf5s/61bc9d73-9fbb-bd7d-c06b-eeffbafabcc4.h5:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:db19afc8979482a76252b4c332ac7a350148c5be9c81ea900e3f9fe2b018a4d7
3 | size 14104
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/hdf5_data/patient_hdf5s/6ceacaf8-abd2-ffec-2ade-d52afd6dd1be.h5:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:0bcc11c3e2f7a0abb81d0f6d6aae619274417e4982303e70db4f8700e0c02e29
3 | size 14104
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/hdf5_data/patient_hdf5s/75eb4c55-debe-f906-fdff-a8ca3faf1ac5.h5:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:4fee53e0a253dc63924680e8f7e6a21ed9bb4143a8f6870cd506547c613e15aa
3 | size 14104
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/hdf5_data/patient_hdf5s/a46ebddd-b85f-de45-d8e3-9cdb10dbaf5f.h5:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:0d326f529137596668988fffdf4451eff60012428e98243d3b148c328d802550
3 | size 14104
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/hdf5_data/patient_hdf5s/b3200426-1a58-bfea-4aba-cbacbe66ea5e.h5:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:323b18089acb88eed1f8a4fc3de5749544ff9f96a4d44151e770980bcde415b1
3 | size 14104
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/hdf5_data/patient_hdf5s/cacbf4ab-dcbf-bf1e-37bc-f1c4a5aecef1.h5:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:201b000ff2c5fa8d4fecb74af58fd7d87d0456f349cfb32566012814dd9ed471
3 | size 14104
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/hdf5_data/patient_hdf5s/d316cfe5-e62a-3c0e-afda-72c3cf5ea2d8.h5:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:86a5105935fafabc672b51dbb6f4108eb6a9d386622fdd4bff190d9372f08bc2
3 | size 14104
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/hdf5_data/patient_hdf5s/e7a8c8c6-ae38-e42c-cdc4-a035c67ea122.h5:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:3f97b6b011227264e99f067dd86fe306b514d715470a1ea2851212647924e654
3 | size 14104
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/hdf5_data/patient_hdf5s/fa1fcda8-bbad-d4c1-1235-1a6631abec5a.h5:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:51b0cdf36b025da8ef0f3049f7fdefb17e15181291783e588c994eea71bdf9e3
3 | size 14104
4 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_BasicModel2Epochs/OUTPUT/patch_sampling/0_sampled_patches_dim2.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:f455962fa4a91862f6b00ace2a8b349880e9ce3d3fa71e1969bd7a4a54762284
3 | size 95776
4 |
--------------------------------------------------------------------------------
/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | ignore = E226,E302,E41,W391, E701, W291, E722, W503, E128, E126, E127, E731, E401, E402
3 | max-line-length = 160
4 | max-complexity = 25
5 | exclude = fastMRI/ test_outputs/ hi-ml/
6 | min_python_version = 3.7
7 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_BasicModel2Epochs/OUTPUT/best_validation_epoch/Test/metrics.csv:
--------------------------------------------------------------------------------
1 | Patient,Structure,Dice,HausdorffDistance_mm,MeanDistance_mm
2 | 5,lung_l,0.000,inf,inf
3 | 5,lung_r,0.000,inf,inf
4 | 5,spinalcord,0.000,inf,inf
5 |
--------------------------------------------------------------------------------
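Note: these regression-test metrics contain "inf" in the distance columns where a structure was not segmented at all (Dice of 0.000). A minimal pandas sketch of reading such a file and flagging the non-finite rows; the filtering itself is an illustration, not the repository's comparison logic.

    import numpy as np
    import pandas as pd

    metrics = pd.read_csv(
        "RegressionTestResults/PR_BasicModel2Epochs/OUTPUT/best_validation_epoch/Test/metrics.csv"
    )
    # pandas parses the literal string "inf" as float infinity.
    missing = metrics[~np.isfinite(metrics["HausdorffDistance_mm"])]
    print(missing[["Patient", "Structure", "Dice"]])

--------------------------------------------------------------------------------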
/RegressionTestResults/PR_BasicModel2Epochs/OUTPUT/best_validation_epoch/Test/metrics_boxplot.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:251cce95dc7361a4b4d750f3709d321dcf0a6cb70ec51b1b6ae40c0d54533006
3 | size 3324
4 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_BasicModel2Epochs/OUTPUT/best_validation_epoch/Val/metrics.csv:
--------------------------------------------------------------------------------
1 | Patient,Structure,Dice,HausdorffDistance_mm,MeanDistance_mm
2 | 2,lung_l,0.000,inf,inf
3 | 2,lung_r,0.000,inf,inf
4 | 2,spinalcord,0.000,inf,inf
5 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_BasicModel2Epochs/OUTPUT/best_validation_epoch/Val/metrics_boxplot.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:251cce95dc7361a4b4d750f3709d321dcf0a6cb70ec51b1b6ae40c0d54533006
3 | size 3324
4 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_BasicModel2Epochs/OUTPUT/patch_sampling/0_sampled_patches_dim0.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:2933d1c3b0f9bacb70b0b21cb948f7f84fb08a8e8379c8b3d420b521f65fa1c6
3 | size 115733
4 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_BasicModel2Epochs/OUTPUT/patch_sampling/0_sampled_patches_dim1.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:4e6ccc0b93516e0166ed69807f5741f4b7b0701937a242c0d0f744a0fb482e6c
3 | size 117568
4 |
--------------------------------------------------------------------------------
/.idea/copyright/profiles_settings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_TrainEnsemble/AZUREML_PARENT_OUTPUT/CrossValResults/Test_outliers.txt:
--------------------------------------------------------------------------------
1 |
2 |
3 | === METRIC: Dice ===
4 |
5 | No outliers found
6 |
7 | === METRIC: HausdorffDistance_mm ===
8 |
9 | No outliers found
10 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_Train2Nodes/OUTPUT/best_validation_epoch/Test/scatterplots/Single_vs_CURRENT.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:527930cc54b6e3581f87da7b9056b005cccd7c242f7afcc7382b73690d98f49d
3 | size 56806
4 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_TrainEnsemble/AZUREML_PARENT_OUTPUT/CrossValResults/0/Test/metrics.csv:
--------------------------------------------------------------------------------
1 | Patient,Structure,Dice,HausdorffDistance_mm,MeanDistance_mm
2 | 5,lung_l,0.000,inf,inf
3 | 5,lung_r,0.000,inf,inf
4 | 5,spinalcord,0.000,inf,inf
5 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/classification_data_multiclass/dataset.csv:
--------------------------------------------------------------------------------
1 | ID,channel,path,label
2 | S1,blue,1_blue.png,1|2|3
3 | S1,green,1_green.png,
4 | S2,blue,1_blue.png,2|3
5 | S2,green,1_green.png,
6 | S3,blue,1_blue.png,3
7 | S3,green,1_green.png,
8 |
--------------------------------------------------------------------------------
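Note: in this multiclass test dataset the label column stores several class indices separated by "|" (for example 1|2|3), and is empty on the non-label channels. A minimal sketch of turning such a string into a multi-hot vector; the helper name and the class count are assumptions for illustration.

    def to_multi_hot(label: str, num_classes: int = 4) -> list:
        """Convert a pipe-separated label string such as '1|2|3' into a multi-hot list."""
        vector = [0] * num_classes
        if label:  # an empty string means no labels on this row
            for index in label.split("|"):
                vector[int(index)] = 1
        return vector

    print(to_multi_hot("1|2|3"))  # [0, 1, 1, 1]
    print(to_multi_hot("3"))      # [0, 0, 0, 1]
    print(to_multi_hot(""))       # [0, 0, 0, 0]

--------------------------------------------------------------------------------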
/RegressionTestResults/PR_BasicModel2Epochs/OUTPUT/best_validation_epoch/Test/scatterplots/Single_vs_CURRENT.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:527930cc54b6e3581f87da7b9056b005cccd7c242f7afcc7382b73690d98f49d
3 | size 56806
4 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_HelloContainer/AZUREML_OUTPUT/final_model/model_inference_config.json:
--------------------------------------------------------------------------------
1 | {"model_name": "HelloContainer", "checkpoint_paths": ["checkpoints/last.ckpt"], "model_configs_namespace": "InnerEye.ML.configs.other.HelloContainer"}
2 |
--------------------------------------------------------------------------------
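Note: the final model directory carries a small JSON file describing the registered model: its name, the checkpoint paths relative to the model directory, and the namespace of its config class. A minimal sketch of reading it; the path is the one shown above and the printed fields follow the JSON keys.

    import json
    from pathlib import Path

    config_path = Path(
        "RegressionTestResults/PR_HelloContainer/AZUREML_OUTPUT/final_model/model_inference_config.json"
    )
    with config_path.open() as f:
        inference_config = json.load(f)

    print(inference_config["model_name"])               # HelloContainer
    print(inference_config["checkpoint_paths"])         # ['checkpoints/last.ckpt']
    print(inference_config["model_configs_namespace"])  # InnerEye.ML.configs.other.HelloContainer

--------------------------------------------------------------------------------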
/RegressionTestResults/PR_TrainEnsemble/AZUREML_PARENT_OUTPUT/CrossValResults/1/Test/metrics.csv:
--------------------------------------------------------------------------------
1 | Patient,Structure,Dice,HausdorffDistance_mm,MeanDistance_mm
2 | 5,lung_l,0.000,inf,inf
3 | 5,lung_r,0.000,inf,inf
4 | 5,spinalcord,0.000,169.770,108.318
5 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_BasicModel2Epochs/OUTPUT/best_validation_epoch/Test/thumbnails/005_lung_l_slice_053.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:8ae0ef1e805f46fb276840cf3cc46132cf5dcda9668bbec5888b2a6e82cdc433
3 | size 103335
4 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_BasicModel2Epochs/OUTPUT/best_validation_epoch/Test/thumbnails/005_lung_r_slice_037.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:fcc084a11f2de22a985fd0dbcca87c6ad8f5f8f8f9a108f47c5ef9dc74dc8e83
3 | size 108918
4 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_BasicModel2Epochs/OUTPUT/best_validation_epoch/Val/thumbnails/002_lung_l_slice_069.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:9e7f78ec91e5d62c018c9ebc204d3d9db014c2e15212742cb4cd1d5fff781758
3 | size 129932
4 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_BasicModel2Epochs/OUTPUT/best_validation_epoch/Val/thumbnails/002_lung_r_slice_052.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:5e2801e5701536f59a55c309c19609b0c16443784bdef7f5682c49f2ab2500dc
3 | size 133198
4 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_BasicModel2Epochs/OUTPUT/best_validation_epoch/Val/thumbnails/002_spinalcord_slice_113.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:8345742f6a66d89108d6b2858819939278d4cddb6beb00371459cac58293ec0b
3 | size 123327
4 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_TrainEnsemble/AZUREML_PARENT_OUTPUT/CrossValResults/ENSEMBLE/Test/metrics.csv:
--------------------------------------------------------------------------------
1 | Patient,Structure,Dice,HausdorffDistance_mm,MeanDistance_mm
2 | 5,lung_l,0.000,103.189,38.122
3 | 5,lung_r,0.000,inf,inf
4 | 5,spinalcord,0.000,inf,inf
5 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_TrainEnsemble/AZUREML_PARENT_OUTPUT/best_validation_epoch/Test/metrics.csv:
--------------------------------------------------------------------------------
1 | Patient,Structure,Dice,HausdorffDistance_mm,MeanDistance_mm
2 | 5,lung_l,0.000,103.189,38.122
3 | 5,lung_r,0.000,inf,inf
4 | 5,spinalcord,0.000,inf,inf
5 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/classification_data_2d/dataset.csv:
--------------------------------------------------------------------------------
1 | subjectID,channel,path,value
2 | S1,image,im1.npy
3 | S1,label,,True
4 | S2,image,im2.npy
5 | S2,label,,True
6 | S3,image,im1.npy
7 | S3,label,,False
8 | S4,image,im2.npy
9 | S4,label,,False
10 |
--------------------------------------------------------------------------------
/docs/source/rst/api/ML/core/common.rst:
--------------------------------------------------------------------------------
1 | Common
2 | =======
3 |
4 | .. automodule:: InnerEye.ML.run_ml
5 |
6 | .. automodule:: InnerEye.ML.runner
7 |
8 | .. automodule:: InnerEye.ML.common
9 |
10 | .. automodule:: InnerEye.ML.baselines_util
11 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_BasicModel2Epochs/OUTPUT/best_validation_epoch/Test/005/background.nii.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/InnerEye-DeepLearning/HEAD/RegressionTestResults/PR_BasicModel2Epochs/OUTPUT/best_validation_epoch/Test/005/background.nii.gz
--------------------------------------------------------------------------------
/RegressionTestResults/PR_BasicModel2Epochs/OUTPUT/best_validation_epoch/Test/thumbnails/005_spinalcord_slice_088.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:0d0ec9d8c878f4472d10414907a6cfd589a03884214ef92412234410523fd56a
3 | size 102433
4 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_BasicModel2Epochs/OUTPUT/best_validation_epoch/Val/002/background.nii.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/InnerEye-DeepLearning/HEAD/RegressionTestResults/PR_BasicModel2Epochs/OUTPUT/best_validation_epoch/Val/002/background.nii.gz
--------------------------------------------------------------------------------
/docs/source/rst/api/ML/core/visualization.rst:
--------------------------------------------------------------------------------
1 | Visualization
2 | ==============
3 |
4 | .. automodule:: InnerEye.ML.plotting
5 |
6 | .. automodule:: InnerEye.ML.normalize_and_visualize_dataset
7 |
8 | .. automodule:: InnerEye.ML.surface_distance_heatmaps
9 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_BasicModel2Epochs/AZUREML_OUTPUT/final_model/model_inference_config.json:
--------------------------------------------------------------------------------
1 | {"model_name": "BasicModel2Epochs", "checkpoint_paths": ["checkpoints/last.ckpt"], "model_configs_namespace": "InnerEye.ML.configs.segmentation.BasicModel2Epochs"}
2 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_BasicModel2Epochs/OUTPUT/best_validation_epoch/Val/002/posterior_lung_r.nii.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/InnerEye-DeepLearning/HEAD/RegressionTestResults/PR_BasicModel2Epochs/OUTPUT/best_validation_epoch/Val/002/posterior_lung_r.nii.gz
--------------------------------------------------------------------------------
/RegressionTestResults/PR_TrainEnsemble/AZUREML_PARENT_OUTPUT/best_validation_epoch/Test/thumbnails/005_spinalcord_slice_088.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:0d0ec9d8c878f4472d10414907a6cfd589a03884214ef92412234410523fd56a
3 | size 102433
4 |
--------------------------------------------------------------------------------
/.idea/other.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_BasicModel2Epochs/OUTPUT/best_validation_epoch/Test/005/posterior_lung_l.nii.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/InnerEye-DeepLearning/HEAD/RegressionTestResults/PR_BasicModel2Epochs/OUTPUT/best_validation_epoch/Test/005/posterior_lung_l.nii.gz
--------------------------------------------------------------------------------
/azure-pipelines/train.yaml:
--------------------------------------------------------------------------------
1 | name: train-$(Date:yyyyMMdd)$(Rev:-r)
2 | jobs:
3 | - job: Train
4 | pool:
5 | vmImage: 'ubuntu-16.04'
6 | steps:
7 | - template: train_template.yml
8 | parameters:
9 | wait_for_completion: 'False'
10 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | blank_issues_enabled: true
2 | contact_links:
3 | - name: ❓ Discussions tab
4 | url: https://github.com/microsoft/InnerEye-DeepLearning/discussions/new
5 | about: Ask and answer InnerEye-related questions or start a general discussion
6 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_TrainEnsemble/AZUREML_PARENT_OUTPUT/best_validation_epoch/Test/005/posterior_lung_r.nii.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/InnerEye-DeepLearning/HEAD/RegressionTestResults/PR_TrainEnsemble/AZUREML_PARENT_OUTPUT/best_validation_epoch/Test/005/posterior_lung_r.nii.gz
--------------------------------------------------------------------------------
/docs/source/rst/api/Scripts/scripts.rst:
--------------------------------------------------------------------------------
1 | Scripts
2 | =======
3 |
4 | Please see the links below for details of the available arguments for each of the primary InnerEye scripts.
5 |
6 | .. toctree::
7 | :maxdepth: 1
8 |
9 | runner
10 | submit_for_inference
11 | move_model
12 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_GlaucomaCV/AZUREML_PARENT_OUTPUT/final_ensemble_model/model_inference_config.json:
--------------------------------------------------------------------------------
1 | {"model_name": "GlaucomaPublic", "checkpoint_paths": ["checkpoints/OTHER_RUNS/1/last.ckpt", "checkpoints/last.ckpt"], "model_configs_namespace": "InnerEye.ML.configs.classification.GlaucomaPublic"}
2 |
--------------------------------------------------------------------------------
/azure-pipelines/store_settings.yml:
--------------------------------------------------------------------------------
1 | steps:
2 | - bash: |
3 | echo -e "variables:\n subscription_id: '$(InnerEyeDevSubscriptionID)'\n application_id: '$(InnerEyeDeepLearningServicePrincipalID)'" > InnerEyePrivateSettings.yml
4 | displayName: Store subscription in InnerEyePrivateSettings.yml
5 |
--------------------------------------------------------------------------------
/docs/source/rst/api/ML/models.rst:
--------------------------------------------------------------------------------
1 | Models
2 | ======
3 |
4 | Below is a list of models and various deep learning components that are available within InnerEye-DeepLearning.
5 |
6 | .. toctree::
7 | models/architectures
8 | models/blocks
9 | models/layers
10 | models/losses
11 |
--------------------------------------------------------------------------------
/docs/source/rst/api/ML/models/losses.rst:
--------------------------------------------------------------------------------
1 | Losses
2 | ======
3 |
4 | .. automodule:: InnerEye.ML.models.losses.cross_entropy
5 |
6 | .. automodule:: InnerEye.ML.models.losses.ece
7 |
8 | .. automodule:: InnerEye.ML.models.losses.mixture
9 |
10 | .. automodule:: InnerEye.ML.models.losses.soft_dice
11 |
--------------------------------------------------------------------------------
/docs/source/rst/api/Scripts/runner.rst:
--------------------------------------------------------------------------------
1 | Runner
2 | ======
3 |
4 | This script is the entry point for running all InnerEye jobs, both locally and in AzureML. See the user guides for example usage.
5 |
6 | .. argparse::
7 | :ref: InnerEye.Azure.azure_runner.create_runner_parser
8 | :prog: runner.py
9 |
--------------------------------------------------------------------------------
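The runner page above says the script can launch jobs locally or submit them to AzureML. As a minimal, illustrative sketch only (the `--model` value is a config name that appears elsewhere in this repository; the `--azureml` switch is assumed from the user guides rather than confirmed here, and `runner.py --help` prints the authoritative argument list generated by `create_runner_parser`), the runner can be driven from another Python process like this:

```python
# Hypothetical launcher sketch: invoke the InnerEye runner as a subprocess.
# Flag names are assumptions; check `runner.py --help` for the real list.
import subprocess
import sys

subprocess.run(
    [
        sys.executable,
        "InnerEye/ML/runner.py",
        "--model=BasicModel2Epochs",  # a config used by the PR regression tests in this repo
        "--azureml=True",             # assumed switch for AzureML submission instead of a local run
    ],
    check=True,
)
```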
/RegressionTestResults/PR_Train2Nodes/OUTPUT/best_validation_epoch/Val/metrics.csv:
--------------------------------------------------------------------------------
1 | Patient,Structure,Dice,HausdorffDistance_mm,MeanDistance_mm
2 | 8,lung_l,0.000,inf,inf
3 | 9,lung_l,0.000,inf,inf
4 | 8,lung_r,0.000,inf,inf
5 | 9,lung_r,0.000,inf,inf
6 | 8,spinalcord,0.000,inf,inf
7 | 9,spinalcord,0.000,inf,inf
8 |
--------------------------------------------------------------------------------
/docs/source/rst/api/ML/pipelines.rst:
--------------------------------------------------------------------------------
1 | Pipelines
2 | =========
3 |
4 | Inference
5 | ---------
6 |
7 | .. automodule:: InnerEye.ML.pipelines.inference
8 |
9 | .. automodule:: InnerEye.ML.pipelines.scalar_inference
10 |
11 | Ensemble
12 | ---------
13 | .. automodule:: InnerEye.ML.pipelines.ensemble
14 |
--------------------------------------------------------------------------------
/mypy.ini:
--------------------------------------------------------------------------------
1 | [mypy]
2 | python_version=3.7
3 | scripts_are_modules=True
4 | namespace_packages=True
5 | show_traceback=True
6 | ignore_missing_imports=True
7 | follow_imports=silent
8 | follow_imports_for_stubs=True
9 | disallow_untyped_calls=False
10 | disallow_untyped_defs=True
11 | strict_optional=True
12 |
--------------------------------------------------------------------------------
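The settings above combine `disallow_untyped_defs=True` with `strict_optional=True`. A small, purely illustrative sketch of what those two options enforce on repository code (the function names here are hypothetical):

```python
from typing import Optional


# With disallow_untyped_defs=True, mypy rejects the first definition
# ("Function is missing a type annotation") and accepts the second.
def scale_unchecked(x, factor):
    return x * factor


def scale(x: float, factor: float = 2.0) -> float:
    return x * factor


# With strict_optional=True, Optional values must be narrowed before use.
def first_char(text: Optional[str]) -> str:
    if text is None:
        return ""
    return text[0]
```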
/RegressionTestResults/PR_TrainEnsemble/AZUREML_PARENT_OUTPUT/final_ensemble_model/model_inference_config.json:
--------------------------------------------------------------------------------
1 | {"model_name": "BasicModelForEnsembleTest", "checkpoint_paths": ["checkpoints/OTHER_RUNS/1/last.ckpt", "checkpoints/last.ckpt"], "model_configs_namespace": "InnerEye.ML.configs.segmentation.BasicModel2Epochs"}
2 |
--------------------------------------------------------------------------------
/InnerEye-DataQuality/README.md:
--------------------------------------------------------------------------------
1 | The InnerEye-DataQuality code has been moved here: [https://github.com/microsoft/InnerEye-DeepLearning/tree/1606729c7a16e1bfeb269694314212b6e2737939/InnerEye-DataQuality](https://github.com/microsoft/InnerEye-DeepLearning/tree/1606729c7a16e1bfeb269694314212b6e2737939/InnerEye-DataQuality)
2 |
--------------------------------------------------------------------------------
/docs/source/rst/api/ML/core.rst:
--------------------------------------------------------------------------------
1 | ML Core
2 | ================
3 |
4 | .. toctree::
5 | :caption: Key Components in running InnerEye-DeepLearning
6 |
7 | core/common
8 | core/lightning
9 | core/training
10 | core/testing
11 | core/configs
12 | core/metrics
13 | core/visualization
14 |
--------------------------------------------------------------------------------
/docs/source/rst/api/ML/models/layers.rst:
--------------------------------------------------------------------------------
1 | Layers
2 | ======
3 |
4 | .. automodule:: InnerEye.ML.models.layers.basic
5 |
6 | .. automodule:: InnerEye.ML.models.layers.identity
7 |
8 | .. automodule:: InnerEye.ML.models.layers.pooling_layers
9 |
10 | .. automodule:: InnerEye.ML.models.layers.weight_standardization
11 |
--------------------------------------------------------------------------------
/docs/source/rst/api/Scripts/submit_for_inference.rst:
--------------------------------------------------------------------------------
1 | Submit for Inference
2 | ====================
3 |
4 | This script is used to submit a single image for inference against a trained model.
5 |
6 | .. argparse::
7 | :ref: InnerEye.Scripts.submit_for_inference.get_submit_for_inference_parser
8 | :prog: submit_for_inference.py
9 |
--------------------------------------------------------------------------------
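The page above documents the argparse parser behind the inference-submission script. A safe way to inspect its arguments locally, without assuming any flag names, is simply to ask argparse for its help text; a minimal sketch (the module path is the one referenced by the `:ref:` above):

```python
# Print the argument list of the inference-submission script. Because the
# parser is built with argparse, --help is always a valid invocation.
import subprocess
import sys

subprocess.run(
    [sys.executable, "InnerEye/Scripts/submit_for_inference.py", "--help"],
    check=True,
)
```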
/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
--------------------------------------------------------------------------------
/docs/source/rst/api/ML/core/configs.rst:
--------------------------------------------------------------------------------
1 | Configs
2 | =======
3 |
4 | .. automodule:: InnerEye.ML.config
5 |
6 | .. automodule:: InnerEye.ML.model_config_base
7 |
8 | .. automodule:: InnerEye.ML.deep_learning_config
9 |
10 | .. automodule:: InnerEye.ML.scalar_config
11 |
12 | .. automodule:: InnerEye.ML.model_inference_config
13 |
--------------------------------------------------------------------------------
/docs/source/rst/api/ML/SSL/lightning_containers.rst:
--------------------------------------------------------------------------------
1 | SSL Lightning Containers
2 | ========================
3 |
4 | Container
5 | ---------
6 |
7 | .. automodule:: InnerEye.ML.SSL.lightning_containers.ssl_container
8 |
9 | Image Classifier
10 | ----------------
11 |
12 | .. automodule:: InnerEye.ML.SSL.lightning_containers.ssl_image_classifier
13 |
--------------------------------------------------------------------------------
/docs/source/rst/api/ML/configs/classification.rst:
--------------------------------------------------------------------------------
1 | Classification Configs
2 | =============================
3 |
4 | COVID Model
5 | -----------
6 |
7 | .. autoclass:: InnerEye.ML.configs.classification.CovidModel.CovidModel
8 |
9 | .. autoclass:: InnerEye.ML.configs.classification.CovidModel.DicomPreparation
10 | :special-members: __call__
11 |
--------------------------------------------------------------------------------
/docs/source/rst/api/ML/configs.rst:
--------------------------------------------------------------------------------
1 | Configurations
2 | ==============
3 |
4 | The following is a list of the pre-defined configurations that can be used as-is, or inherited and adapted for your own purposes.
5 |
6 | .. toctree::
7 |
8 | configs/classification
9 | configs/regression
10 | configs/segmentation
11 | configs/ssl
12 | configs/other
13 |
--------------------------------------------------------------------------------
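The configurations page above notes that the pre-defined configs can be inherited and adapted. A minimal sketch of that pattern, modelled directly on `InnerEye/ML/configs/segmentation/BasicModel2Epochs1Channel.py` and `Tests/ML/configs/BasicModel2EpochsOutsidePackage.py` elsewhere in this dump (only the class name below is hypothetical):

```python
# Hypothetical config: inherit a pre-defined segmentation config and adapt it.
from InnerEye.ML.configs.segmentation.BasicModel2Epochs import BasicModel2Epochs


class MyOneChannelModel(BasicModel2Epochs):
    def __init__(self) -> None:
        # Restrict the parent config to a single CT image channel.
        super().__init__(image_channels=["ct"])
```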
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | # Read the Docs configuration file
2 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
3 |
4 | version: 2
5 |
6 | build:
7 | os: ubuntu-20.04
8 | tools:
9 | python: miniconda3-4.7
10 |
11 | sphinx:
12 | configuration: docs/source/conf.py
13 |
14 | conda:
15 | environment: primary_deps.yml
16 |
--------------------------------------------------------------------------------
/docs/source/rst/api/ML/SSL.rst:
--------------------------------------------------------------------------------
1 | Self-Supervised Learning (SSL)
2 | ==============================
3 |
4 | Encoder
5 | -------
6 |
7 | .. automodule:: InnerEye.ML.SSL.encoders
8 |
9 | All Other SSL Components
10 | ------------------------
11 |
12 | .. toctree::
13 | SSL/lightning_containers
14 | SSL/lightning_modules
15 | SSL/datamodules_and_datasets
16 | SSL/utils
17 |
--------------------------------------------------------------------------------
/InnerEye/settings.yml:
--------------------------------------------------------------------------------
1 | variables:
2 | tenant_id: '72f988bf-86f1-41af-91ab-2d7cd011db47'
3 | subscription_id: ''
4 | application_id: ''
5 | azureml_datastore: 'innereyedatasets'
6 | resource_group: 'InnerEye-DeepLearning'
7 | docker_shm_size: '440g'
8 | workspace_name: 'InnerEye-DeepLearning'
9 | cluster: 'training-nd24'
10 | model_configs_namespace: ''
11 | extra_code_directory: ''
12 |
--------------------------------------------------------------------------------
/docs/source/rst/api/ML/ML.rst:
--------------------------------------------------------------------------------
1 | Machine learning
2 | ================
3 |
4 | Below you will find the docstrings of all resources found in `InnerEye/ML `_.
5 |
6 |
7 | .. toctree::
8 |
9 | core
10 | models
11 | dataset
12 | augmentations
13 | SSL
14 | configs
15 | utils
16 | pipelines
17 | visualizers
18 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_BasicModel2Epochs/OUTPUT/best_validation_epoch/Test/metrics_aggregates.csv:
--------------------------------------------------------------------------------
1 | Structure,count,DiceNumeric_mean,DiceNumeric_std,DiceNumeric_min,DiceNumeric_max,HausdorffDistance_mm_mean,HausdorffDistance_mm_std,HausdorffDistance_mm_min,HausdorffDistance_mm_max,MeanDistance_mm_mean,MeanDistance_mm_std,MeanDistance_mm_min,MeanDistance_mm_max
2 | lung_l,1.000,0.000,,0.000,0.000,inf,,inf,inf,inf,,inf,inf
3 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_BasicModel2Epochs/OUTPUT/best_validation_epoch/Val/metrics_aggregates.csv:
--------------------------------------------------------------------------------
1 | Structure,count,DiceNumeric_mean,DiceNumeric_std,DiceNumeric_min,DiceNumeric_max,HausdorffDistance_mm_mean,HausdorffDistance_mm_std,HausdorffDistance_mm_min,HausdorffDistance_mm_max,MeanDistance_mm_mean,MeanDistance_mm_std,MeanDistance_mm_min,MeanDistance_mm_max
2 | lung_l,1.000,0.000,,0.000,0.000,inf,,inf,inf,inf,,inf,inf
3 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/classification_data/dataset.csv:
--------------------------------------------------------------------------------
1 | subjectID,channel,path,value,scalar1,scalar2
2 | S1,image,4be9beed-5861-fdd2-72c2-8dd89aadc1ef.h5,,,1
3 | S1,label,,True,1.0
4 | S2,image,6ceacaf8-abd2-ffec-2ade-d52afd6dd1be.h5,,,2
5 | S2,label,,True,2.0
6 | S3,image,61bc9d73-9fbb-bd7d-c06b-eeffbafabcc4.h5,,,3
7 | S3,label,,False,3.0
8 | S4,image,61bc9d73-9fbb-bd7d-c06b-eeffbafabcc4.h5,,,4
9 | S4,label,,False,3.0
10 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_Train2Nodes/OUTPUT/Val/epoch_metrics.csv:
--------------------------------------------------------------------------------
1 | subject_count,loss,Dice/AverageAcrossStructures,Dice/spinalcord,Dice/lung_r,Dice/lung_l,VoxelCount/spinalcord,VoxelCount/lung_r,VoxelCount/lung_l,epoch,cross_validation_split_index
2 | 4.000000,0.729041,0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,65335.000000,0,-1
3 | 4.000000,0.729027,0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,65335.000000,1,-1
4 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_TrainEnsemble/OUTPUT/Val/epoch_metrics.csv:
--------------------------------------------------------------------------------
1 | subject_count,loss,Dice/AverageAcrossStructures,Dice/spinalcord,Dice/lung_r,Dice/lung_l,VoxelCount/spinalcord,VoxelCount/lung_r,VoxelCount/lung_l,epoch,cross_validation_split_index
2 | 2.000000,0.715168,0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,94367.500000,0,0
3 | 2.000000,0.715166,0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,94367.500000,1,0
4 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_BasicModel2Epochs/OUTPUT/Val/epoch_metrics.csv:
--------------------------------------------------------------------------------
1 | subject_count,loss,Dice/AverageAcrossStructures,Dice/spinalcord,Dice/lung_r,Dice/lung_l,VoxelCount/spinalcord,VoxelCount/lung_r,VoxelCount/lung_l,epoch,cross_validation_split_index
2 | 2.000000,0.716739,0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,84282.000000,0,-1
3 | 2.000000,0.716731,0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,84282.000000,1,-1
4 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_TrainEnsemble/OUTPUT/Train/epoch_metrics.csv:
--------------------------------------------------------------------------------
1 | subject_count,loss,learning_rate,Dice/AverageAcrossStructures,Dice/spinalcord,Dice/lung_r,Dice/lung_l,VoxelCount/spinalcord,VoxelCount/lung_r,VoxelCount/lung_l,epoch,cross_validation_split_index
2 | 2.000000,0.723947,0.000100,0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,148093.000000,0,0
3 | 2.000000,0.768604,0.000090,0.000000,0.000000,0.000000,0.000000,0.000000,86614.000000,0.000000,1,0
4 |
--------------------------------------------------------------------------------
/docs/source/rst/api/ML/augmentations.rst:
--------------------------------------------------------------------------------
1 | Data augmentation
2 | =================
3 |
4 | Image Transformations
5 | ----------------------
6 |
7 | .. automodule:: InnerEye.ML.augmentations.image_transforms
8 |
9 | Transformation Pipeline
10 | -----------------------
11 |
12 | .. automodule:: InnerEye.ML.augmentations.transform_pipeline
13 |
14 | Utils
15 | -----
16 |
17 | .. automodule:: InnerEye.ML.augmentations.augmentation_for_segmentation_utils
18 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_Train2Nodes/OUTPUT/Train/epoch_metrics.csv:
--------------------------------------------------------------------------------
1 | subject_count,loss,learning_rate,Dice/AverageAcrossStructures,Dice/spinalcord,Dice/lung_r,Dice/lung_l,VoxelCount/spinalcord,VoxelCount/lung_r,VoxelCount/lung_l,epoch,cross_validation_split_index
2 | 4.000000,0.716914,0.000100,0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,99342.000000,0,-1
3 | 4.000000,0.773825,0.000090,0.000000,0.000000,0.000000,0.000000,181.250000,83803.250000,122.250000,1,-1
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/train_and_test_data/metrics_aggregates.csv:
--------------------------------------------------------------------------------
1 | Structure,count,DiceNumeric_mean,DiceNumeric_std,DiceNumeric_min,DiceNumeric_max,HausdorffDistance_mm_mean,HausdorffDistance_mm_std,HausdorffDistance_mm_min,HausdorffDistance_mm_max,MeanDistance_mm_mean,MeanDistance_mm_std,MeanDistance_mm_min,MeanDistance_mm_max
2 | region,2.000,0.666,0.000,0.666,0.666,1.866,0.190,1.732,2.000,18446742974197923840.000,0.000,18446742974197923840.000,18446742974197923840.000
3 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_BasicModel2Epochs/OUTPUT/Train/epoch_metrics.csv:
--------------------------------------------------------------------------------
1 | subject_count,loss,learning_rate,Dice/AverageAcrossStructures,Dice/spinalcord,Dice/lung_r,Dice/lung_l,VoxelCount/spinalcord,VoxelCount/lung_r,VoxelCount/lung_l,epoch,cross_validation_split_index
2 | 2.000000,0.718717,0.000100,0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,126273.000000,0,-1
3 | 2.000000,0.775692,0.000090,0.000000,0.000000,0.000000,0.000000,0.000000,84030.000000,0.000000,1,-1
4 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/plot_cross_validation/multi_label_sequence_in_crossval/0/Val/metrics.csv:
--------------------------------------------------------------------------------
1 | prediction_target,epoch,subject,model_output,label,cross_validation_split_index,data_split
2 | 01,1,2137.00005,0.5434974431991577,1.0,0,Val
3 | 01,1,3250.00005,0.5082249641418457,0.0,0,Val
4 | 02,1,2137.00005,0.5553834438323975,1.0,0,Val
5 | 02,1,3250.00005,0.4725574851036072,0.0,0,Val
6 | 03,1,2137.00005,0.5667034983634949,1.0,0,Val
7 | 03,1,3250.00005,0.4632112383842468,0.0,0,Val
8 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/plot_cross_validation/multi_label_sequence_in_crossval/1/Val/metrics.csv:
--------------------------------------------------------------------------------
1 | prediction_target,epoch,subject,model_output,label,cross_validation_split_index,data_split
2 | 01,1,3250.12345,0.47584840655326843,0.0,1,Val
3 | 01,1,2137.00125,0.5432486534118652,0.0,1,Val
4 | 02,1,3250.12345,0.4699607789516449,0.0,1,Val
5 | 02,1,2137.00125,0.5575994253158569,0.0,1,Val
6 | 03,1,3250.12345,0.47309622168540955,0.0,1,Val
7 | 03,1,2137.00125,0.5700393915176392,0.0,1,Val
8 |
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
1 | # Building docs for InnerEye-DeepLearning
2 |
3 | 1. First, make sure you have set up your conda environment as described in the [Quick Setup Guide](../README.md#quick-setup).
4 | 2. Run `make html` from the `docs` folder. This will create HTML files under `docs/build/html`.
5 | 3. From the `docs/build/html` folder, run `python -m http.server 8080` to host the docs locally.
6 | 4. From your browser, navigate to `http://localhost:8080` to view the documentation.
7 |
--------------------------------------------------------------------------------
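Steps 3 and 4 above serve the built documentation with Python's built-in HTTP server. An equivalent single-file sketch, assuming the `docs/build/html` output location named in step 2:

```python
# Serve the Sphinx output from docs/build/html on http://localhost:8080,
# equivalent to running `python -m http.server 8080` inside that folder.
import functools
import http.server

handler = functools.partial(
    http.server.SimpleHTTPRequestHandler, directory="docs/build/html")
http.server.HTTPServer(("", 8080), handler).serve_forever()
```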
/Tests/ML/test_data/metrics_aggregates.csv:
--------------------------------------------------------------------------------
1 | Structure,count,DiceNumeric_mean,DiceNumeric_std,DiceNumeric_min,DiceNumeric_max,HausdorffDistance_mm_mean,HausdorffDistance_mm_std,HausdorffDistance_mm_min,HausdorffDistance_mm_max,MeanDistance_mm_mean,MeanDistance_mm_std,MeanDistance_mm_min,MeanDistance_mm_max
2 | kidney,2.000,0.550,0.212,0.400,0.700,1.000,0.000,1.000,1.000,0.150,0.071,0.100,0.200
3 | liver,3.000,0.733,0.306,0.400,1.000,1.000,0.000,1.000,1.000,0.400,0.100,0.300,0.500
4 |
--------------------------------------------------------------------------------
/docs/source/rst/api/ML/configs/ssl.rst:
--------------------------------------------------------------------------------
1 | Self-Supervised Learning Configs
2 | ================================
3 |
4 | CIFAR
5 | -----
6 |
7 | .. automodule:: InnerEye.ML.configs.ssl.CIFAR_classifier_configs
8 |
9 | .. automodule:: InnerEye.ML.configs.ssl.CIFAR_SSL_configs
10 |
11 | COVID
12 | -----
13 |
14 | .. automodule:: InnerEye.ML.configs.ssl.CovidContainers
15 |
16 | Chest X-Rays
17 | ------------
18 |
19 | .. automodule:: InnerEye.ML.configs.ssl.CXR_SSL_configs
20 |
--------------------------------------------------------------------------------
/docs/source/rst/api/ML/core/lightning.rst:
--------------------------------------------------------------------------------
1 | Lightning Components
2 | ================================
3 |
4 | .. automodule:: InnerEye.ML.lightning_base
5 | :exclude-members: configure_optimizers
6 |
7 | .. automodule:: InnerEye.ML.lightning_container
8 |
9 | .. automodule:: InnerEye.ML.lightning_models
10 |
11 | .. automodule:: InnerEye.ML.lightning_helpers
12 |
13 | .. automodule:: InnerEye.ML.lightning_loggers
14 |
15 | .. automodule:: InnerEye.ML.lightning_metrics
16 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_Train2Nodes/OUTPUT/best_validation_epoch/Test/metrics.csv:
--------------------------------------------------------------------------------
1 | Patient,Structure,Dice,HausdorffDistance_mm,MeanDistance_mm
2 | 4,lung_l,0.000,inf,inf
3 | 5,lung_l,0.000,inf,inf
4 | 6,lung_l,0.000,inf,inf
5 | 7,lung_l,0.000,inf,inf
6 | 4,lung_r,0.000,inf,inf
7 | 5,lung_r,0.000,inf,inf
8 | 6,lung_r,0.000,inf,inf
9 | 7,lung_r,0.000,inf,inf
10 | 4,spinalcord,0.000,inf,inf
11 | 5,spinalcord,0.000,inf,inf
12 | 6,spinalcord,0.000,inf,inf
13 | 7,spinalcord,0.000,inf,inf
14 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Microsoft Open Source Code of Conduct
2 |
3 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
4 |
5 | Resources:
6 |
7 | - [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/)
8 | - [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
9 | - Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns
10 |
--------------------------------------------------------------------------------
/Tests/ML/reports/val_metrics_classification.csv:
--------------------------------------------------------------------------------
1 | prediction_target,epoch,subject,model_output,label,cross_validation_split_index,data_split
2 | Default,0,0,0.2,1,-1,Val
3 | Default,0,1,0.6,1,-1,Val
4 | Default,0,2,0.7,1,-1,Val
5 | Default,0,3,0.8,1,-1,Val
6 | Default,0,4,0.9,1,-1,Val
7 | Default,0,5,1.0,1,-1,Val
8 | Default,0,6,0.0,0,-1,Val
9 | Default,0,7,0.1,0,-1,Val
10 | Default,0,8,0.2,0,-1,Val
11 | Default,0,9,0.3,0,-1,Val
12 | Default,0,10,0.4,0,-1,Val
13 | Default,0,11,0.8,0,-1,Val
14 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/test_aggregate_metrics_classification/Val/metrics.csv.rank0:
--------------------------------------------------------------------------------
1 | epoch,subject,prediction_target,model_output,label,data_split,cross_validation_split_index
2 | 0,S5,Default,0.092429,0.000000,Val,1
3 | 0,S7,Default,0.095273,0.000000,Val,1
4 | 1,S5,Default,0.091557,0.000000,Val,1
5 | 1,S7,Default,0.094306,0.000000,Val,1
6 | 2,S5,Default,0.090680,0.000000,Val,1
7 | 2,S7,Default,0.093331,0.000000,Val,1
8 | 3,S7,Default,0.092348,0.000000,Val,1
9 | 3,S5,Default,0.089795,0.000000,Val,1
10 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/test_aggregate_metrics_classification/Val/metrics.csv.rank1:
--------------------------------------------------------------------------------
1 | epoch,subject,prediction_target,model_output,label,data_split,cross_validation_split_index
2 | 0,S6,Default,0.192429,0.000000,Val,1
3 | 0,S8,Default,0.195273,0.000000,Val,1
4 | 1,S6,Default,0.191557,0.000000,Val,1
5 | 1,S8,Default,0.194306,0.000000,Val,1
6 | 2,S6,Default,0.190680,0.000000,Val,1
7 | 2,S8,Default,0.193331,0.000000,Val,1
8 | 3,S6,Default,0.192348,0.000000,Val,1
9 | 3,S8,Default,0.189795,0.000000,Val,1
10 |
--------------------------------------------------------------------------------
/Tests/ML/reports/test_metrics_classification.csv:
--------------------------------------------------------------------------------
1 | prediction_target,epoch,subject,model_output,label,cross_validation_split_index,data_split
2 | Default,0,0,0.0,1,-1,Test
3 | Default,0,1,0.2,1,-1,Test
4 | Default,0,2,0.4,1,-1,Test
5 | Default,0,3,0.6,1,-1,Test
6 | Default,0,4,0.8,1,-1,Test
7 | Default,0,5,1.0,1,-1,Test
8 | Default,0,6,0.0,0,-1,Test
9 | Default,0,7,0.2,0,-1,Test
10 | Default,0,8,0.4,0,-1,Test
11 | Default,0,9,0.6,0,-1,Test
12 | Default,0,10,0.8,0,-1,Test
13 | Default,0,11,1.0,0,-1,Test
14 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/test_aggregate_metrics_classification/Train/metrics.csv.rank0:
--------------------------------------------------------------------------------
1 | epoch,subject,prediction_target,model_output,label,data_split,cross_validation_split_index
2 | 0,S2,Default,0.092429,0.000000,Train,1
3 | 0,S4,Default,0.095273,0.000000,Train,1
4 | 1,S2,Default,0.091557,0.000000,Train,1
5 | 1,S4,Default,0.094306,0.000000,Train,1
6 | 2,S2,Default,0.090680,0.000000,Train,1
7 | 2,S4,Default,0.093331,0.000000,Train,1
8 | 3,S4,Default,0.092348,0.000000,Train,1
9 | 3,S2,Default,0.089795,0.000000,Train,1
10 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/test_aggregate_metrics_classification/Train/metrics.csv.rank1:
--------------------------------------------------------------------------------
1 | epoch,subject,prediction_target,model_output,label,data_split,cross_validation_split_index
2 | 0,S3,Default,0.192429,0.000000,Train,1
3 | 0,S1,Default,0.195273,0.000000,Train,1
4 | 1,S3,Default,0.191557,0.000000,Train,1
5 | 1,S1,Default,0.194306,0.000000,Train,1
6 | 2,S3,Default,0.190680,0.000000,Train,1
7 | 2,S1,Default,0.193331,0.000000,Train,1
8 | 3,S3,Default,0.192348,0.000000,Train,1
9 | 3,S1,Default,0.189795,0.000000,Train,1
10 |
--------------------------------------------------------------------------------
/azure-pipelines/checkout_windows.yml:
--------------------------------------------------------------------------------
1 | steps:
2 | - checkout: self
3 | lfs: true
4 | submodules: true
5 |
6 | - bash: |
7 | subdir=Scripts
8 | echo "Adding this directory to PATH: $CONDA/$subdir"
9 | echo "##vso[task.prependpath]$CONDA/$subdir"
10 | displayName: Add conda to PATH
11 | condition: succeeded()
12 |
13 | - bash: |
14 | conda install conda=4.8.3 -y
15 | conda --version
16 | conda list
17 | displayName: Print conda version and initial package list
18 |
--------------------------------------------------------------------------------
/docs/source/rst/api/ML/dataset.rst:
--------------------------------------------------------------------------------
1 | Dataset
2 | =======
3 |
4 | Full Datasets
5 | --------------
6 |
7 | .. automodule:: InnerEye.ML.dataset.full_image_dataset
8 |
9 | .. automodule:: InnerEye.ML.dataset.sample
10 |
11 | Scalar Datasets
12 | ---------------
13 |
14 | .. automodule:: InnerEye.ML.dataset.scalar_dataset
15 |
16 | .. automodule:: InnerEye.ML.dataset.scalar_sample
17 |
18 | CroppingDataset
19 | ---------------
20 | .. automodule:: InnerEye.ML.dataset.cropping_dataset
21 | :special-members: __getitem__
22 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/plot_cross_validation/multi_label_sequence_in_crossval/0/Val/epoch_metrics.csv:
--------------------------------------------------------------------------------
1 | loss,seconds_per_batch,seconds_per_epoch,learning_rate,cross_entropy,accuracy_at_threshold_05,area_under_roc_curve,area_under_pr_curve,accuracy_at_optimal_threshold,false_positive_rate_at_optimal_threshold,false_negative_rate_at_optimal_threshold,optimal_threshold,subject_count,epoch,cross_validation_split_index
2 | 0.6890372037887573,0.08300209045410156,0.0910027027130127,0.1,0.5950348377227783,1.0,1.0,1.0,0.5,0.0,0.0,0.5667034983634949,2.0,1,0
3 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/plot_cross_validation/multi_label_sequence_in_crossval/1/Val/epoch_metrics.csv:
--------------------------------------------------------------------------------
1 | loss,seconds_per_batch,seconds_per_epoch,learning_rate,cross_entropy,accuracy_at_threshold_05,area_under_roc_curve,area_under_pr_curve,accuracy_at_optimal_threshold,false_positive_rate_at_optimal_threshold,false_negative_rate_at_optimal_threshold,optimal_threshold,subject_count,epoch,cross_validation_split_index
2 | 0.7274554967880249,0.09100222587585449,0.10100007057189941,0.1,0.7423995733261108,0.5,1.0,1.0,1.0,0.0,,1.5700393915176392,2.0,1,1
3 |
--------------------------------------------------------------------------------
/Tests/ML/configs/BasicModel2EpochsOutsidePackage.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 | from InnerEye.ML.configs.segmentation.BasicModel2Epochs import BasicModel2Epochs
6 |
7 |
8 | class BasicModel2EpochsOutsidePackage(BasicModel2Epochs):
9 | pass
10 |
--------------------------------------------------------------------------------
/azure-pipelines/prepare_conda.yml:
--------------------------------------------------------------------------------
1 | steps:
2 | - bash: |
3 | if [ $(Agent.OS) = 'Windows_NT' ]
4 | then subdir=Scripts
5 | else subdir=bin
6 | fi
7 | echo "Adding this directory to PATH: $CONDA/$subdir"
8 | echo "##vso[task.prependpath]$CONDA/$subdir"
9 | displayName: Add conda to PATH
10 | condition: succeeded()
11 |
12 | - bash: |
13 | sudo chown -R $USER /usr/share/miniconda
14 | condition: and(succeeded(), eq( variables['Agent.OS'], 'Linux' ))
15 | displayName: Take ownership of conda installation (Linux only)
16 |
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "python.analysis.typeCheckingMode": "basic",
3 | "python.testing.pytestArgs": [
4 | "Tests"
5 | ],
6 | "python.testing.unittestEnabled": false,
7 | "python.testing.pytestEnabled": true,
8 | "workbench.colorCustomizations": {
9 | "activityBar.background": "#07338b",
10 | "titleBar.activeBackground": "#07338b",
11 | "titleBar.activeForeground": "#F8FCE5"
12 | },
13 | "files.trimTrailingWhitespace": true,
14 | "files.trimFinalNewlines": true,
15 | "files.insertFinalNewline": true,
16 | }
17 |
--------------------------------------------------------------------------------
/docs/source/rst/api/Azure/azure.rst:
--------------------------------------------------------------------------------
1 | Azure
2 | =====
3 |
4 | Below you will find the documentation for `InnerEye/Azure `_.
5 |
6 |
7 | Runner
8 | ------
9 |
10 | .. automodule:: InnerEye.Azure.azure_runner
11 |
12 |
13 | Configuration
14 | -------------
15 |
16 | .. automodule:: InnerEye.Azure.azure_config
17 |
18 |
19 | Utils
20 | -----
21 |
22 | .. automodule:: InnerEye.Azure.azure_util
23 |
24 | .. automodule:: InnerEye.Azure.parser_util
25 |
26 | .. automodule:: InnerEye.Azure.secrets_handling
27 |
--------------------------------------------------------------------------------
/.idea/copyright/MIT_license.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/Tests/Scripts/script_for_tests.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 | import sys
6 |
7 | """
8 | This is a script that will be invoked from the test suite.
9 | """
10 |
11 | if __name__ == '__main__':
12 | arg = sys.argv[1]
13 | if arg != "correct":
14 | raise ValueError("Not correct")
15 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/classification_data_generated_random/dataset.csv:
--------------------------------------------------------------------------------
1 | subjectID,channel,path,value,date
2 | 1,image,im1.npy,,1970-01-01
3 | 1,label,,False,
4 | 2,image,im2.npy,,1970-01-01
5 | 2,label,,False,
6 | 3,image,im3.npy,,1970-01-01
7 | 3,label,,True,
8 | 4,image,im4.npy,,1970-01-01
9 | 4,label,,True,
10 | 5,image,im1.npy,,1970-01-01
11 | 5,label,,False,
12 | 6,image,im2.npy,,1970-01-01
13 | 6,label,,False,
14 | 7,image,im3.npy,,1970-01-01
15 | 7,label,,True,
16 | 8,image,im4.npy,,1970-01-01
17 | 8,label,,True,
18 | 9,image,im1.npy,,1970-01-01
19 | 9,label,,False,
20 | 10,image,im2.npy,,1970-01-01
21 | 10,label,,False,
22 |
--------------------------------------------------------------------------------
/InnerEye/README.md:
--------------------------------------------------------------------------------
1 | # Microsoft Research Cambridge InnerEye-DeepLearning for Medical Image Analysis
2 |
3 | ## Consuming the InnerEye package
4 |
5 | * You need to have a Conda installation on your machine.
6 | * Create a Conda environment file `environment.yml` in your source code with the following contents:
7 |
8 | ```yaml
9 | name: MyEnv
10 | channels:
11 | - defaults
12 | - pytorch
13 | dependencies:
14 | - pip=20.0.2
15 | - python=3.7.3
16 | - pytorch=1.3.0
17 | - pip:
18 | - innereye
19 | ```
20 |
21 | * Create a conda environment: `conda env create --file environment.yml`
22 | * Activate the environment: `conda activate MyEnv`
23 |
--------------------------------------------------------------------------------
/azure-pipelines/build_windows.yaml:
--------------------------------------------------------------------------------
1 | steps:
2 | - template: checkout_windows.yml
3 |
4 | - task: CredScan@3
5 | condition: and(succeeded(), eq( variables['Agent.OS'], 'Windows_NT' ))
6 |
7 | - task: PostAnalysis@1
8 | condition: and(succeeded(), eq( variables['Agent.OS'], 'Windows_NT' ))
9 | displayName: 'Post Analysis'
10 | inputs:
11 | CredScan: true
12 |
13 | - task: ComponentGovernanceComponentDetection@0
14 | condition: succeeded()
15 | inputs:
16 | scanType: 'Register'
17 | verbosity: 'Normal'
18 | alertWarningLevel: 'High'
19 | failOnAlert: false
20 | failOnStderr: true
21 |
--------------------------------------------------------------------------------
/InnerEye/ML/configs/segmentation/BasicModel2Epochs1Channel.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 |
6 | from InnerEye.ML.configs.segmentation.BasicModel2Epochs import BasicModel2Epochs
7 |
8 |
9 | class BasicModel2Epochs1Channel(BasicModel2Epochs):
10 | def __init__(self) -> None:
11 | super().__init__(image_channels=["ct"])
12 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_LungRegression/OUTPUT/test_dataset.csv:
--------------------------------------------------------------------------------
1 | subject,filePath,channel,seriesId
2 | 12,12/spinalcord.nii.gz,spinalcord,1.3.6.1.4.1.14519.5.2.1.7014.4598.303839966804062210456566892461
3 | 12,12/lung_r.nii.gz,lung_r,1.3.6.1.4.1.14519.5.2.1.7014.4598.303839966804062210456566892461
4 | 12,12/lung_l.nii.gz,lung_l,1.3.6.1.4.1.14519.5.2.1.7014.4598.303839966804062210456566892461
5 | 12,12/heart.nii.gz,heart,1.3.6.1.4.1.14519.5.2.1.7014.4598.303839966804062210456566892461
6 | 12,12/esophagus.nii.gz,esophagus,1.3.6.1.4.1.14519.5.2.1.7014.4598.303839966804062210456566892461
7 | 12,12/ct.nii.gz,ct,1.3.6.1.4.1.14519.5.2.1.7014.4598.303839966804062210456566892461
8 |
--------------------------------------------------------------------------------
/docs/source/rst/api/Common/common.rst:
--------------------------------------------------------------------------------
1 | Common
2 | =======
3 |
4 | This page contains the docs for all shared components that can be found under `InnerEye/Common/ `_.
5 |
6 | .. automodule:: InnerEye.Common.common_util
7 |
8 | .. automodule:: InnerEye.Common.fixed_paths_for_tests
9 |
10 | .. automodule:: InnerEye.Common.fixed_paths
11 |
12 | .. automodule:: InnerEye.Common.generic_parsing
13 |
14 | .. automodule:: InnerEye.Common.metrics_constants
15 |
16 | .. automodule:: InnerEye.Common.output_directories
17 |
18 | .. automodule:: InnerEye.Common.resource_monitor
19 |
20 | .. automodule:: InnerEye.Common.spawn_subprocess
21 |
--------------------------------------------------------------------------------
/docs/source/rst/api/ML/visualizers.rst:
--------------------------------------------------------------------------------
1 | Visualizers
2 | ===========
3 |
4 | Below you will find all classes and functions related to the visualization of models and outputs.
5 |
6 | .. automodule:: InnerEye.ML.visualizers.activation_maps
7 |
8 | .. automodule:: InnerEye.ML.visualizers.metrics_scatterplot
9 |
10 | .. automodule:: InnerEye.ML.visualizers.model_hooks
11 |
12 | .. automodule:: InnerEye.ML.visualizers.model_summary
13 |
14 | .. automodule:: InnerEye.ML.visualizers.patch_sampling
15 |
16 | .. automodule:: InnerEye.ML.visualizers.plot_cross_validation
17 |
18 | .. automodule:: InnerEye.ML.visualizers.regression_visualization
19 |
20 | .. automodule:: InnerEye.ML.visualizers.reliability_curve
21 |
--------------------------------------------------------------------------------
/InnerEye/ML/configs/ssl/cxr_linear_head_augmentations.yaml:
--------------------------------------------------------------------------------
1 | preprocess:
2 | center_crop_size: 224
3 | resize: 256
4 | augmentation:
5 | use_random_horizontal_flip: True
6 | use_random_affine: True
7 | use_random_color: True
8 | use_random_crop: True
9 | use_gamma_transform: True
10 | use_random_erasing: False
11 | add_gaussian_noise: False
12 | use_elastic_transform: False
13 | random_horizontal_flip:
14 | prob: 0.5
15 | random_affine:
16 | max_angle: 30
17 | max_horizontal_shift: 0.00
18 | max_vertical_shift: 0.00
19 | max_shear: 15
20 | random_color:
21 | brightness: 0.2
22 | contrast: 0.2
23 | saturation: 0.0
24 | random_crop:
25 | scale: (0.8, 1.0)
26 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | *.tar filter=lfs diff=lfs merge=lfs -text
2 | *.nii filter=lfs diff=lfs merge=lfs -text
3 | *.pth.tar filter=lfs diff=lfs merge=lfs -text
4 | *.h5 filter=lfs diff=lfs merge=lfs -text
5 | *.npy filter=lfs diff=lfs merge=lfs -text
6 | *.png filter=lfs diff=lfs merge=lfs -text
7 | *.npz filter=lfs diff=lfs merge=lfs -text
8 | *.dcm filter=lfs diff=lfs merge=lfs -text
9 | *.zip filter=lfs diff=lfs merge=lfs -text
10 | *.jpg filter=lfs diff=lfs merge=lfs -text
11 | *.tiff filter=lfs diff=lfs merge=lfs -text
12 | * text=auto
13 | *.tar binary
14 | *.nii binary
15 | *.nii.gz binary
16 | *.pth.tar binary
17 | *.npy binary
18 | *.npz binary
19 | *.dcm binary
20 | *.zip binary
21 | *.jpg binary
22 | *.tiff binary
23 |
--------------------------------------------------------------------------------
/docs/source/rst/api/ML/SSL/datamodules_and_datasets.rst:
--------------------------------------------------------------------------------
1 | SSL Datamodules and Datasets
2 | ============================
3 |
4 | Datasets
5 | ----------
6 |
7 | .. automodule:: InnerEye.ML.SSL.datamodules_and_datasets.cifar_datasets
8 |
9 | .. automodule:: InnerEye.ML.SSL.datamodules_and_datasets.cxr_datasets
10 |
11 |
12 | Dataset Class Utils
13 | --------------------
14 |
15 | .. automodule:: InnerEye.ML.SSL.datamodules_and_datasets.dataset_cls_utils
16 |
17 | Datamodules
18 | -----------
19 |
20 | .. automodule:: InnerEye.ML.SSL.datamodules_and_datasets.datamodules
21 |
22 |
23 | Transformation Utils
24 | --------------------
25 |
26 | .. automodule:: InnerEye.ML.SSL.datamodules_and_datasets.transforms_utils
27 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = source
9 | BUILDDIR = build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/.github/workflows/check-pr-title.yml:
--------------------------------------------------------------------------------
1 | name: 'Check pull request title'
2 | on:
3 | pull_request:
4 | types: [edited, opened, synchronize, reopened]
5 |
6 | jobs:
7 | title-check:
8 | runs-on: ubuntu-latest
9 | if: ${{ github.event.pull_request.user.login != 'dependabot[bot]' }}
10 | steps:
11 | - uses: naveenk1223/action-pr-title@master
12 | with:
13 | regex: '^[A-Z]+\:\s[A-Z].+[^\.]$' # Regex the title should match.
14 | allowed_prefixes: 'ENH,BUG,STYLE,DOC,DEL' # title should start with one of the given prefixes
15 | prefix_case_sensitive: true # title prefixes are case sensitive
16 | min_length: 10 # Min length of the title
17 | max_length: 72 # Max length of the title
18 |
--------------------------------------------------------------------------------
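To make the title rule above concrete, here is a small sketch that applies the same regular expression to a few made-up titles; only the first one passes the pattern (the prefix whitelist is enforced separately via `allowed_prefixes`):

```python
import re

# Pattern copied from .github/workflows/check-pr-title.yml above.
TITLE_REGEX = re.compile(r"^[A-Z]+\:\s[A-Z].+[^\.]$")

# Hypothetical titles, for illustration only.
examples = [
    "ENH: Add a new segmentation config",   # matches
    "enh: add a new segmentation config",   # fails: lowercase prefix and description
    "ENH: Add a new segmentation config.",  # fails: trailing full stop
]
for title in examples:
    print(f"{title!r} -> {bool(TITLE_REGEX.match(title))}")
```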
/Tests/ML/test_data/hdf5_data/dataset_missing_values.csv:
--------------------------------------------------------------------------------
1 | subject,USUBJID,SITEID,STUDYID,filePath,acquisition_date,feature
2 | 0001,RTH258-C000.2000.0000,2000,RTH258-C000,patient_hdf5s/4be9beed-5861-fdd2-72c2-8dd89aadc1ef.h5,2018-07-10T00:00:00,20
3 | ,RTH258-C000.2000.0001,,RTH258-C000,patient_hdf5s/61bc9d73-9fbb-bd7d-c06b-eeffbafabcc4.h5,2001-10-25T00:00:00,
4 | 0101,,2000,,,2012-07-17T00:00:00,32
5 | 0100,RTH258-C000.2000.0002,2000,RTH258-C000,patient_hdf5s/a46ebddd-b85f-de45-d8e3-9cdb10dbaf5f.h5,2017-11-19T00:00:00,64
6 | ,RTH258-C000.2005.0001,,RTH258-C000,patient_hdf5s/b3200426-1a58-bfea-4aba-cbacbe66ea5e.h5,2003-05-05T00:00:00,
7 | 1101,RTH258-C000.2005.0001,2005,RTH258-C000,patient_hdf5s/d316cfe5-e62a-3c0e-afda-72c3cf5ea2d8.h5,2001-12-02T00:00:00,70
8 |
--------------------------------------------------------------------------------
/.amlignore:
--------------------------------------------------------------------------------
1 | .idea
2 | .git
3 | .azureml
4 | .pytest_cache
5 | .mypy_cache
6 | .github
7 | .amlignore
8 | .coveragerc
9 | .editorconfig
10 | .flake8
11 | .gitattributes
12 | .gitconfig
13 | .gitignore
14 | .gitmodules
15 | CODE_OF_CONDUCT.md
16 | GeoPol.xml
17 | most_recent_run.txt
18 | mypy.ini
19 | mypy_runner.py
20 | pull_request_template.md
21 | SECURITY.md
22 | __pycache__
23 | azure-pipelines
24 | /datasets
25 | docs
26 | sphinx-docs
27 | modelweights
28 | outputs
29 | logs
30 | test_outputs
31 | run_outputs
32 | # Test output from model registration
33 | TestsOutsidePackage/azureml-models
34 | tensorboard_runs
35 | InnerEyeTestVariables.txt
36 | InnerEyePrivateSettings.yml
37 | cifar-10-batches-py
38 | cifar-100-python
39 | !**/InnerEye/ML/Histopathology/datasets
40 | None/
41 |
--------------------------------------------------------------------------------
/docs/source/rst/api/ML/SSL/lightning_modules.rst:
--------------------------------------------------------------------------------
1 | SSL Lightning Modules
2 | =====================
3 |
4 | Bootstrap Your Own Latent (BYOL)
5 | ------------------------------
6 |
7 | .. automodule:: InnerEye.ML.SSL.lightning_modules.byol.byol_models
8 |
9 | .. automodule:: InnerEye.ML.SSL.lightning_modules.byol.byol_module
10 |
11 | .. automodule:: InnerEye.ML.SSL.lightning_modules.byol.byol_moving_average
12 |
13 | SimCLR
14 | -------
15 |
16 | .. automodule:: InnerEye.ML.SSL.lightning_modules.simclr_module
17 |
18 |
19 | SSL Classifier
20 | -----------------
21 |
22 | .. automodule:: InnerEye.ML.SSL.lightning_modules.ssl_classifier_module
23 |
24 |
25 | Online Evaluator
26 | -----------------
27 |
28 | .. automodule:: InnerEye.ML.SSL.lightning_modules.ssl_online_evaluator
29 |
--------------------------------------------------------------------------------
/.amltignore:
--------------------------------------------------------------------------------
1 | .idea
2 | .git
3 | .azureml
4 | .pytest_cache
5 | .mypy_cache
6 | .github
7 | .amlignore
8 | .coveragerc
9 | .editorconfig
10 | .flake8
11 | .gitattributes
12 | .gitconfig
13 | .gitignore
14 | .gitmodules
15 | CODE_OF_CONDUCT.md
16 | GeoPol.xml
17 | most_recent_run.txt
18 | mypy.ini
19 | mypy_runner.py
20 | pull_request_template.md
21 | SECURITY.md
22 | __pycache__
23 | azure-pipelines
24 | /datasets
25 | docs
26 | sphinx-docs
27 | modelweights
28 | outputs
29 | logs
30 | test_outputs
31 | run_outputs
32 | # Test output from model registration
33 | TestsOutsidePackage/azureml-models
34 | tensorboard_runs
35 | InnerEyeTestVariables.txt
36 | InnerEyePrivateSettings.yml
37 | cifar-10-batches-py
38 | cifar-100-python
39 | !**/InnerEye/ML/Histopathology/datasets
40 | None/
41 | Tests/ML/test_data
42 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/plot_cross_validation/main_1570466706163110/0/Test/metrics.csv:
--------------------------------------------------------------------------------
1 | Patient,Structure,Dice
2 | 329,bladder,0.000
3 | 332,bladder,0.000
4 | 337,bladder,0.000
5 | 349,bladder,0.000
6 | 360,bladder,0.000
7 | 329,femur_l,0.141
8 | 360,femur_l,0.207
9 | 337,femur_l,0.216
10 | 349,femur_l,0.234
11 | 332,femur_l,0.276
12 | 329,femur_r,0.000
13 | 332,femur_r,0.000
14 | 337,femur_r,0.000
15 | 349,femur_r,0.000
16 | 360,femur_r,0.000
17 | 329,prostate,0.000
18 | 332,prostate,0.000
19 | 337,prostate,0.000
20 | 349,prostate,0.000
21 | 360,prostate,0.000
22 | 329,rectum,0.003
23 | 337,rectum,0.004
24 | 332,rectum,0.005
25 | 360,rectum,0.006
26 | 349,rectum,0.009
27 | 329,seminalvesicles,0.000
28 | 332,seminalvesicles,0.000
29 | 337,seminalvesicles,0.000
30 | 349,seminalvesicles,0.000
31 | 360,seminalvesicles,0.000
32 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/plot_cross_validation/main_1570466706163110/0/Val/metrics.csv:
--------------------------------------------------------------------------------
1 | Patient,Structure,Dice
2 | 320,bladder,0.000
3 | 343,bladder,0.000
4 | 344,bladder,0.000
5 | 374,bladder,0.000
6 | 409,bladder,0.000
7 | 320,femur_l,0.209
8 | 374,femur_l,0.212
9 | 409,femur_l,0.231
10 | 344,femur_l,0.246
11 | 343,femur_l,0.249
12 | 320,femur_r,0.000
13 | 343,femur_r,0.000
14 | 344,femur_r,0.000
15 | 374,femur_r,0.000
16 | 409,femur_r,0.000
17 | 320,prostate,0.000
18 | 343,prostate,0.000
19 | 344,prostate,0.000
20 | 374,prostate,0.000
21 | 409,prostate,0.000
22 | 374,rectum,0.002
23 | 320,rectum,0.003
24 | 343,rectum,0.004
25 | 409,rectum,0.004
26 | 344,rectum,0.008
27 | 320,seminalvesicles,0.000
28 | 343,seminalvesicles,0.000
29 | 344,seminalvesicles,0.000
30 | 374,seminalvesicles,0.000
31 | 409,seminalvesicles,0.000
32 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/plot_cross_validation/main_1570466706163110/1/Test/metrics.csv:
--------------------------------------------------------------------------------
1 | Patient,Structure,Dice
2 | 329,bladder,0.000
3 | 332,bladder,0.000
4 | 337,bladder,0.000
5 | 349,bladder,0.000
6 | 360,bladder,0.000
7 | 329,femur_l,0.134
8 | 360,femur_l,0.204
9 | 337,femur_l,0.211
10 | 349,femur_l,0.228
11 | 332,femur_l,0.268
12 | 329,femur_r,0.000
13 | 332,femur_r,0.000
14 | 337,femur_r,0.000
15 | 349,femur_r,0.000
16 | 360,femur_r,0.000
17 | 329,prostate,0.000
18 | 332,prostate,0.000
19 | 337,prostate,0.000
20 | 349,prostate,0.000
21 | 360,prostate,0.000
22 | 329,rectum,0.003
23 | 337,rectum,0.005
24 | 332,rectum,0.006
25 | 360,rectum,0.007
26 | 349,rectum,0.010
27 | 329,seminalvesicles,0.000
28 | 332,seminalvesicles,0.000
29 | 337,seminalvesicles,0.000
30 | 349,seminalvesicles,0.000
31 | 360,seminalvesicles,0.000
32 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/plot_cross_validation/main_1570466706163110/1/Val/metrics.csv:
--------------------------------------------------------------------------------
1 | Patient,Structure,Dice
2 | 306,bladder,0.000
3 | 341,bladder,0.000
4 | 365,bladder,0.000
5 | 366,bladder,0.000
6 | 411,bladder,0.000
7 | 366,femur_l,0.208
8 | 341,femur_l,0.220
9 | 306,femur_l,0.233
10 | 411,femur_l,0.236
11 | 365,femur_l,0.260
12 | 306,femur_r,0.000
13 | 365,femur_r,0.000
14 | 366,femur_r,0.000
15 | 411,femur_r,0.000
16 | 341,femur_r,0.001
17 | 306,prostate,0.000
18 | 341,prostate,0.000
19 | 365,prostate,0.000
20 | 366,prostate,0.000
21 | 411,prostate,0.000
22 | 365,rectum,0.007
23 | 306,rectum,0.008
24 | 366,rectum,0.009
25 | 411,rectum,0.009
26 | 341,rectum,0.011
27 | 306,seminalvesicles,0.000
28 | 341,seminalvesicles,0.000
29 | 365,seminalvesicles,0.000
30 | 411,seminalvesicles,0.000
31 | 366,seminalvesicles,0.001
32 |
--------------------------------------------------------------------------------
/docs/source/rst/api/ML/models/architectures.rst:
--------------------------------------------------------------------------------
1 | Architectures
2 | ==============
3 |
4 | Base Model
5 | -----------
6 |
7 | .. automodule:: InnerEye.ML.models.architectures.base_model
8 |
9 | U-Nets
10 | --------
11 | .. automodule:: InnerEye.ML.models.architectures.unet_3d
12 |
13 | .. automodule:: InnerEye.ML.models.architectures.unet_2d
14 |
15 | Classification
16 | ---------------
17 |
18 | .. automodule:: InnerEye.ML.models.architectures.classification.bit
19 |
20 | .. automodule:: InnerEye.ML.models.architectures.classification.image_encoder_with_mlp
21 |
22 | .. automodule:: InnerEye.ML.models.architectures.classification.segmentation_encoder
23 |
24 | Others
25 | -------
26 |
27 | .. automodule:: InnerEye.ML.models.architectures.complex
28 |
29 | .. automodule:: InnerEye.ML.models.architectures.mlp
30 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_GlaucomaCV/AZUREML_PARENT_OUTPUT/CrossValResults/metrics_aggregates.csv:
--------------------------------------------------------------------------------
1 | prediction_target,area_under_roc_curve,area_under_pr_curve,accuracy_at_optimal_threshold,false_positive_rate_at_optimal_threshold,false_negative_rate_at_optimal_threshold,optimal_threshold,cross_entropy,accuracy_at_threshold_05,subject_count,data_split,epoch
2 | Default,0.60021,0.78835,0.49554,0.14706,0.65385,0.39931,0.85572,0.30357,224.00000,Test,best_validation_epoch
3 | Default,0.54660,0.83040,0.39279,0.13537,0.70481,0.53955,0.68686,0.50501,998.00000,Train,0
4 | Default,0.63622,0.85393,0.70541,0.57205,0.20026,0.47925,0.67841,0.50501,998.00000,Train,1
5 | Default,0.60339,0.84457,0.45792,0.16157,0.65020,0.40186,0.84921,0.22946,998.00000,Val,0
6 | Default,0.61009,0.84893,0.46693,0.15284,0.64239,0.39429,0.89540,0.22946,998.00000,Val,1
7 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/plot_cross_validation/main_1570466706163110/ENSEMBLE/Test/metrics.csv:
--------------------------------------------------------------------------------
1 | Patient,Structure,Dice
2 | 329,bladder,0.000
3 | 332,bladder,0.000
4 | 337,bladder,0.000
5 | 349,bladder,0.000
6 | 360,bladder,0.000
7 | 329,femur_l,0.141
8 | 360,femur_l,0.207
9 | 337,femur_l,0.216
10 | 349,femur_l,0.234
11 | 332,femur_l,0.276
12 | 329,femur_r,0.000
13 | 332,femur_r,0.000
14 | 337,femur_r,0.000
15 | 349,femur_r,0.000
16 | 360,femur_r,0.000
17 | 329,prostate,0.000
18 | 332,prostate,0.000
19 | 337,prostate,0.000
20 | 349,prostate,0.000
21 | 360,prostate,0.000
22 | 329,rectum,0.003
23 | 337,rectum,0.004
24 | 332,rectum,0.005
25 | 360,rectum,0.006
26 | 349,rectum,0.009
27 | 329,seminalvesicles,0.000
28 | 332,seminalvesicles,0.000
29 | 337,seminalvesicles,0.000
30 | 349,seminalvesicles,0.000
31 | 360,seminalvesicles,0.000
32 |
--------------------------------------------------------------------------------
/Tests/ML/visualizers/test_reliability_curve.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 | import numpy as np
6 |
7 | from InnerEye.ML.visualizers.reliability_curve import plot_reliability_curve
8 |
9 |
10 | def test_plot_reliability_curve() -> None:
11 | prediction = [np.random.rand(250, 1), np.random.rand(200, 1)]
12 | target = [np.random.randint(2, size=(250, 1)), np.random.randint(2, size=(200, 1))]
13 | plot_reliability_curve(y_predict=prediction, y_true=target, num_bins=10, normalise=True)
14 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/test_aggregate_metrics_classification/Val/expected_metrics.csv:
--------------------------------------------------------------------------------
1 | epoch,subject,prediction_target,model_output,label,data_split,cross_validation_split_index
2 | 0,S5,Default,0.092429,0.000000,Val,1
3 | 0,S7,Default,0.095273,0.000000,Val,1
4 | 1,S5,Default,0.091557,0.000000,Val,1
5 | 1,S7,Default,0.094306,0.000000,Val,1
6 | 2,S5,Default,0.090680,0.000000,Val,1
7 | 2,S7,Default,0.093331,0.000000,Val,1
8 | 3,S7,Default,0.092348,0.000000,Val,1
9 | 3,S5,Default,0.089795,0.000000,Val,1
10 | 0,S6,Default,0.192429,0.000000,Val,1
11 | 0,S8,Default,0.195273,0.000000,Val,1
12 | 1,S6,Default,0.191557,0.000000,Val,1
13 | 1,S8,Default,0.194306,0.000000,Val,1
14 | 2,S6,Default,0.190680,0.000000,Val,1
15 | 2,S8,Default,0.193331,0.000000,Val,1
16 | 3,S6,Default,0.192348,0.000000,Val,1
17 | 3,S8,Default,0.189795,0.000000,Val,1
18 |
--------------------------------------------------------------------------------
/InnerEye/ML/models/layers/identity.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 | import torch
6 | import torch.nn as nn
7 |
8 |
9 | class Identity(nn.Module):
10 | """
11 | Implements an identity torch module where the input is passed through unchanged to the output.
12 | There are no parameters in the module.
13 | """
14 |
15 | def __init__(self) -> None:
16 | super(Identity, self).__init__()
17 |
18 | def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore
19 | return input
20 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/hdf5_data/dataset.csv:
--------------------------------------------------------------------------------
1 | subject,USUBJID,SITEID,STUDYID,filePath,acquisition_date,feature
2 | 0001,RTH258-C000.2000.0000,2000,RTH258-C000,patient_hdf5s/4be9beed-5861-fdd2-72c2-8dd89aadc1ef.h5,2018-07-10T00:00:00,20
3 | 0010,RTH258-C000.2000.0001,2000,RTH258-C000,patient_hdf5s/61bc9d73-9fbb-bd7d-c06b-eeffbafabcc4.h5,2001-10-25T00:00:00,80
4 | 0101,RTH258-C000.2000.0001,2000,RTH258-C000,patient_hdf5s/75eb4c55-debe-f906-fdff-a8ca3faf1ac5.h5,2012-07-17T00:00:00,32
5 | 0100,RTH258-C000.2000.0002,2000,RTH258-C000,patient_hdf5s/a46ebddd-b85f-de45-d8e3-9cdb10dbaf5f.h5,2017-11-19T00:00:00,64
6 | 1101,RTH258-C000.2005.0001,2005,RTH258-C000,patient_hdf5s/b3200426-1a58-bfea-4aba-cbacbe66ea5e.h5,2003-05-05T00:00:00,76
7 | 1101,RTH258-C000.2005.0001,2005,RTH258-C000,patient_hdf5s/d316cfe5-e62a-3c0e-afda-72c3cf5ea2d8.h5,2001-12-02T00:00:00,70
8 |
--------------------------------------------------------------------------------
/Tests/ML/utils/test_dataset_utils.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 | import numpy as np
6 | import pandas as pd
7 |
8 | from InnerEye.ML.utils.dataset_util import CategoricalToOneHotEncoder
9 |
10 |
11 | def test_one_hot_encoder_with_infinite_values() -> None:
12 | df = pd.DataFrame(columns=["categorical"])
13 | df["categorical"] = ["F", "M", np.inf]
14 | encoder = CategoricalToOneHotEncoder.create_from_dataframe(df, ["categorical"])
15 | assert np.isnan(encoder.encode({"categorical": np.inf})).all()
16 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/plot_cross_validation/multi_label_sequence_in_crossval/0/dataset.csv:
--------------------------------------------------------------------------------
1 | subject,FT,VA,VisitZeroBased,Week,Label
2 | 2137.00005,362,71,0,0,
3 | 2137.00005,357,69,1,4,0
4 | 2137.00005,355,64,2,8,0
5 | 2137.00005,355,63,3,12,1
6 | 2137.00125,348,64,0,16,0
7 | 2137.00125,316,68,1,20,0
8 | 2137.00125,349,68,2,24,0
9 | 2137.00125,361,67,3,28,0
10 | 2137.00125,350,68,4,32,0
11 | 2627.00001,477,58,0,0,0
12 | 2627.00001,220,59,1,4,0
13 | 2627.00001,222,60,2,8,0
14 | 2627.00001,217,65,5,20,1
15 | 2627.12341,210,60,0,24,0
16 | 2627.12341,217,61,1,28,0
17 | 2627.12341,224,63,2,32,1
18 | 3250.00005,344,76,0,0,0
19 | 3250.00005,233,76,1,4,0
20 | 3250.00005,212,84,2,8,0
21 | 3250.00005,215,84,3,12,0
22 | 3250.00005,215,82,4,16,0
23 | 3250.12345,233,84,0,20,0
24 | 3250.12345,218,84,1,24,0
25 | 3250.12345,221,84,2,28,0
26 | 3250.12345,238,84,3,32,0
27 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/test_aggregate_metrics_classification/Train/expected_metrics.csv:
--------------------------------------------------------------------------------
1 | epoch,subject,prediction_target,model_output,label,data_split,cross_validation_split_index
2 | 0,S2,Default,0.092429,0.000000,Train,1
3 | 0,S4,Default,0.095273,0.000000,Train,1
4 | 1,S2,Default,0.091557,0.000000,Train,1
5 | 1,S4,Default,0.094306,0.000000,Train,1
6 | 2,S2,Default,0.090680,0.000000,Train,1
7 | 2,S4,Default,0.093331,0.000000,Train,1
8 | 3,S4,Default,0.092348,0.000000,Train,1
9 | 3,S2,Default,0.089795,0.000000,Train,1
10 | 0,S3,Default,0.192429,0.000000,Train,1
11 | 0,S1,Default,0.195273,0.000000,Train,1
12 | 1,S3,Default,0.191557,0.000000,Train,1
13 | 1,S1,Default,0.194306,0.000000,Train,1
14 | 2,S3,Default,0.190680,0.000000,Train,1
15 | 2,S1,Default,0.193331,0.000000,Train,1
16 | 3,S3,Default,0.192348,0.000000,Train,1
17 | 3,S1,Default,0.189795,0.000000,Train,1
18 |
--------------------------------------------------------------------------------
/.idea/codeStyles/Project.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/InnerEye/ML/configs/ssl/CIFAR_classifier_configs.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 | from InnerEye.ML.SSL.lightning_containers.ssl_container import SSLDatasetName
6 | from InnerEye.ML.SSL.lightning_containers.ssl_image_classifier import SSLClassifierContainer
7 |
8 |
9 | class SSLClassifierCIFAR(SSLClassifierContainer):
10 | def __init__(self) -> None:
11 | super().__init__(
12 | linear_head_dataset_name=SSLDatasetName.CIFAR10,
13 | random_seed=1,
14 | num_epochs=100,
15 | l_rate=1e-4,
16 | num_workers=6)
17 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/ResultsByModeAndStructure.csv:
--------------------------------------------------------------------------------
1 | mode,Structure,mean,50%,min,max
2 | Test,bladder,93.620,93.900,92.400,94.400
3 | Test,femur_l,97.660,98.000,96.800,98.200
4 | Test,femur_r,97.380,97.400,96.200,98.200
5 | Test,prostate,83.520,84.400,76.800,87.700
6 | Test,rectum,80.920,83.600,59.100,91.700
7 | Test,seminalvesicles,77.300,79.100,63.100,84.300
8 | Train,bladder,90.775,91.650,81.000,98.800
9 | Train,femur_l,97.575,97.700,96.500,98.400
10 | Train,femur_r,97.425,97.250,96.700,98.500
11 | Train,prostate,85.550,87.300,78.500,89.100
12 | Train,rectum,91.450,91.000,90.900,92.900
13 | Train,seminalvesicles,84.100,84.500,77.100,90.300
14 | Val,bladder,94.627,95.000,88.200,98.300
15 | Val,femur_l,97.636,97.900,96.400,98.400
16 | Val,femur_r,97.591,97.900,96.500,98.400
17 | Val,prostate,81.836,85.800,60.400,92.100
18 | Val,rectum,84.382,84.000,68.200,92.100
19 | Val,seminalvesicles,75.009,76.300,45.700,90.500
20 |
--------------------------------------------------------------------------------
/InnerEye/ML/configs/ssl/cxr_ssl_encoder_augmentations.yaml:
--------------------------------------------------------------------------------
1 | preprocess:
2 | center_crop_size: 224
3 | resize: 256
4 | augmentation:
5 | use_random_horizontal_flip: True
6 | use_random_affine: True
7 | use_random_color: True
8 | use_random_crop: True
9 | use_gamma_transform: True
10 | use_random_erasing: True
11 | add_gaussian_noise: True
12 | use_elastic_transform: True
13 | random_horizontal_flip:
14 | prob: 0.5
15 | random_erasing:
16 | scale: (0.15, 0.4)
17 | ratio: (0.33, 3)
18 | random_affine:
19 | max_angle: 180
20 | max_horizontal_shift: 0.00
21 | max_vertical_shift: 0.00
22 | max_shear: 40
23 | elastic_transform:
24 | sigma: 4
25 | alpha: 34
26 | p_apply: 0.4
27 | random_color:
28 | brightness: 0.2
29 | contrast: 0.2
30 | saturation: 0.0
31 | random_crop:
32 | scale: (0.4, 1.0)
33 | gaussian_noise:
34 | std: 0.05
35 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: "\U0001F680 Feature Request"
3 | about: Submit a proposal/request for a new InnerEye feature
4 | title: ''
5 | labels: 'enhancement'
6 | assignees: ''
7 |
8 | ---
9 |
10 | **🚀 Feature**
11 |
12 |
13 | **Motivation**
14 |
15 |
16 |
17 | **Pitch**
18 |
19 |
20 |
21 | **Alternatives**
22 |
23 |
24 |
25 | **Additional context**
26 |
27 |
28 |
--------------------------------------------------------------------------------
/.github/workflows/issues_to_ado.yml:
--------------------------------------------------------------------------------
1 | name: Sync issues to Azure DevOps work items
2 |
3 | on:
4 | issues:
5 | types:
6 | [opened, edited, deleted, closed, reopened, labeled, unlabeled, assigned]
7 |
8 | jobs:
9 | issues_to_ado:
10 | name: Sync issues with Azure DevOps
11 | runs-on: ubuntu-latest
12 | steps:
13 | - uses: danhellem/github-actions-issue-to-work-item@master
14 | env:
15 | ado_token: "${{ secrets.ADO_PERSONAL_ACCESS_TOKEN }}"
16 | github_token: "${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}"
17 | ado_organization: "innereye"
18 | ado_project: "InnerEye"
19 | ado_area_path: "InnerEye\\OSS"
20 | ado_iteration_path: "InnerEye"
21 | ado_wit: "User Story"
22 | ado_new_state: "New"
23 | ado_active_state: "Active"
24 | ado_close_state: "Closed"
25 | ado_bypassrules: true
26 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=source
11 | set BUILDDIR=build
12 |
13 | %SPHINXBUILD% >NUL 2>NUL
14 | if errorlevel 9009 (
15 | echo.
16 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
17 | echo.installed, then set the SPHINXBUILD environment variable to point
18 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
19 | echo.may add the Sphinx directory to PATH.
20 | echo.
21 | echo.If you don't have Sphinx installed, grab it from
22 | echo.https://www.sphinx-doc.org/
23 | exit /b 1
24 | )
25 |
26 | if "%1" == "" goto help
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/azure-pipelines/tests_after_training.yml:
--------------------------------------------------------------------------------
1 | # This pipeline assumes that a Conda environment named InnerEye already exists. It will run pytest and publish the test results.
2 | steps:
3 | # The run ID of the training run is now available in most_recent_run.txt.
4 | - bash: |
5 | source activate InnerEye
6 | pytest ./Tests -m "${{parameters.pytest_mark}}" --junitxml=junit/test-${{parameters.test_run_title}}.xml
7 | env:
8 | PYTHONPATH: $(Build.SourcesDirectory)
9 | APPLICATION_KEY: $(InnerEyeDeepLearningServicePrincipalKey)
10 | BUILD_BRANCH: $(Build.SourceBranch)
11 | displayName: Run pytests that require training run
12 |
13 | - task: PublishTestResults@2
14 | inputs:
15 | testResultsFiles: junit/test-${{parameters.test_run_title}}.xml
16 | testRunTitle: ${{parameters.test_run_title}}
17 | condition: succeededOrFailed()
18 | displayName: Publish test results ${{parameters.test_run_title}}
19 |
--------------------------------------------------------------------------------
/docs/source/md/releases.md:
--------------------------------------------------------------------------------
1 | # Releases
2 |
3 | The InnerEye toolbox is in an early stage, where many of its inner workings are changing rapidly. However, the
4 | purely config-driven approach to model building should remain stable. That is, you can expect backwards
5 | compatibility if you are building models by creating configuration files and changing the fields of the classes
6 | that define, say, a segmentation model. The same goes for all Azure-related configuration options.
7 | If your code relies on specific functions inside the InnerEye code base, you should expect that these can change.
8 |
9 | The current InnerEye codebase is not published as a Python package, and hence does not have implicit version numbers.
10 | We apply Git tags instead, with increments corresponding to what would otherwise be major/minor versions.
11 |
12 | Please refer to the [Changelog](https://github.com/microsoft/InnerEye-DeepLearning/blob/main/CHANGELOG.md) for an overview of recent changes.
13 |
--------------------------------------------------------------------------------
/InnerEye/Common/type_annotations.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 | from pathlib import Path
6 | from typing import Dict, Iterable, List, Optional, Tuple, TypeVar, Union
7 |
8 | T = TypeVar('T')
9 | PathOrString = Union[Path, str]
10 | TupleStringOptionalFloat = Tuple[str, Optional[float]]
11 | TupleInt2 = Tuple[int, int]
12 | TupleInt3 = Tuple[int, int, int]
13 | TupleFloat2 = Tuple[float, float]
14 | TupleFloat3 = Tuple[float, float, float]
15 | TupleFloat9 = Tuple[float, float, float, float, float, float, float, float, float]
16 | IntOrTuple3 = Union[int, TupleInt3, Iterable]
17 | DictStrFloat = Dict[str, float]
18 | DictStrFloatOrFloatList = Dict[str, Union[float, List[float]]]
19 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_BasicModel2Epochs/OUTPUT/best_validation_epoch/Test/MetricsAcrossAllRuns.csv:
--------------------------------------------------------------------------------
1 | ,Patient,Structure,Dice,HausdorffDistance_mm,MeanDistance_mm,seriesId,institutionId,split,mode
2 | 0,5,lung_l,0.0,inf,inf,402ba5d42f37357f18f29af17b0846cbca4c430fea5b15a6baad5ec29dd6c9ba,b7f757fb-12e0-489e-a6da-f64895cdd229,CURRENT,Test
3 | 6,5,lung_r,0.0,inf,inf,402ba5d42f37357f18f29af17b0846cbca4c430fea5b15a6baad5ec29dd6c9ba,b7f757fb-12e0-489e-a6da-f64895cdd229,CURRENT,Test
4 | 12,5,spinalcord,0.0,inf,inf,402ba5d42f37357f18f29af17b0846cbca4c430fea5b15a6baad5ec29dd6c9ba,b7f757fb-12e0-489e-a6da-f64895cdd229,CURRENT,Test
5 | 0,5,lung_l,0.0,inf,inf,402ba5d42f37357f18f29af17b0846cbca4c430fea5b15a6baad5ec29dd6c9ba,b7f757fb-12e0-489e-a6da-f64895cdd229,Single,Test
6 | 6,5,lung_r,0.0,inf,inf,402ba5d42f37357f18f29af17b0846cbca4c430fea5b15a6baad5ec29dd6c9ba,b7f757fb-12e0-489e-a6da-f64895cdd229,Single,Test
7 | 12,5,spinalcord,0.0,inf,inf,402ba5d42f37357f18f29af17b0846cbca4c430fea5b15a6baad5ec29dd6c9ba,b7f757fb-12e0-489e-a6da-f64895cdd229,Single,Test
8 |
--------------------------------------------------------------------------------
/.idea/InnerEye-DeepLearning.iml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/Tests/ML/test_data/train_and_test_data/scalar_epoch_metrics.csv:
--------------------------------------------------------------------------------
1 | loss,cross_entropy,accuracy_at_threshold_05,seconds_per_batch,seconds_per_epoch,learning_rate,area_under_roc_curve,area_under_pr_curve,accuracy_at_optimal_threshold,false_positive_rate_at_optimal_threshold,false_negative_rate_at_optimal_threshold,optimal_threshold,subject_count,epoch,cross_validation_split_index
2 | 0.7016490995883942,0.7016490697860718,0.375,0.02346169948577881,0.06398129463195801,0.0001,0.8125,0.85,0.6666666865348816,0.0,0.5,0.5281670689582825,6.0,1,-1
3 | 0.702895998954773,0.7028960287570953,0.375,0.019975781440734863,0.05499887466430664,9.999712322065557e-05,0.5625,0.6458333333333333,0.6666666865348816,0.0,0.5,0.5280245542526245,6.0,2,-1
4 | 0.7125596106052399,0.712559700012207,0.25,0.02925896644592285,0.07402157783508301,9.999306876841536e-05,0.875,0.875,0.6666666865348816,0.0,0.5,0.5277201533317566,6.0,3,-1
5 | 0.7119755446910858,0.711975485086441,0.25,0.025499820709228516,0.0679938793182373,9.998613801725043e-05,0.875,0.875,0.6666666865348816,0.0,0.5,0.5274553894996643,6.0,4,-1
6 |
--------------------------------------------------------------------------------
/Tests/ML/visualizers/test_regression_visualization.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 | import os
6 |
7 | import numpy as np
8 |
9 | from InnerEye.Common.output_directories import OutputFolderForTests
10 | from InnerEye.ML.visualizers.regression_visualization import plot_variation_error_prediction
11 |
12 |
13 | def test_plot_variation_errors_for_regression(test_output_dirs: OutputFolderForTests) -> None:
14 | plot_variation_error_prediction(
15 | labels=np.array([10, 20, 20, 40, 10, 60, 90]),
16 | predictions=np.array([12, 25, 10, 36, 11, 69, 90]),
17 | filename=os.path.join(test_output_dirs.root_dir, "error_plot.png"))
18 | assert os.path.isfile(os.path.join(test_output_dirs.root_dir, "error_plot.png"))
19 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | # See https://pre-commit.com for more information
2 | # See https://pre-commit.com/hooks.html for more hooks
3 | # See https://github.com/pre-commit/pre-commit-hooks/blob/master/.pre-commit-config.yaml for an example with more hooks
4 |
5 | ci:
6 | autofix_prs: false
7 | autoupdate_commit_msg: "ENH: Autoupdate hooks [pre-commit.ci]"
8 | autoupdate_schedule: monthly
9 |
10 | repos:
11 | - repo: https://github.com/pre-commit/pre-commit-hooks
12 | rev: v4.3.0
13 | hooks:
14 | - id: trailing-whitespace
15 | - id: end-of-file-fixer
16 | - id: check-yaml
17 | - id: check-added-large-files
18 | - id: check-ast
19 | - id: check-merge-conflict
20 | - id: debug-statements
21 | - id: mixed-line-ending
22 | args:
23 | - --fix=lf
24 |
25 | - repo: https://github.com/PyCQA/flake8
26 | rev: 4.0.1
27 | hooks:
28 | - id: flake8
29 | additional_dependencies:
30 | - flake8-typing-imports==1.7.0
31 |
32 | - repo: https://github.com/pre-commit/mirrors-autopep8
33 | rev: v1.6.0
34 | hooks:
35 | - id: autopep8
36 |
--------------------------------------------------------------------------------
/.github/pull_request_template.md:
--------------------------------------------------------------------------------
1 | Please make sure to complete all the tasks in the checklist below. Then, delete this line and write a detailed description of your pull request here.
2 |
3 |
19 |
--------------------------------------------------------------------------------
/azure-pipelines/train_template.yml:
--------------------------------------------------------------------------------
1 | steps:
2 | - template: inner_eye_env.yml
3 |
4 | - bash: |
5 | set -e # This makes the script fail if any command in it fails, not just the last one
6 | source activate InnerEye
7 | branch_prefix="refs/heads/"
8 | full_branch_name=$(Build.SourceBranch)
9 | branch_name_without_prefix=${full_branch_name#$branch_prefix}
10 | python ./InnerEye/ML/runner.py --azureml=True --model="$(model)" --train="$(train)" $(more_switches) --number_of_cross_validation_splits="$(number_of_cross_validation_splits)" --wait_for_completion="${{parameters.wait_for_completion}}" --max_run_duration="${{parameters.max_run_duration}}" --pytest_mark="${{parameters.pytest_mark}}" --cluster="$(cluster)" --run_recovery_id="$(run_recovery_id)" --tag="$(tag)" --build_number=$(Build.BuildId) --build_user="$(Build.RequestedFor)" --build_user_email="" --build_branch="$branch_name_without_prefix" --build_source_repository="$(Build.Repository.Name)"
11 | env:
12 | PYTHONPATH: $(Build.SourcesDirectory)/
13 | APPLICATION_KEY: $(InnerEyeDeepLearningServicePrincipalKey)
14 | displayName: 'Queue AzureML Job'
15 |
--------------------------------------------------------------------------------
/docs/source/rst/api/ML/configs/segmentation.rst:
--------------------------------------------------------------------------------
1 | Segmentation Configs
2 | =====================
3 |
4 | Basic Models
5 | ------------
6 |
7 | .. automodule:: InnerEye.ML.configs.segmentation.BasicModel2Epochs
8 |
9 | .. automodule:: InnerEye.ML.configs.segmentation.BasicModel2Epochs1Channel
10 |
11 | .. automodule:: InnerEye.ML.configs.segmentation.BasicModel2EpochsMoreData
12 |
13 | HelloWorld
14 | ------------
15 |
16 | .. automodule:: InnerEye.ML.configs.segmentation.HelloWorld
17 |
18 | Head and Neck
19 | -------------
20 |
21 | .. automodule:: InnerEye.ML.configs.segmentation.HeadAndNeckBase
22 |
23 | .. automodule:: InnerEye.ML.configs.segmentation.HeadAndNeckPaper
24 |
25 | Prostate
26 | -------------
27 |
28 | .. automodule:: InnerEye.ML.configs.segmentation.ProstateBase
29 |
30 | .. automodule:: InnerEye.ML.configs.segmentation.ProstatePaper
31 |
32 | Hippocampus
33 | ------------
34 |
35 | .. automodule:: InnerEye.ML.configs.segmentation.Hippocampus
36 |
37 | Lung
38 | ------------
39 |
40 | .. automodule:: InnerEye.ML.configs.segmentation.Lung
41 |
42 | Glioblastoma
43 | ------------
44 |
45 | .. automodule:: InnerEye.ML.configs.segmentation.GbmBase
46 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/dataset_with_full_header.csv:
--------------------------------------------------------------------------------
1 | subject,filePath,channel,seriesId,institutionId,imageFilePath,groundTruthFilePath,tags,DIM_X,DIM_Y,DIM_Z
2 | 511,full_header_csv/rectum.nii.gz,rectum,22ef9c5e149650f9cb241d1aa622ad1731b91d1a1df770c05541228b47845ae4,85aaee5f-f5f3-4eae-b6cd-26b0070156d8,BAR/a3b7376117036979dd0ca3bac410a9d70dde9b76c17eb773eaebb19b14172ba9/6ae4ad0d87e561b679d2b9ed86077942f63b6621b5859548fb176707eab0e921/CT_22ef9c5e149650f9cb241d1aa622ad1731b91d1a1df770c05541228b47845ae4_20181127140946.zip,FOO/a3b7376117036979dd0ca3bac410a9d70dde9b76c17eb773eaebb19b14172ba9/6ae4ad0d87e561b679d2b9ed86077942f63b6621b5859548fb176707eab0e921/StructureSet.zip,[FOO;BAR],500,500,107
3 | 511,full_header_csv/ptv.nii.gz,ptv,22ef9c5e149650f9cb241d1aa622ad1731b91d1a1df770c05541228b47845ae4,85aaee5f-f5f3-4eae-b6cd-26b0070156d8,BAR/a3b7376117036979dd0ca3bac410a9d70dde9b76c17eb773eaebb19b14172ba9/6ae4ad0d87e561b679d2b9ed86077942f63b6621b5859548fb176707eab0e921/CT_22ef9c5e149650f9cb241d1aa622ad1731b91d1a1df770c05541228b47845ae4_20181127140946.zip,FOO/a3b7376117036979dd0ca3bac410a9d70dde9b76c17eb773eaebb19b14172ba9/6ae4ad0d87e561b679d2b9ed86077942f63b6621b5859548fb176707eab0e921/StructureSet.zip,[FOO;BAR],500,500,107
4 |
--------------------------------------------------------------------------------
/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | testpaths=Tests TestsOutsidePackage TestSubmodule
3 | norecursedirs=azure-pipelines docs sphinx-docs InnerEye logs outputs test_data Tests/ML/datasets
4 | addopts=--strict-markers
5 | markers=
6 | gpu: Test needs a GPU to run
7 | cpu_and_gpu: Test needs to run twice, once on CPU and once on GPU
8 | azureml: Long running test, run this test as part of the PR build run on AzureML
9 | after_training_single_run: Tests the result of a recently submitted AzureML run (non-ensemble). Relies on most_recent_run.txt
10 | after_training_ensemble_run: Tests the result of a recently submitted ensemble run on AzureML. Relies on most_recent_run.txt
11 | after_training_2node: Tests the result of a recently submitted multi-node training job in AzureML. Relies on most_recent_run.txt
12 | inference: Tests inference runs on a recently trained model. Relies on most_recent_run.txt
13 | after_training_glaucoma_cv_run: Tests the result of a recently submitted classification cross-validation run. Relies on most_recent_run.txt
14 | after_training_hello_container: Tests the result of a recently submitted HelloContainer AzureML training run. Relies on most_recent_run.txt
15 |
--------------------------------------------------------------------------------
/Tests/ML/utils/test_model_metadata_util.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 | import random
6 |
7 | from InnerEye.ML.utils.model_metadata_util import generate_random_colours_list, random_colour
8 |
9 |
10 | def test_random_colour() -> None:
11 | """
12 | Test random colours
13 | """
14 | rng = random.Random(0)
15 | r, g, b = random_colour(rng)
16 | assert 0 <= r < 255
17 | assert 0 <= g < 255
18 | assert 0 <= b < 255
19 |
20 |
21 | def test_generate_random_colours_list() -> None:
22 | """
23 | Test list of random colours
24 | """
25 | rng = random.Random(0)
26 | expected_list_len = 10
27 | list_colours = generate_random_colours_list(rng, 10)
28 | assert len(list_colours) == expected_list_len
29 | for r, g, b in list_colours:
30 | assert 0 <= r < 255
31 | assert 0 <= g < 255
32 | assert 0 <= b < 255
33 |
--------------------------------------------------------------------------------
/docs/source/md/testing.md:
--------------------------------------------------------------------------------
1 | # Pytest and testing on CPU and GPU machines
2 |
3 | All non-trivial proposed changes to the code base should be accompanied by tests.
4 |
5 | Each PullRequest build will run all tests in the repository on CPU machines. One full test run is executed on a Windows
6 | agent, one on a Linux agent. Tests with the pytest mark `gpu` or `azureml` will not be executed in this run.
7 |
8 | In addition, `pytest` will be run as part of the small AzureML job (smoke test) that is part of the PR build.
9 | In that test run, only specific tests will be executed: at present, those marked
10 | with the pytest marks `gpu`, `cpu_and_gpu` or `azureml`. The AzureML job executes on a GPU VM, hence you can have
11 | tests for GPU-specific capabilities.
12 |
13 | To mark one of your tests as requiring a GPU, decorate it as follows:
14 |
15 | @pytest.mark.gpu
16 | def test_my_gpu_code() -> None:
17 | ...
18 |
19 | Similarly, use the mark `cpu_and_gpu` for tests that should be run twice, once on the CPU and once on the GPU. For
20 | tests that do not require a GPU, but have longer running times and are better suited to run on the PR build, use the
21 | mark `azureml`.
22 |
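23 | As a minimal illustration, a test that should run on both CPU and GPU, and a longer-running test that is
24 | better suited to the AzureML run, could be marked as follows (the test names here are made up for illustration):
25 |
26 |     import pytest
27 |
28 |     @pytest.mark.cpu_and_gpu
29 |     def test_my_code_on_cpu_and_gpu() -> None:
30 |         ...
31 |
32 |     @pytest.mark.azureml
33 |     def test_my_long_running_code() -> None:
34 |         ...
35 |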
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) Microsoft Corporation.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/.idea/runConfigurations/Template__Tensorboard_monitoring.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/azure-pipelines/cancel_aml_jobs.yml:
--------------------------------------------------------------------------------
1 | steps:
2 | - checkout: self
3 |
4 | - template: prepare_conda.yml
5 |
6 | # https://docs.microsoft.com/en-us/azure/devops/pipelines/release/caching?view=azure-devops#pythonanaconda
7 | - task: Cache@2
8 | displayName: Use cached Conda environment AzureML_SDK
9 | inputs:
10 | # Beware of changing the cache key or path independently, safest to change in sync
11 | key: 'usr_share_miniconda_azureml_conda | "$(Agent.OS)" | azure-pipelines/azureml-conda-environment.yml'
12 | path: /usr/share/miniconda/envs
13 | cacheHitVar: CONDA_CACHE_RESTORED
14 |
15 | - script: conda env create --file azure-pipelines/azureml-conda-environment.yml
16 | displayName: Create Conda environment AzureML_SDK
17 | condition: eq(variables.CONDA_CACHE_RESTORED, 'false')
18 |
19 | - bash: |
20 | source activate AzureML_SDK
21 | python azure-pipelines/cancel_aml_jobs.py
22 | displayName: Cancel jobs from previous run
23 | env:
24 | SUBSCRIPTION_ID: $(InnerEyeDevSubscriptionID)
25 | APPLICATION_ID: $(InnerEyeDeepLearningServicePrincipalID)
26 | APPLICATION_KEY: $(InnerEyeDeepLearningServicePrincipalKey)
27 | BRANCH: $(Build.SourceBranch)
28 |
--------------------------------------------------------------------------------
/RegressionTestResults/PR_LungRegression/OUTPUT/val_dataset.csv:
--------------------------------------------------------------------------------
1 | subject,filePath,channel,seriesId
2 | 10,10/spinalcord.nii.gz,spinalcord,1.3.6.1.4.1.14519.5.2.1.7014.4598.205539610686399129384253975216
3 | 10,10/lung_r.nii.gz,lung_r,1.3.6.1.4.1.14519.5.2.1.7014.4598.205539610686399129384253975216
4 | 10,10/lung_l.nii.gz,lung_l,1.3.6.1.4.1.14519.5.2.1.7014.4598.205539610686399129384253975216
5 | 10,10/heart.nii.gz,heart,1.3.6.1.4.1.14519.5.2.1.7014.4598.205539610686399129384253975216
6 | 10,10/esophagus.nii.gz,esophagus,1.3.6.1.4.1.14519.5.2.1.7014.4598.205539610686399129384253975216
7 | 10,10/ct.nii.gz,ct,1.3.6.1.4.1.14519.5.2.1.7014.4598.205539610686399129384253975216
8 | 11,11/spinalcord.nii.gz,spinalcord,1.3.6.1.4.1.14519.5.2.1.7014.4598.189721990631943197010794868711
9 | 11,11/lung_r.nii.gz,lung_r,1.3.6.1.4.1.14519.5.2.1.7014.4598.189721990631943197010794868711
10 | 11,11/lung_l.nii.gz,lung_l,1.3.6.1.4.1.14519.5.2.1.7014.4598.189721990631943197010794868711
11 | 11,11/heart.nii.gz,heart,1.3.6.1.4.1.14519.5.2.1.7014.4598.189721990631943197010794868711
12 | 11,11/esophagus.nii.gz,esophagus,1.3.6.1.4.1.14519.5.2.1.7014.4598.189721990631943197010794868711
13 | 11,11/ct.nii.gz,ct,1.3.6.1.4.1.14519.5.2.1.7014.4598.189721990631943197010794868711
14 |
--------------------------------------------------------------------------------
/InnerEye/ML/configs/segmentation/BasicModel2EpochsMoreData.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 | import pandas as pd
6 |
7 | from InnerEye.ML.configs.segmentation.BasicModel2Epochs import BasicModel2Epochs
8 | from InnerEye.ML.utils.split_dataset import DatasetSplits
9 |
10 |
11 | class BasicModel2EpochsMoreData(BasicModel2Epochs):
12 | """
13 | A clone of the basic PR build model that has more training data, to avoid PyTorch throwing failures
14 | when individual ranks do not have enough data to train on.
15 | """
16 |
17 | def __init__(self) -> None:
18 | super().__init__()
19 |
20 | def get_model_train_test_dataset_splits(self, dataset_df: pd.DataFrame) -> DatasetSplits:
21 | return DatasetSplits.from_subject_ids(
22 | df=dataset_df,
23 | train_ids=['0', '1', '2', '3'],
24 | test_ids=['4', '5', '6', '7'],
25 | val_ids=['8', '9']
26 | )
27 |
--------------------------------------------------------------------------------
/.idea/runConfigurations/Template__Cross_Validation.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/runConfigurations/Template__Visualize_Patch_Sampling.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/runConfigurations/Template__Plot_Cross_Validation_Results.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/documentation_issue.yml:
--------------------------------------------------------------------------------
1 | name: "\U0001F4D2 Documentation Problem"
2 | description: If the documentation is unclear or missing information, please create a report to help us improve InnerEye-DeepLearning
3 | labels: ["documentation"]
4 |
5 | body:
6 | - type: checkboxes
7 | attributes:
8 | label: Is there an existing issue for this?
9 | description: Please search to see if an issue already exists for the problem you encountered.
10 | options:
11 | - label: I have searched the existing issues
12 | required: true
13 |
14 | - type: textarea
15 | id: summary
16 | attributes:
17 | label: Issue summary
18 | description: Please provide 1-2 short sentences describing where the documentation should be improved. If applicable,
19 | please provide the URL of the file that needs updating.
20 | placeholder: Description of where an update is needed, URL of the file if possible.
21 | validations:
22 | required: true
23 |
24 | - type: textarea
25 | id: expected
26 | attributes:
27 | label: What documentation should be provided?
28 | description: Please provide a description of what information should be captured or corrected in the documentation.
29 | validations:
30 | required: true
31 |
--------------------------------------------------------------------------------
/.idea/runConfigurations/Template__Run_ML_on_local_machine.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/InnerEye/ML/utils/model_metadata_util.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 | import random
6 | from typing import List
7 |
8 | from InnerEye.Common.type_annotations import TupleInt3
9 |
10 |
11 | def random_colour(rng: random.Random) -> TupleInt3:
12 | """
13 | Generates a random colour in RGB given a random number generator
14 |
15 | :param rng: Random number generator
16 | :return: Tuple with random colour in RGB
17 | """
18 | r = rng.randint(0, 255)
19 | g = rng.randint(0, 255)
20 | b = rng.randint(0, 255)
21 | return r, g, b
22 |
23 |
24 | def generate_random_colours_list(rng: random.Random, size: int) -> List[TupleInt3]:
25 | """
26 | Generates a list of random colours in RGB given a random number generator and the size of this list
27 |
28 | :param rng: random number generator
29 | :param size: size of the list
30 | :return: list of random colours in RGB
31 | """
32 | return [random_colour(rng) for _ in range(size)]
33 |
--------------------------------------------------------------------------------
/.idea/runConfigurations/Template__Submit_image_for_inference.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/InnerEye/ML/configs/regression/LungRegression.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 |
3 | from InnerEye.ML.configs.segmentation.Lung import Lung
4 | from InnerEye.ML.utils.split_dataset import DatasetSplits
5 |
6 |
7 | class LungRegression(Lung):
8 | """
9 | Model used for regression testing the Lung model. Uses the same data as the published model,
10 | but only runs for a small number of epochs and on a smaller subset of the data. This ensures that
11 | the training time is kept under 30 minutes, so that PRs are not slowed down significantly.
12 | """
13 |
14 | def __init__(self) -> None:
15 | super().__init__(
16 | azure_dataset_id="lung_for_regression_test_11_2022",
17 | train_batch_size=3,
18 | num_epochs=25,
19 | pl_deterministic=True,
20 | test_crop_size=(64, 256, 256),
21 | )
22 |
23 | def get_model_train_test_dataset_splits(self, dataset_df: pd.DataFrame) -> DatasetSplits:
24 | train = list(map(str, range(0, 10)))
25 | val = list(map(str, range(10, 12)))
26 | test = list(map(str, range(12, 13)))
27 |
28 | return DatasetSplits.from_subject_ids(
29 | df=dataset_df,
30 | train_ids=train,
31 | val_ids=val,
32 | test_ids=test,
33 | )
34 |
--------------------------------------------------------------------------------
/.idea/runConfigurations/pytest_all_simple_tests.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/Tests/ML/test_data/train_and_test_data/scalar_prediction_target_metrics.csv:
--------------------------------------------------------------------------------
1 | prediction_target,epoch,subject,model_output,label,cross_validation_split_index,data_split
2 | Default,1,S4,0.5216594338417053,0.0,-1,Train
3 | Default,1,S5,0.5216594338417053,1.0,-1,Train
4 | Default,1,S6,0.5216594338417053,0.0,-1,Train
5 | Default,1,S7,0.5216594338417053,0.0,-1,Train
6 | Default,1,S8,0.5213258266448975,0.0,-1,Train
7 | Default,1,S1,0.5281670689582825,1.0,-1,Train
8 | Default,2,S1,0.5280245542526245,1.0,-1,Train
9 | Default,2,S4,0.521056056022644,0.0,-1,Train
10 | Default,2,S7,0.521056056022644,0.0,-1,Train
11 | Default,2,S6,0.521056056022644,0.0,-1,Train
12 | Default,2,S8,0.5207627415657043,0.0,-1,Train
13 | Default,2,S5,0.5207627415657043,1.0,-1,Train
14 | Default,3,S5,0.5205143690109253,1.0,-1,Train
15 | Default,3,S8,0.5205143690109253,0.0,-1,Train
16 | Default,3,S1,0.5277201533317566,1.0,-1,Train
17 | Default,3,S7,0.5205143690109253,0.0,-1,Train
18 | Default,3,S4,0.5202868580818176,0.0,-1,Train
19 | Default,3,S6,0.5202868580818176,0.0,-1,Train
20 | Default,4,S1,0.5274553894996643,1.0,-1,Train
21 | Default,4,S8,0.520030677318573,0.0,-1,Train
22 | Default,4,S5,0.520030677318573,1.0,-1,Train
23 | Default,4,S4,0.520030677318573,0.0,-1,Train
24 | Default,4,S7,0.5197962522506714,0.0,-1,Train
25 | Default,4,S6,0.5197962522506714,0.0,-1,Train
26 |
--------------------------------------------------------------------------------
/InnerEye/ML/SSL/datamodules_and_datasets/cifar_datasets.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 |
6 | from torchvision.datasets import CIFAR10, CIFAR100
7 |
8 | from InnerEye.ML.SSL.datamodules_and_datasets.dataset_cls_utils import InnerEyeDataClassBaseWithReturnIndex
9 |
10 |
11 | class InnerEyeCIFAR10(InnerEyeDataClassBaseWithReturnIndex, CIFAR10):
12 | """
13 | Wrapper class around torchvision CIFAR10 class to optionally return the
14 | index on top of the image and the label in __getitem__, as well as defining the num_classes property.
15 | """
16 |
17 | @property
18 | def num_classes(self) -> int:
19 | return 10
20 |
21 |
22 | class InnerEyeCIFAR100(InnerEyeDataClassBaseWithReturnIndex, CIFAR100):
23 | """
24 | Wrapper class around torchvision CIFAR100 class to optionally return the
25 | index on top of the image and the label in __getitem__, as well as defining the num_classes property.
26 | """
27 |
28 | @property
29 | def num_classes(self) -> int:
30 | return 100
31 |
--------------------------------------------------------------------------------
/docs/source/index.rst:
--------------------------------------------------------------------------------
1 | .. InnerEye documentation master file, created by
2 | sphinx-quickstart on Sun Jun 28 18:04:34 2020.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | InnerEye-DeepLearning Documentation
7 | ===================================
8 |
9 | .. toctree::
10 | :maxdepth: 1
11 | :caption: Overview and user guides
12 |
13 | md/README.md
14 | md/environment.md
15 | md/WSL.md
16 | md/hello_world_model.md
17 | md/setting_up_aml.md
18 | md/creating_dataset.md
19 | md/building_models.md
20 | md/sample_tasks.md
21 | md/bring_your_own_model.md
22 | md/debugging_and_monitoring.md
23 | md/model_diagnostics.md
24 | md/move_model.md
25 | rst/models
26 |
27 | .. toctree::
28 | :maxdepth: 1
29 | :caption: Further reading for contributors
30 |
31 | md/software_design_overview.md
32 | md/testing.md
33 | md/contributing.md
34 | md/deploy_on_aml.md
35 | md/fastmri.md
36 | md/innereye_as_submodule.md
37 | md/releases.md
38 | md/self_supervised_models.md
39 | md/CHANGELOG.md
40 |
41 | .. toctree::
42 | :caption: API documentation
43 | :maxdepth: 2
44 |
45 | rst/api/index
46 |
47 |
48 | Indices and tables
49 | ==================
50 |
51 | * :ref:`genindex`
52 | * :ref:`modindex`
53 |
--------------------------------------------------------------------------------
/.idea/runConfigurations/Template__Run_ML_on_AzureML.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/InnerEye/ML/SSL/datamodules_and_datasets/dataset_cls_utils.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 | from typing import Any, Tuple, Union
6 |
7 | import torch
8 |
9 | OptionalIndexInputAndLabel = Union[Tuple[torch.Tensor, int], Tuple[int, torch.Tensor, int]]
10 |
11 |
12 | class InnerEyeDataClassBaseWithReturnIndex:
13 | """
14 | Class to be used via double inheritance with a VisionDataset.
15 | Overloads the __getitem__ function so that we can optionally also return
16 | the index within the dataset.
17 | """
18 |
19 | def __init__(self, root: str, return_index: bool, **kwargs: Any) -> None:
20 | self.return_index = return_index
21 | super().__init__(root=root, **kwargs) # type: ignore
22 |
23 | def __getitem__(self, index: int) -> Any:
24 | item = super().__getitem__(index) # type: ignore
25 | if self.return_index:
26 | return (index, *item)
27 | else:
28 | return item
29 |
30 | @property
31 | def num_classes(self) -> int:
32 | raise NotImplementedError
33 |
--------------------------------------------------------------------------------
/.idea/runConfigurations/Template__normalize_and_visualize_dataset.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/GeoPol.xml:
--------------------------------------------------------------------------------
1 |
7 |
8 |
9 |
10 |
12 |
13 | ]>
14 |
15 |
16 |
17 | &GitReposFolder;\github\&GitRepoName;
18 | &GitRepoName;
19 |
20 |
21 | .
22 |
23 |
24 | .gitignore
25 | THIRDPARTYNOTICES.md
26 | GeoPol.xml
27 | *.nii.gz
28 |
29 |
30 |
--------------------------------------------------------------------------------
/docs/source/rst/api/ML/utils.rst:
--------------------------------------------------------------------------------
1 | Utils
2 | =====
3 |
4 | Data Utils
5 | -----------
6 | .. automodule:: InnerEye.ML.utils.csv_util
7 |
8 | .. automodule:: InnerEye.ML.utils.dataset_util
9 |
10 | .. automodule:: InnerEye.ML.utils.hdf5_util
11 |
12 | .. automodule:: InnerEye.ML.utils.image_util
13 |
14 | .. automodule:: InnerEye.ML.utils.io_util
15 |
16 | .. automodule:: InnerEye.ML.utils.split_dataset
17 |
18 | .. automodule:: InnerEye.ML.utils.features_util
19 |
20 | .. automodule:: InnerEye.ML.utils.transforms
21 |
22 | Model Utils
23 | -----------
24 |
25 | .. automodule:: InnerEye.ML.utils.checkpoint_handling
26 |
27 | .. automodule:: InnerEye.ML.utils.config_loader
28 |
29 | .. automodule:: InnerEye.ML.utils.model_util
30 |
31 | .. automodule:: InnerEye.ML.utils.layer_util
32 |
33 | .. automodule:: InnerEye.ML.utils.model_metadata_util
34 |
35 | Training Utils
36 | --------------
37 |
38 | .. automodule:: InnerEye.ML.utils.lr_scheduler
39 |
40 | .. automodule:: InnerEye.ML.utils.metrics_util
41 |
42 | .. automodule:: InnerEye.ML.utils.sequence_utils
43 |
44 | .. automodule:: InnerEye.ML.utils.supervised_criterion
45 |
46 | .. automodule:: InnerEye.ML.utils.run_recovery
47 |
48 | .. automodule:: InnerEye.ML.utils.surface_distance_utils
49 |
50 | .. automodule:: InnerEye.ML.utils.temperature_scaling
51 |
52 | .. automodule:: InnerEye.ML.utils.device_aware_module
53 |
54 | Other / Misc Utils
55 | ------------------
56 | .. automodule:: InnerEye.ML.utils.ml_util
57 |
58 | .. automodule:: InnerEye.ML.utils.plotting_util
59 |
--------------------------------------------------------------------------------
/docs/source/md/model_diagnostics.md:
--------------------------------------------------------------------------------
1 | # Model Diagnostics
2 |
3 | The InnerEye toolbox has extensive reporting about the model building process, as well as the performance
4 | of the final model. Our goal is to provide as much insight as possible about the critical steps (and
5 | pitfalls) of building a model.
6 |
7 | ## Patch sampling for segmentation models
8 |
9 | When building a segmentation model, one of the crucial steps is how equally-shaped crops are taken from
10 | the raw medical image before they are fed into model training. An outline of that process is
11 | given [here](https://github.com/microsoft/InnerEye-DeepLearning/wiki/Adjusting-and-tuning-a-segmentation-model).
12 |
13 | At the start of training, the toolbox inspects the first 10 images of the training set. For each of them,
14 | 1000 crops are drawn at random, similar to how they would be drawn during training. From those, a
15 | heatmap is constructed, where each voxel value counts how often that specific voxel was contained
16 | in a sampled crop (a value between 0 and 1000). The heatmap is stored as a Nifti file, alongside the
17 | original scan, in folder `outputs/patch_sampling/`. When running inside AzureML, navigate to the
18 | "Outputs" tab, and go to the folder (see screenshot below).
19 |
20 | In addition, for each patient, 3 thumbnail images are generated that overlay the heatmap on top of the
21 | scan. Dark red indicates voxels that are sampled very often, while transparent red indicates voxels that are
22 | sampled infrequently.
23 |
24 | Example thumbnail when viewed in the AzureML UI:
25 | 
26 |
--------------------------------------------------------------------------------
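The heatmap construction described above can be illustrated with a small NumPy sketch; this is not the toolbox's actual implementation, and the image shape, crop size, and function name are made up for the example.

```python
import numpy as np


def sampling_heatmap(image_shape, crop_size, num_crops=1000, rng=None):
    """Count, per voxel, how many of the randomly drawn crops contain that voxel."""
    rng = rng or np.random.default_rng(0)
    heatmap = np.zeros(image_shape, dtype=np.int32)
    for _ in range(num_crops):
        # Draw the crop's start corner uniformly so that the crop stays inside the image.
        start = [rng.integers(0, dim - crop + 1) for dim, crop in zip(image_shape, crop_size)]
        slices = tuple(slice(s, s + c) for s, c in zip(start, crop_size))
        heatmap[slices] += 1
    return heatmap  # per-voxel values between 0 and num_crops, as described above


heatmap = sampling_heatmap(image_shape=(64, 128, 128), crop_size=(32, 64, 64))
```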
/InnerEye/Common/fixed_paths_for_tests.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 | import os
6 | from pathlib import Path
7 | from typing import Optional
8 |
9 | from InnerEye.Common.type_annotations import PathOrString
10 |
11 |
12 | def tests_root_directory(path: Optional[PathOrString] = None) -> Path:
13 | """
14 | Gets the full path to the root directory that holds the tests.
15 | If a relative path is provided then it is appended to the absolute path
16 | of that tests directory.
17 | 
18 | :return: The full path to the Tests directory (or the given path inside it), with symlinks resolved.
19 | """
20 | root = Path(os.path.realpath(__file__)).parent.parent.parent / "Tests"
21 | return root / path if path else root
22 |
23 |
24 | def full_ml_test_data_path(path: str = "") -> Path:
25 | """
26 | Takes a relative path inside of the Tests/ML/test_data folder, and returns its
27 | full absolute path.
28 |
29 | :param path: A path relative to the Tests/ML/test_data folder
30 | :return: The full absolute path of the argument.
31 | """
32 | return _full_test_data_path("ML", path)
33 |
34 |
35 | def _full_test_data_path(prefix: str, suffix: str) -> Path:
36 | root = tests_root_directory()
37 | return root / prefix / "test_data" / suffix
38 |
39 |
40 | TEST_OUTPUTS_PATH = tests_root_directory().parent / "test_outputs"
41 |
--------------------------------------------------------------------------------
/.github/workflows/main.yml:
--------------------------------------------------------------------------------
1 | name: Add new Issues to InnerEye-OSS project
2 | on:
3 | issues:
4 | types:
5 | - opened
6 | jobs:
7 | track_issue:
8 | runs-on: ubuntu-latest
9 | steps:
10 | - name: Get project data
11 | env:
12 | GITHUB_TOKEN: ${{ secrets.INNEREYE_OSS_PROJECT_ACCESS_TOKEN }}
13 | ORGANIZATION: Microsoft
14 | PROJECT_NUMBER: 320
15 | run: |
16 | gh api graphql -f query='
17 | query($org: String!, $number: Int!) {
18 | organization(login: $org){
19 | projectNext(number: $number) {
20 | id
21 | fields(first:20) {
22 | nodes {
23 | id
24 | name
25 | settings
26 | }
27 | }
28 | }
29 | }
30 | }' -f org=$ORGANIZATION -F number=$PROJECT_NUMBER > project_data.json
31 |
32 | echo 'PROJECT_ID='$(jq '.data.organization.projectNext.id' project_data.json) >> $GITHUB_ENV
33 |
34 | - name: Add issue to project
35 | env:
36 | GITHUB_TOKEN: ${{ secrets.INNEREYE_OSS_PROJECT_ACCESS_TOKEN }}
37 | ISSUE_ID: ${{ github.event.issue.node_id }}
38 | run: |
39 | item_id="$( gh api graphql -f query='
40 | mutation($project:ID!, $issue:ID!) {
41 | addProjectNextItem(input: {projectId: $project, contentId: $issue}) {
42 | projectNextItem {
43 | id
44 | }
45 | }
46 | }' -f project=$PROJECT_ID -f issue=$ISSUE_ID --jq '.data.addProjectNextItem.projectNextItem.id')"
47 |
--------------------------------------------------------------------------------
/InnerEye/ML/model_inference_config.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 | from dataclasses import dataclass
6 | from typing import List
7 | from pathlib import Path
8 |
9 | from dataclasses_json import dataclass_json
10 |
11 | from InnerEye.Common.common_util import MAX_PATH_LENGTH, check_properties_are_not_none, is_long_path
12 |
13 |
14 | @dataclass_json
15 | @dataclass
16 | class ModelInferenceConfig:
17 | """Class for configuring a model for inference"""
18 | model_name: str
19 | checkpoint_paths: List[str]
20 | model_configs_namespace: str = ''
21 |
22 | def __post_init__(self) -> None:
23 | check_properties_are_not_none(self)
24 | # Check to make sure that no long paths are provided
25 | long_paths = list(filter(is_long_path, self.checkpoint_paths))
26 | if long_paths:
27 | raise ValueError(f"The following paths exceed the maximum length of {MAX_PATH_LENGTH} characters: {long_paths}")
28 |
29 |
30 | def read_model_inference_config(path_to_model_inference_config: Path) -> ModelInferenceConfig:
31 | """
32 | Read the model inference configuration from a json file, and instantiate a ModelInferenceConfig object using this.
33 | """
34 | model_inference_config_json = path_to_model_inference_config.read_text(encoding='utf-8')
35 | model_inference_config = ModelInferenceConfig.from_json(model_inference_config_json) # type: ignore
36 | return model_inference_config
37 |
--------------------------------------------------------------------------------
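A minimal sketch of round-tripping this configuration through JSON, roughly the way a `model_inference_config.json` file would be read back; the model name, checkpoint path, and namespace below are illustrative only.

```python
from pathlib import Path

from InnerEye.ML.model_inference_config import ModelInferenceConfig, read_model_inference_config

config = ModelInferenceConfig(model_name="Lung",
                              checkpoint_paths=["checkpoints/best_checkpoint.ckpt"],
                              model_configs_namespace="InnerEye.ML.configs.segmentation.Lung")
json_path = Path("model_inference_config.json")
# to_json/from_json are added by the dataclass_json decorator.
json_path.write_text(config.to_json(), encoding="utf-8")  # type: ignore
restored = read_model_inference_config(json_path)
assert restored.model_name == "Lung"
```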
/azure-pipelines/inner_eye_env.yml:
--------------------------------------------------------------------------------
1 | steps:
2 | - template: checkout.yml
3 |
4 | - template: store_settings.yml
5 |
6 | - template: prepare_conda.yml
7 |
8 | - bash: echo "##vso[task.setvariable variable=conda_env_dir]/usr/share/miniconda/envs"
9 | displayName: "Set the Conda environment folder (Linux)"
10 | condition: eq(variables['Agent.OS'], 'Linux')
11 |
12 | - bash: echo "##vso[task.setvariable variable=conda_env_dir]C:/Miniconda/envs"
13 | displayName: "Set the Conda environment folder (Windows)"
14 | condition: eq(variables['Agent.OS'], 'Windows_NT')
15 |
16 | - bash: echo $(conda_env_dir)
17 | displayName: 'Printing Conda environment folder'
18 |
19 | # https://docs.microsoft.com/en-us/azure/devops/pipelines/release/caching?view=azure-devops#pythonanaconda
20 | - task: Cache@2
21 | displayName: Use cached Conda environment
22 | inputs:
23 | # Beware of changing the cache key or path independently, safest to change in sync
24 | key: 'conda_env | "$(Agent.OS)" | environment.yml'
25 | cacheHitVar: CONDA_CACHE_RESTORED
26 | path: $(conda_env_dir)
27 |
28 | - bash: conda env create --file environment.yml
29 | displayName: Create Anaconda environment
30 | failOnStderr: false # Conda env create does not have an option to suppress warnings generated in wheel.py
31 | condition: eq(variables.CONDA_CACHE_RESTORED, 'false')
32 |
33 | - bash: |
34 | source activate InnerEye
35 | which python
36 | conda info
37 | pip freeze
38 | failOnStderr: false
39 | displayName: Print package list and Conda info
40 | condition: succeededOrFailed()
41 |
42 | - bash: source activate InnerEye
43 | displayName: Check if InnerEye environment is present
44 |
--------------------------------------------------------------------------------
/InnerEye/ML/visualizers/regression_visualization.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 | from typing import Optional
6 |
7 | import matplotlib.pyplot as plt
8 | import numpy as np
9 |
10 |
11 | def plot_variation_error_prediction(
12 | labels: np.ndarray,
13 | predictions: np.ndarray,
14 | filename: Optional[str] = None) -> None:
15 | """
16 | Plots the absolute prediction errors as well as the predicted values
17 | against the ground truth values.
18 |
19 | :param labels: ground truth labels
20 | :param predictions: model outputs
21 | :param filename: location to save the plot to. If None show the plot instead.
22 | """
23 | fig, ax = plt.subplots(1, 2, figsize=(15, 5))
24 | errors = np.abs(predictions-labels)
25 | ax[0].scatter(labels, errors, marker="x")
26 | ax[0].set_xlabel("Ground truth")
27 | ax[0].set_ylabel("Absolute error")
28 | ax[0].set_title("Error as a function of ground truth value")
29 |
30 | ax[1].scatter(labels, predictions, marker="x")
31 | # noinspection PyArgumentList
32 | x = np.linspace(labels.min(), labels.max(), 10)
33 | ax[1].plot(x, x, "--", linewidth=0.5)
34 | ax[1].set_xlabel("Ground truth")
35 | ax[1].set_ylabel("Predicted value")
36 | ax[1].set_title("Predicted value as a function of ground truth")
37 |
38 | if filename is None:
39 | plt.show()
40 | else:
41 | plt.savefig(filename, dpi=75)
42 |
--------------------------------------------------------------------------------
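A small usage sketch for the plotting helper above, using synthetic data and an illustrative output file name:

```python
import numpy as np

from InnerEye.ML.visualizers.regression_visualization import plot_variation_error_prediction

rng = np.random.default_rng(0)
labels = rng.uniform(0, 10, size=100)
predictions = labels + rng.normal(0, 1, size=100)  # noisy predictions, for illustration only
plot_variation_error_prediction(labels, predictions, filename="regression_diagnostics.png")
```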
/InnerEye/ML/utils/run_recovery.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 | from __future__ import annotations
6 |
7 | import logging
8 | from dataclasses import dataclass
9 | from pathlib import Path
10 | from typing import List
11 |
12 | from InnerEye.Common.common_util import check_properties_are_not_none
13 | from InnerEye.ML.common import get_best_checkpoint_path
14 |
15 |
16 | @dataclass(frozen=True)
17 | class RunRecovery:
18 | """
19 | Class to encapsulate information relating to run recovery (e.g. checkpoint paths for parent and child runs)
20 | """
21 | checkpoints_roots: List[Path]
22 |
23 | def get_recovery_checkpoint_paths(self) -> List[Path]:
24 | from InnerEye.ML.utils.checkpoint_handling import get_recovery_checkpoint_path
25 | return [get_recovery_checkpoint_path(x) for x in self.checkpoints_roots]
26 |
27 | def get_best_checkpoint_paths(self) -> List[Path]:
28 | return [get_best_checkpoint_path(x) for x in self.checkpoints_roots]
29 |
30 | def _validate(self) -> None:
31 | check_properties_are_not_none(self)
32 | if len(self.checkpoints_roots) == 0:
33 | raise ValueError("checkpoints_roots must not be empty")
34 |
35 | def __post_init__(self) -> None:
36 | self._validate()
37 | logging.info(f"Storing {len(self.checkpoints_roots)} checkpoint roots:")
38 | for p in self.checkpoints_roots:
39 | logging.info(str(p))
40 |
--------------------------------------------------------------------------------
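A hedged usage sketch for RunRecovery; the checkpoint folder names are hypothetical, and the exact checkpoint file names are determined by InnerEye.ML.common and InnerEye.ML.utils.checkpoint_handling.

```python
from pathlib import Path

from InnerEye.ML.utils.run_recovery import RunRecovery

# Hypothetical checkpoint folders from two child runs of a cross-validation run.
recovery = RunRecovery(checkpoints_roots=[Path("outputs/0/checkpoints"), Path("outputs/1/checkpoints")])
best_checkpoints = recovery.get_best_checkpoint_paths()  # one "best" checkpoint path per root
# get_recovery_checkpoint_paths() works analogously, but expects the recovery
# checkpoint files to actually exist inside those folders.
```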
/Tests/Scripts/test_move_model.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 |
6 | import pytest
7 |
8 | from InnerEye.Azure.azure_config import AzureConfig
9 | from InnerEye.Common import fixed_paths
10 | from InnerEye.Common.output_directories import OutputFolderForTests
11 | from InnerEye.Scripts.move_model import MoveModelConfig, PYTHON_ENVIRONMENT_NAME, move
12 |
13 | MODEL_ID = "PassThroughModel:1"
14 | ENSEMBLE_MODEL_ID = "BasicModel2Epochs:8351"
15 |
16 |
17 | @pytest.mark.parametrize("model_id", [MODEL_ID, ENSEMBLE_MODEL_ID])
18 | def test_download_and_upload(model_id: str, test_output_dirs: OutputFolderForTests) -> None:
19 | """
20 | Test that downloads and uploads a model to a workspace
21 | """
22 | azure_config = AzureConfig.from_yaml(yaml_file_path=fixed_paths.SETTINGS_YAML_FILE,
23 | project_root=fixed_paths.repository_root_directory())
24 | ws = azure_config.get_workspace()
25 | config_download = MoveModelConfig(model_id=model_id, path=str(test_output_dirs.root_dir), action="download")
26 | move(ws, config_download)
27 | assert (test_output_dirs.root_dir / model_id.replace(":", "_")).is_dir()
28 | config_upload = MoveModelConfig(model_id=model_id, path=str(test_output_dirs.root_dir), action="upload")
29 | model = move(ws, config_upload)
30 | assert model is not None
31 | assert PYTHON_ENVIRONMENT_NAME in model.tags
32 | assert model.description != ""
33 |
--------------------------------------------------------------------------------
/create_and_lock_environment.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | os_name=$(uname)
4 | if [[ ! $os_name == *"Linux"* ]]; then
5 | echo "ERROR: cannot run AML environment locking in a non-Linux environment. Windows users can do this using WSL - https://docs.microsoft.com/en-us/windows/wsl/install"
6 | exit 1
7 | else
8 | echo "Starting AML environment locking..."
9 | fi
10 |
11 | # get environment name from primary dependencies YAML file
12 | name_line="$(cat primary_deps.yml | grep 'name:')"
13 | IFS=':' read -ra name_arr <<< "$name_line"
14 | env_name="${name_arr[1]}"
15 |
16 | # clear old conda envs, create new one
17 | export CONDA_ALWAYS_YES="true"
18 | conda env remove --name ${env_name}
19 | conda env create --file primary_deps.yml
20 |
21 | # export new environment to environment.yml
22 | conda env export -n ${env_name} | grep -v "prefix:" > environment.yml
23 | unset CONDA_ALWAYS_YES
24 |
25 | # remove python version hash (technically not locked, so still potential for problems here if python secondary deps change)
26 | while IFS='' read -r line; do
27 | if [[ $line == *"- python="* ]]; then
28 |
29 | IFS='=' read -ra python_arr <<< "$line"
30 | unset python_arr[-1]
31 | echo "${python_arr[0]}"="${python_arr[1]}"
32 | elif [[ ! $line == "#"* ]]; then
33 | echo "${line}"
34 | fi
35 | done < environment.yml > environment.yml.tmp
36 | echo "# WARNING - DO NOT EDIT THIS FILE MANUALLY" > environment.yml
37 | echo "# Please refer to the environment documentation for instructions on how to create a new version of this file: https://github.com/microsoft/InnerEye-DeepLearning/blob/main/docs/environment.md" >> environment.yml
38 | cat environment.yml.tmp >> environment.yml
39 | rm environment.yml.tmp
40 | cp environment.yml TestSubmodule/environment.yml
41 |
--------------------------------------------------------------------------------
/Tests/ML/test_normalize_and_visualize.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 | from unittest import mock
6 |
7 | from InnerEye.Common import fixed_paths
8 | from InnerEye.ML.normalize_and_visualize_dataset import get_configs
9 | from Tests.ML.configs.DummyModel import DummyModel
10 |
11 |
12 | def test_visualize_commandline1() -> None:
13 | """
14 | Testing for a bug in commandline processing: The model configuration was always overwritten with all the default
15 | values of each field in Config, rather than only the overrides specified on the commandline.
16 | :return:
17 | """
18 | default_config = DummyModel()
19 | old_photonorm = default_config.norm_method
20 | old_random_seed = default_config.get_effective_random_seed()
21 | new_dataset = "new_dataset"
22 | assert default_config.azure_dataset_id != new_dataset
23 | with mock.patch("sys.argv", ["", f"--azure_dataset_id={new_dataset}"]):
24 | updated_config, runner_config, _ = get_configs(default_config, yaml_file_path=fixed_paths.SETTINGS_YAML_FILE)
25 | assert updated_config.azure_dataset_id == new_dataset
26 | # These two values were not specified on the commandline, and should be at their original values.
27 | assert updated_config.norm_method == old_photonorm
28 | assert updated_config.get_effective_random_seed() == old_random_seed
29 | # Credentials and variables should have been picked up from yaml files
30 | assert len(runner_config.azureml_datastore) > 0
31 |
--------------------------------------------------------------------------------
/Tests/Azure/test_recovery_id.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 | import pytest
6 |
7 | from InnerEye.Azure.azure_util import split_recovery_id
8 |
9 |
10 | @pytest.mark.parametrize(["id", "expected1", "expected2"],
11 | [("foo:bar", "foo", "bar"),
12 | ("foo:bar_ab_cd", "foo", "bar_ab_cd"),
13 | ("a_b_c_00_123", "a_b_c", "a_b_c_00_123"),
14 | ("baz_00_123", "baz", "baz_00_123"),
15 | ("foo_bar_abc_123_456", "foo_bar_abc", "foo_bar_abc_123_456"),
16 | # This is the run ID of a hyperdrive parent run. It only has one numeric part at the end
17 | ("foo_bar_123", "foo_bar", "foo_bar_123"),
18 | # This is a hyperdrive child run
19 | ("foo_bar_123_3", "foo_bar", "foo_bar_123_3"),
20 | ])
21 | def test_split_recovery_id(id: str, expected1: str, expected2: str) -> None:
22 | """
23 | Check that run recovery ids are correctly parsed into experiment and run id.
24 | """
25 | assert split_recovery_id(id) == (expected1, expected2)
26 |
27 |
28 | @pytest.mark.parametrize(["id"], [["foo:bar:baz"], ["foo_bar"]])
29 | def test_split_recovery_id_fails(id: str) -> None:
30 | """
31 | Test that invalid run recovery ids raise an error when parsed.
32 | """
33 | with pytest.raises(ValueError):
34 | split_recovery_id(id)
35 |
--------------------------------------------------------------------------------
/Tests/SSL/test_encoders.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 | from pl_bolts.models.self_supervised.resnets import ResNet
6 |
7 | from InnerEye.ML.SSL.encoders import DenseNet121Encoder, SSLEncoder
8 | from InnerEye.ML.SSL.lightning_containers.ssl_container import EncoderName
9 |
10 |
11 | def test_get_encoder_dim_within_encoder_class() -> None:
12 | """
13 | Tests initialization of various SSLEncoders and computation of the corresponding output_feature_dim
14 | """
15 | resnet18 = SSLEncoder(EncoderName.resnet18.value)
16 | assert isinstance(resnet18.cnn_model, ResNet)
17 | assert resnet18.get_output_feature_dim() == 512
18 | resnet50 = SSLEncoder(EncoderName.resnet50.value)
19 | assert isinstance(resnet50.cnn_model, ResNet)
20 | assert resnet50.get_output_feature_dim() == 2048
21 | densenet121 = SSLEncoder(EncoderName.densenet121.value)
22 | assert isinstance(densenet121.cnn_model, DenseNet121Encoder)
23 | assert densenet121.get_output_feature_dim() == 1024
24 |
25 |
26 | def test_use7x7conv_flag_in_encoder() -> None:
27 | """
28 | Tests the use_7x7_first_conv_in_resnet flag effect on encoder definition
29 | """
30 | resnet18 = SSLEncoder(EncoderName.resnet18.value, use_7x7_first_conv_in_resnet=True)
31 | assert resnet18.cnn_model.conv1.kernel_size == (7, 7) # type: ignore
32 | resnet18_for_cifar = SSLEncoder(EncoderName.resnet18.value, use_7x7_first_conv_in_resnet=False)
33 | assert resnet18_for_cifar.cnn_model.conv1.kernel_size == (3, 3) # type: ignore
34 |
--------------------------------------------------------------------------------
/conftest.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 | """
6 | Global PyTest configuration -- used to define global fixtures for the entire test suite
7 |
8 | DO NOT RENAME THIS FILE: (https://docs.pytest.org/en/latest/fixture.html#sharing-a-fixture-across-tests-in-a-module
9 | -or-class-session)
10 | """
11 | import uuid
12 | from typing import Generator
13 |
14 | import pytest
15 |
16 | from InnerEye.Common.fixed_paths import add_submodules_to_path
17 | from InnerEye.Common.fixed_paths_for_tests import TEST_OUTPUTS_PATH
18 | from InnerEye.Common.output_directories import OutputFolderForTests, remove_and_create_folder
19 |
20 | # This needs to be right at the start of conftest, so that test collection already has access to all submodules
21 | add_submodules_to_path()
22 |
23 |
24 | @pytest.fixture(autouse=True, scope='session')
25 | def test_suite_setup() -> Generator:
26 | # create a default outputs root for all tests
27 | remove_and_create_folder(TEST_OUTPUTS_PATH)
28 | # run the entire test suite
29 | yield
30 |
31 |
32 | @pytest.fixture
33 | def test_output_dirs() -> Generator:
34 | """
35 | Fixture to automatically create a random directory before executing a test and then
36 | removing this directory after the test has been executed.
37 | """
38 | # create dirs before executing the test
39 | root_dir = TEST_OUTPUTS_PATH / str(uuid.uuid4().hex)
40 | remove_and_create_folder(root_dir)
41 | print(f"Created temporary folder for test: {root_dir}")
42 | # let the test function run
43 | yield OutputFolderForTests(root_dir=root_dir)
44 |
--------------------------------------------------------------------------------
/InnerEye/Common/output_directories.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 | import shutil
6 | from dataclasses import dataclass
7 | from pathlib import Path
8 |
9 | from InnerEye.Common.type_annotations import PathOrString
10 |
11 |
12 | def remove_and_create_folder(folder: PathOrString) -> None:
13 | """
14 | Deletes the folder if it exists, and recreates it. This method ignores errors that can come from
15 | an explorer window still being open inside of the test result folder.
16 | """
17 | folder = Path(folder)
18 |
19 | if folder.is_dir():
20 | shutil.rmtree(folder, ignore_errors=True)
21 |
22 | folder.mkdir(exist_ok=True)
23 |
24 |
25 | @dataclass(frozen=True)
26 | class OutputFolderForTests:
27 | """
28 | Data class for the output directories for a given test
29 | """
30 | root_dir: Path
31 |
32 | def create_file_or_folder_path(self, file_or_folder_name: str) -> Path:
33 | """
34 | Creates a full path for the given file or folder name relative to the root directory stored in the present
35 | object.
36 |
37 | :param file_or_folder_name: Name of file or folder to be created under root_dir
38 | """
39 | return self.root_dir / file_or_folder_name
40 |
41 | def make_sub_dir(self, dir_name: str) -> Path:
42 | """
43 | Makes a sub directory under root_dir
44 |
45 | :param dir_name: Name of subdirectory to be created.
46 | """
47 | sub_dir_path = self.create_file_or_folder_path(dir_name)
48 | sub_dir_path.mkdir()
49 | return sub_dir_path
50 |
--------------------------------------------------------------------------------
/InnerEye/ML/configs/ssl/CovidContainers.py:
--------------------------------------------------------------------------------
1 | from typing import Any
2 |
3 | from InnerEye.ML.SSL.lightning_containers.ssl_container import EncoderName, SSLContainer, SSLDatasetName
4 | from InnerEye.ML.SSL.utils import SSLTrainingType
5 | from InnerEye.ML.configs.ssl.CXR_SSL_configs import NIH_AZURE_DATASET_ID, path_encoder_augmentation_cxr, \
6 | path_linear_head_augmentation_cxr
7 |
8 | COVID_DATASET_ID = "id-of-your-dataset"
9 |
10 |
11 | class NIH_COVID_BYOL(SSLContainer):
12 | """
13 | Class to train an SSL model on the NIH dataset and monitor embedding quality on a Covid dataset.
14 | """
15 |
16 | def __init__(self,
17 | covid_dataset_id: str = COVID_DATASET_ID,
18 | pretraining_dataset_id: str = NIH_AZURE_DATASET_ID,
19 | **kwargs: Any):
20 | super().__init__(ssl_training_dataset_name=SSLDatasetName.NIHCXR,
21 | linear_head_dataset_name=SSLDatasetName.Covid,
22 | random_seed=1,
23 | num_epochs=500,
24 | ssl_training_batch_size=75, # This runs with 16 gpus (4 nodes)
25 | num_workers=12,
26 | ssl_encoder=EncoderName.densenet121,
27 | ssl_training_type=SSLTrainingType.BYOL,
28 | use_balanced_binary_loss_for_linear_head=True,
29 | ssl_augmentation_config=path_encoder_augmentation_cxr,
30 | extra_azure_dataset_ids=[covid_dataset_id],
31 | azure_dataset_id=pretraining_dataset_id,
32 | linear_head_augmentation_config=path_linear_head_augmentation_cxr,
33 | online_evaluator_lr=1e-5,
34 | linear_head_batch_size=64,
35 | pl_find_unused_parameters=True,
36 | **kwargs)
37 |
--------------------------------------------------------------------------------
/Tests/ML/test_data/sequence_data_for_classification/dataset.csv:
--------------------------------------------------------------------------------
1 | subject,VISIT,AGE,CAT0,CAT1,CAT2,NUM1,NUM2,NUM3,NUM4,IMG,Position,Label
2 | 2137.00005,V1,92,Foo,category_1,category_A,362,,,71,img_1,0,
3 | 2137.00005,V2,92,Foo,category_1,category_A,357,,,693,img_1,1,0
4 | 2137.00005,V3,92,Foo,category_1,category_A,355,,,642,,2,0
5 | 2137.00005,V5,92,Foo,category_1,category_A,355,,,63,,3,0
6 | 2137.00005,V6,92,Foo,category_1,category_A,348,,,364,,4,0
7 | 2137.00005,V7,92,Foo,category_1,category_A,316,,,638,,5,0
8 | 2137.00005,V8,92,Foo,category_1,category_A,349,,,6448,,6,0
9 | 2137.00005,V9,92,Foo,category_1,category_A,361,,,674,,7,0
10 | 2137.00005,V10,92,Foo,category_1,category_A,350,,,8,,8,0
11 | 2627.00001,V1,50,Bar,category_1,category_B,477,1,2,38,img_1,0,0
12 | 2627.00001,V2,50,Bar,category_1,category_B,220,1,2,33,img_1,1,0
13 | 2627.00001,VST 3,50,Bar,category_1,category_B,222,0.08,0.28,60,img_1,2,0
14 | 2627.00001,V5,50,Bar,category_1,category_B,215,,,65,img_1,3,0
15 | 2627.00001,V6,50,Bar,category_1,category_B,213,,,68,,4,0
16 | 2627.00001,V7,50,Bar,category_1,category_B,217,,,59,,5,0
17 | 2627.00001,V8,50,Bar,category_1,category_B,210,,,999,,6,0
18 | 2627.00001,VST 9,50,Bar,category_1,category_B,217,0,0.01,153,img_2,7,0
19 | 2627.00001,V10,50,Bar,category_1,category_B,224,,,93,,8,0
20 | 3250.00005,VST 1,50,Bar,category_1,category_B,344,0,291.81,76,img_3,0,0
21 | 3250.00005,VST 2,50,Bar,category_1,category_B,233,0,6.2,99,img_4,1,0
22 | 3250.00005,VST 3,50,Bar,category_1,category_B,212,0,0,84,img_6,2,0
23 | 3250.00005,VST 5,50,Bar,category_1,category_B,215,0,0,84,img_5,3,0
24 | 3250.00005,VST 6,50,Bar,category_1,category_B,215,0,0,82,img_7,4,0
25 | 3250.00005,VST 7,25,Bar,category_1,category_B,233,0,0,84,img_8,5,0
26 | 3250.00005,VST 8,25,Bar,category_1,category_B,218,0,0,84,img_9,6,0
27 | 3250.00005,VST 9,25,Bar,category_1,category_B,221,0,0,84,img_10,7,0
28 | 3250.00005,VST 10,25,Bar,category_1,category_B,238,0,0.02,84,img_11,8,0
29 |
--------------------------------------------------------------------------------
/TestSubmodule/test_submodule_runner.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 |
6 | import sys
7 | from pathlib import Path
8 |
9 |
10 | # This file here mimics how the InnerEye code would be used as a git submodule. The test script will
11 | # copy the InnerEye code to a folder called Submodule. The test will then invoke the present file as a runner,
12 | # and train a model in AzureML.
13 |
14 | repository_root = Path(__file__).absolute().parent.parent
15 |
16 |
17 | def add_package_to_sys_path_if_needed() -> None:
18 | """
19 | Checks if the Python paths in sys.path already contain the /Submodule folder. If not, add it.
20 | """
21 | is_package_in_path = False
22 | innereye_submodule_folder = repository_root / "Submodule"
23 | for path_str in sys.path:
24 | path = Path(path_str)
25 | if path == innereye_submodule_folder:
26 | is_package_in_path = True
27 | break
28 | if not is_package_in_path:
29 | print(f"Adding {innereye_submodule_folder} to sys.path")
30 | sys.path.append(str(innereye_submodule_folder))
31 |
32 |
33 | def main() -> None:
34 | try:
35 | from InnerEye import ML # noqa: 411
36 | except ImportError:
37 | add_package_to_sys_path_if_needed()
38 |
39 | from InnerEye.ML import runner
40 | from InnerEye.Common import fixed_paths
41 | print(f"Repository root: {repository_root}")
42 | runner.run(project_root=repository_root,
43 | yaml_config_file=fixed_paths.SETTINGS_YAML_FILE,
44 | post_cross_validation_hook=None)
45 |
46 |
47 | if __name__ == '__main__':
48 | main()
49 |
--------------------------------------------------------------------------------
/InnerEye/ML/SSL/lightning_modules/byol/byol_models.py:
--------------------------------------------------------------------------------
1 | from typing import Any
2 |
3 | from torch import Tensor as T, nn
4 |
5 | from InnerEye.ML.SSL.encoders import SSLEncoder
6 |
7 |
8 | class _MLP(nn.Module):
9 | """
10 | Fully connected layers to map between image embeddings and projection space where pairs of images are compared.
11 | """
12 |
13 | def __init__(self, input_dim: int, hidden_dim: int, output_dim: int) -> None:
14 | """
15 | :param input_dim: Input embedding feature size
16 | :param hidden_dim: Hidden layer size in MLP
17 | :param output_dim: Output projection size
18 | """
19 | super().__init__()
20 | self.output_dim = output_dim
21 | self.input_dim = input_dim
22 | self.model = nn.Sequential(
23 | nn.Linear(input_dim, hidden_dim, bias=False),
24 | nn.BatchNorm1d(hidden_dim),
25 | nn.ReLU(inplace=True),
26 | nn.Linear(hidden_dim, output_dim, bias=True))
27 |
28 | def forward(self, x: T) -> T:
29 | x = self.model(x)
30 | return x
31 |
32 |
33 | class SiameseArm(nn.Module):
34 | """
35 | Implements the image encoder (f), projection (g) and predictor (q) modules used in BYOL.
36 | """
37 |
38 | def __init__(self, *encoder_kwargs: Any) -> None:
39 | super().__init__()
40 |
41 | self.encoder = SSLEncoder(*encoder_kwargs) # Encoder
42 | self.projector = _MLP(input_dim=self.encoder.get_output_feature_dim(), hidden_dim=2048, output_dim=128)
43 | self.predictor = _MLP(input_dim=self.projector.output_dim, hidden_dim=128, output_dim=128)
44 |
45 | def forward(self, x: T) -> T:
46 | y = self.encoder(x)
47 | z = self.projector(y)
48 | h = self.predictor(z)
49 | return h
50 |
51 | def forward_until_predictor(self, x: T) -> T:
52 | y = self.encoder(x)
53 | return self.projector(y)
54 |
--------------------------------------------------------------------------------
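To show how the encoder, projector and predictor compose, a small sketch; the encoder choice and the input image size are illustrative assumptions.

```python
import torch

from InnerEye.ML.SSL.lightning_containers.ssl_container import EncoderName
from InnerEye.ML.SSL.lightning_modules.byol.byol_models import SiameseArm

arm = SiameseArm(EncoderName.resnet18.value)       # positional args are forwarded to SSLEncoder
images = torch.rand(4, 3, 224, 224)                # batch of 4 RGB images, size chosen for illustration
projections = arm.forward_until_predictor(images)  # encoder -> projector, shape (4, 128)
predictions = arm(images)                          # encoder -> projector -> predictor, shape (4, 128)
```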
/Tests/ML/test_data/dataset.csv:
--------------------------------------------------------------------------------
1 | subject,filePath,channel,institutionId
2 | 1,train_and_test_data/id1_channel1.nii.gz,channel1,1
3 | 1,train_and_test_data/id1_channel1.nii.gz,channel2,1
4 | 1,train_and_test_data/id1_mask.nii.gz,mask,1
5 | 1,train_and_test_data/id1_region.nii.gz,region,1
6 | 1,train_and_test_data/id1_region.nii.gz,region_1,1
7 | 2,train_and_test_data/id2_channel1.nii.gz,channel1,2
8 | 2,train_and_test_data/id2_channel1.nii.gz,channel2,2
9 | 2,train_and_test_data/id2_mask.nii.gz,mask,2
10 | 2,train_and_test_data/id2_region.nii.gz,region,2
11 | 2,train_and_test_data/id2_region.nii.gz,region_1,2
12 | 3,train_and_test_data/id2_channel1.nii.gz,channel1,3
13 | 3,train_and_test_data/id2_channel1.nii.gz,channel2,3
14 | 3,train_and_test_data/id2_mask.nii.gz,mask,3
15 | 3,train_and_test_data/id2_region.nii.gz,region,3
16 | 3,train_and_test_data/id2_region.nii.gz,region_1,3
17 | 4,train_and_test_data/id2_channel1.nii.gz,channel1,3
18 | 4,train_and_test_data/id2_channel1.nii.gz,channel2,3
19 | 4,train_and_test_data/id2_mask.nii.gz,mask,3
20 | 4,train_and_test_data/id2_region.nii.gz,region,3
21 | 4,train_and_test_data/id2_region.nii.gz,region_1,3
22 | 5,train_and_test_data/id2_channel1.nii.gz,channel1,3
23 | 5,train_and_test_data/id2_channel1.nii.gz,channel2,3
24 | 5,train_and_test_data/id2_mask.nii.gz,mask,3
25 | 5,train_and_test_data/id2_region.nii.gz,region,3
26 | 5,train_and_test_data/id2_region.nii.gz,region_1,3
27 | 6,train_and_test_data/id2_channel1.nii.gz,channel1,3
28 | 6,train_and_test_data/id2_channel1.nii.gz,channel2,3
29 | 6,train_and_test_data/id2_mask.nii.gz,mask,3
30 | 6,train_and_test_data/id2_region.nii.gz,region,3
31 | 6,train_and_test_data/id2_region.nii.gz,region_1,3
32 | 7,train_and_test_data/id2_channel1.nii.gz,channel1,4
33 | 7,train_and_test_data/id2_channel1.nii.gz,channel2,4
34 | 7,train_and_test_data/id2_mask.nii.gz,mask,4
35 | 7,train_and_test_data/id2_region.nii.gz,region,4
36 | 7,train_and_test_data/id2_region.nii.gz,region_1,4
37 |
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/Project_Default.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/InnerEye/Azure/parser_util.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 | from enum import Enum
6 | from typing import Any
7 |
8 |
9 | def _is_empty(item: Any) -> bool:
10 | """
11 | Returns True if the argument has length 0.
12 |
13 | :param item: Object to check.
14 | :return: True if the argument has length 0. False otherwise.
15 | """
16 | return hasattr(item, '__len__') and len(item) == 0
17 |
18 |
19 | def _is_empty_or_empty_string_list(item: Any) -> bool:
20 | """
21 | Returns True if the argument has length 0, or a list with a single element that has length 0.
22 |
23 | :param item: Object to check.
24 | :return: True if argument has length 0, or a list with a single element that has length 0. False otherwise.
25 | """
26 | if _is_empty(item):
27 | return True
28 | if hasattr(item, '__len__') and len(item) == 1 and _is_empty(item[0]):
29 | return True
30 | return False
31 |
32 |
33 | def value_to_string(x: object) -> str:
34 | """
35 | Returns a string representation of x, with special treatment of Enums (return their value)
36 | and lists (return comma-separated list).
37 |
38 | :param x: Object to convert to string
39 | :return: The string representation of the object.
40 | Special cases: For Enums, returns their value, for lists, returns a comma-separated list.
41 | """
42 | if isinstance(x, str):
43 | return x
44 | if isinstance(x, Enum):
45 | # noinspection PyUnresolvedReferences
46 | return x.value
47 | if isinstance(x, list):
48 | return ",".join(value_to_string(item) for item in x)
49 | return str(x)
50 |
--------------------------------------------------------------------------------
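A quick illustration of the special cases handled by value_to_string; the Optimizer enum below is hypothetical.

```python
from enum import Enum

from InnerEye.Azure.parser_util import value_to_string


class Optimizer(Enum):
    ADAM = "Adam"


print(value_to_string(Optimizer.ADAM))  # "Adam"  - Enums are mapped to their value
print(value_to_string([1, 2, 3]))       # "1,2,3" - lists become comma-separated strings
print(value_to_string(3.5))             # "3.5"   - everything else falls back to str()
```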
/Tests/Scripts/test_run_scoring.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 | from pathlib import Path
6 | from unittest import mock
7 |
8 | import pytest
9 |
10 | from InnerEye.Scripts import download_model_and_run_scoring
11 |
12 |
13 | def get_test_script() -> Path:
14 | """
15 | Returns the full path to a testing script that lives inside of the test suite.
16 | :return:
17 | """
18 | current = Path(__file__).parent
19 | script = current / "script_for_tests.py"
20 | assert script.is_file(), f"File {script} not found"
21 | return script
22 |
23 |
24 | @pytest.mark.parametrize(["script_arg", "expect_failure"],
25 | [("correct", False),
26 | ("failing", True)])
27 | def test_run_scoring(script_arg: str, expect_failure: bool) -> None:
28 | """
29 | Test if we can invoke a script via the scoring pipeline. Passing invalid arguments should cause failure.
30 | """
31 | scoring_script = Path(__file__).parent / "script_for_tests.py"
32 | assert scoring_script.is_file(), f"The script to invoke does not exist: {scoring_script}"
33 | scoring_folder = str(scoring_script.parent)
34 | # Invoke the script, and pass in a single string as the argument. Based on that, the script will either fail
35 | # or succeed.
36 | args = ["--model-folder", str(scoring_folder), scoring_script.name, script_arg]
37 | with mock.patch("sys.argv", [""] + args):
38 | with pytest.raises(SystemExit) as ex:
39 | download_model_and_run_scoring.run()
40 | expected_exit_code = 1 if expect_failure else 0
41 | assert ex.value.args[0] == expected_exit_code
42 |
--------------------------------------------------------------------------------
/InnerEye/Scripts/check_annotation_quality.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 | import argparse
6 | from pathlib import Path
7 | from typing import Any
8 |
9 | import pandas as pd
10 |
11 | from InnerEye.ML.common import DATASET_CSV_FILE_NAME
12 | from InnerEye.ML.utils.dataset_util import add_label_stats_to_dataframe
13 |
14 |
15 | def main(args: Any) -> None:
16 | dataframe_path = args.data_root_dir / Path(DATASET_CSV_FILE_NAME)
17 | dataframe = add_label_stats_to_dataframe(input_dataframe=pd.read_csv(str(dataframe_path)),
18 | dataset_root_directory=args.data_root_dir,
19 | target_label_names=args.target_label_names)
20 |
21 | # Write the dataframe in a file and exit
22 | dataframe.to_csv(args.output_csv_path)
23 |
24 |
25 | if __name__ == "__main__":
26 | parser = argparse.ArgumentParser()
27 | # noinspection PyTypeChecker
28 | parser.add_argument("--data-root-dir", type=Path, help="Path to data root directory")
29 | parser.add_argument("--target-label-names", nargs='+', type=str, help="Names of target structures e.g. prostate")
30 | # noinspection PyTypeChecker
31 | parser.add_argument("--output-csv-path", type=Path, help="Path to output csv file")
32 | args = parser.parse_args()
33 |
34 | # Sample run
35 | # python check_annotation_quality.py
36 | # --data-root-dir "/path/to/your/data/directory"
37 | # --target-label-names femur_r femur_l rectum prostate bladder seminalvesicles
38 | # --output-csv-path "/path/to/your/data/directory/label_stats.csv"
39 |
40 | # Dataset blob folder
41 | main(args)
42 |
--------------------------------------------------------------------------------
/InnerEye/ML/models/losses/mixture.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 | from typing import Any, List, Tuple
6 |
7 | import torch
8 |
9 | from InnerEye.ML.utils.supervised_criterion import SupervisedLearningCriterion
10 |
11 |
12 | class MixtureLoss(SupervisedLearningCriterion):
13 |
14 | def __init__(self, components: List[Tuple[float, SupervisedLearningCriterion]]):
15 | """
16 | Loss function defined as a weighted mixture (interpolation) of other loss functions.
17 |
18 | :param components: a non-empty list of weights and loss function instances.
19 | """
20 | super().__init__()
21 | if not components:
22 | raise ValueError("At least one (weight, loss_function) pair must be supplied.")
23 | self.components = components
24 |
25 | def forward_minibatch(self, output: torch.Tensor, target: torch.Tensor, **kwargs: Any) -> torch.Tensor:
26 | """
27 | Wrapper for mixture loss function implemented in PyTorch. Arguments should be suitable for the
28 | component loss functions, typically:
29 |
30 | :param output: Class logits (unnormalised), e.g. in 3D : BxCxWxHxD or in 1D BxC
31 | :param target: Target labels encoded in one-hot representation, e.g. in 3D BxCxWxHxD or in 1D BxC
32 | """
33 | result = None
34 | for (weight, loss_function) in self.components:
35 | loss = weight * loss_function(output, target, **kwargs)
36 | if result is None:
37 | result = loss
38 | else:
39 | result = result + loss
40 | assert result is not None
41 | torch.cuda.empty_cache()
42 | return result
43 |
--------------------------------------------------------------------------------
/Tests/ML/pipelines/test_ensemble.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 | import numpy as np
6 | import torch
7 |
8 | from InnerEye.ML.config import EnsembleAggregationType
9 | from InnerEye.ML.pipelines.ensemble import EnsemblePipeline
10 | from InnerEye.ML.pipelines.inference import InferencePipeline
11 | from InnerEye.ML.utils.image_util import posteriors_to_segmentation
12 |
13 |
14 | def test_aggregate_results() -> None:
15 | """
16 | Test to make sure inference results are aggregated as expected
17 | """
18 | torch.manual_seed(1)
19 | num_models = 3
20 | # set expected posteriors
21 | model_results = []
22 | # create results for each model
23 | for x in range(num_models):
24 | posteriors = torch.nn.functional.softmax(torch.rand(3, 3, 3, 3), dim=0).numpy()
25 | model_results.append(InferencePipeline.Result(
26 | patient_id=0,
27 | posteriors=posteriors,
28 | segmentation=posteriors_to_segmentation(posteriors),
29 | voxel_spacing_mm=(1, 1, 1)
30 | ))
31 |
32 | # We calculate expected_posteriors before aggregating, as aggregation modifies model_results.
33 | expected_posteriors = np.mean([x.posteriors for x in model_results], axis=0)
34 | ensemble_result = EnsemblePipeline.aggregate_results(model_results,
35 | aggregation_type=EnsembleAggregationType.Average)
36 | assert ensemble_result.patient_id == model_results[0].patient_id
37 |
38 | assert np.array_equal(ensemble_result.posteriors, expected_posteriors)
39 | assert np.array_equal(ensemble_result.segmentation, posteriors_to_segmentation(expected_posteriors))
40 |
--------------------------------------------------------------------------------
/Tests/ML/models/losses/test_mixture_loss.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 | import torch
6 |
7 | from InnerEye.ML.config import SegmentationLoss
8 | from InnerEye.ML.models.losses.mixture import MixtureLoss
9 | from InnerEye.ML.utils.model_util import create_segmentation_loss_component
10 | from Tests.ML.configs.DummyModel import DummyModel
11 |
12 |
13 | def test_single_element() -> None:
14 | config = DummyModel()
15 | element = create_segmentation_loss_component(config, SegmentationLoss.CrossEntropy, power=None)
16 | mixture = MixtureLoss([(1.0, element)])
17 | target = torch.tensor([[[0, 0, 1], [1, 1, 0]]], dtype=torch.float32)
18 | logits = torch.tensor([[[-1e9, -1e9, 0], [0, 0, 0]]], dtype=torch.float32)
19 | # Extract class indices
20 | element_loss = element(logits, target)
21 | mixture_loss = mixture(logits, target)
22 | assert torch.isclose(element_loss, mixture_loss)
23 |
24 |
25 | def test_two_elements() -> None:
26 | config = DummyModel()
27 | element1 = create_segmentation_loss_component(config, SegmentationLoss.CrossEntropy, power=None)
28 | element2 = create_segmentation_loss_component(config, SegmentationLoss.SoftDice, power=None)
29 | weight1, weight2 = 0.3, 0.7
30 | mixture = MixtureLoss([(weight1, element1), (weight2, element2)])
31 | target = torch.tensor([[[0, 0, 1], [1, 1, 0]]], dtype=torch.float32)
32 | logits = torch.tensor([[[-1e9, -1e9, 0], [0, 0, 0]]], dtype=torch.float32)
33 | # Extract class indices
34 | element1_loss = element1(logits, target)
35 | element2_loss = element2(logits, target)
36 | mixture_loss = mixture(logits, target)
37 | assert torch.isclose(weight1 * element1_loss + weight2 * element2_loss, mixture_loss)
38 |
--------------------------------------------------------------------------------
/InnerEye/Common/spawn_subprocess.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 | import os
6 | import subprocess
7 | from typing import Dict, List, Optional, Tuple
8 |
9 |
10 | def spawn_and_monitor_subprocess(process: str,
11 | args: List[str],
12 | env: Optional[Dict[str, str]] = None) -> \
13 | Tuple[int, List[str]]:
14 | """
15 | Helper function to start a subprocess, passing in a given set of arguments, and monitor it.
16 | Returns the subprocess exit code and the list of lines written to stdout.
17 |
18 | :param process: The name and path of the executable to spawn.
19 | :param args: The args to the process.
20 | :param env: The environment variables that the new process will run with. If not provided, copy the
21 | environment from the current process.
22 | :return: Return code after the process has finished, and the list of lines that were written to stdout by the
23 | subprocess.
24 | """
25 | if env is None:
26 | env = dict(os.environ.items())
27 | p = subprocess.Popen(
28 | args=[process] + args,
29 | shell=False,
30 | stdout=subprocess.PIPE,
31 | stderr=subprocess.STDOUT,
32 | env=env,
33 | universal_newlines=True
34 | )
35 |
36 | # For mypy. We have set stdout in the arg list of Popen, so should be readable.
37 | assert p.stdout
38 |
39 | # Read and print all the lines that are printed by the subprocess
40 | stdout_lines = []
41 | for line in iter(p.stdout.readline, ""):
42 | line = line.strip()
43 | stdout_lines.append(line)
44 | print(line)
45 | p.stdout.close()
46 | return_code = p.wait()
47 | return return_code, stdout_lines
48 |
--------------------------------------------------------------------------------
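A minimal usage sketch for spawn_and_monitor_subprocess, spawning the current Python interpreter with an inline script:

```python
import sys

from InnerEye.Common.spawn_subprocess import spawn_and_monitor_subprocess

return_code, stdout_lines = spawn_and_monitor_subprocess(
    process=sys.executable,
    args=["-c", "print('hello from the subprocess')"])
assert return_code == 0
assert stdout_lines == ["hello from the subprocess"]
```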
/Tests/ML/utils/test_transforms.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 | import pytest
6 | import torch
7 |
8 | from InnerEye.Common import common_util
9 | from InnerEye.Common.common_util import is_gpu_tensor
10 | from InnerEye.ML.utils.transforms import Compose3D, Transform3D
11 | from Tests.ML.util import no_gpu_available
12 |
13 |
14 | @pytest.mark.skipif(common_util.is_windows(), reason="Has issues on windows build")
15 | @pytest.mark.gpu
16 | @pytest.mark.skipif(no_gpu_available, reason="Testing Transforms with GPU tensors requires a GPU")
17 | def test_transform_compose_gpu() -> None:
18 | test_transform_compose(use_gpu=True)
19 |
20 |
21 | def test_transform_compose(use_gpu: bool = False) -> None:
22 | class Identity(Transform3D[torch.Tensor]):
23 | def __call__(self, sample: torch.Tensor) -> torch.Tensor:
24 | return self.get_gpu_tensor_if_possible(sample)
25 |
26 | class Square(Transform3D[torch.Tensor]):
27 | def __call__(self, sample: torch.Tensor) -> torch.Tensor:
28 | return self.get_gpu_tensor_if_possible(sample) ** 2
29 |
30 | a = torch.randint(low=2, high=4, size=[1])
31 | if use_gpu:
32 | a = a.cuda()
33 |
34 | # test that composition of multiple identity operations holds
35 | identity_compose = Compose3D([Identity(use_gpu=use_gpu)] * 3)
36 | a_t = identity_compose(a)
37 | assert torch.equal(Compose3D.apply(identity_compose, a), a_t)
38 | assert torch.equal(a_t, a)
39 | assert is_gpu_tensor(a_t) == use_gpu
40 |
41 | # test that composition of multiple square operations holds
42 | square_compose = Compose3D([Square(use_gpu=use_gpu)] * 3)
43 | assert torch.equal(square_compose(a), a ** 8)
44 | assert torch.equal(Compose3D.apply(square_compose, a), a ** 8)
45 |
--------------------------------------------------------------------------------
/Tests/Azure/test_download_pytest.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 | from pathlib import Path
6 | from typing import Optional
7 |
8 | import pytest
9 | from azureml.core import Experiment
10 |
11 | from InnerEye.Azure.azure_util import to_azure_friendly_string
12 | from InnerEye.Azure.run_pytest import download_pytest_result
13 | from InnerEye.Common.output_directories import OutputFolderForTests
14 | from Tests.ML.util import get_default_azure_config
15 |
16 |
17 | def test_download_pytest_file(test_output_dirs: OutputFolderForTests) -> None:
18 | output_dir = test_output_dirs.root_dir
19 | azure_config = get_default_azure_config()
20 | workspace = azure_config.get_workspace()
21 |
22 | def get_run_and_download_pytest(branch: str, number: int) -> Optional[Path]:
23 | experiment = Experiment(workspace, name=to_azure_friendly_string(branch))
24 | runs = [run for run in experiment.get_runs() if run.number == number]
25 | if len(runs) != 1:
26 | raise ValueError(f"Expected to get exactly 1 run in experiment {experiment.name}")
27 | return download_pytest_result(runs[0], output_dir)
28 |
29 | # The experiment below contains a recent successful build that generated a pytest file.
30 | # Run 6 in that experiment was canceled and did not yet write the pytest file:
31 | with pytest.raises(ValueError) as ex:
32 | get_run_and_download_pytest("refs/pull/219/merge", 6)
33 | assert "No pytest result file" in str(ex)
34 | downloaded = get_run_and_download_pytest("refs/pull/219/merge", 7)
35 | assert downloaded is not None
36 | assert downloaded.exists()
37 | # Delete the file - it should be cleaned up with the test output directories though.
38 | # If the file remained, it would be uploaded as a test result file to Azure DevOps
39 | downloaded.unlink()
40 |
--------------------------------------------------------------------------------
/.github/workflows/codeql-analysis.yml:
--------------------------------------------------------------------------------
1 | name: "CodeQL"
2 |
3 | on:
4 | push:
5 | branches: [ main, recon ]
6 | pull_request:
7 | # The branches below must be a subset of the branches above
8 | branches: [ main ]
9 | schedule:
10 | - cron: '45 4 * * 1'
11 |
12 | jobs:
13 | codeql_analyze:
14 | name: CodeQL Analyze
15 | runs-on: ubuntu-latest
16 |
17 | strategy:
18 | fail-fast: false
19 | matrix:
20 | language: [ 'python' ]
21 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
22 | # Learn more:
23 | # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
24 |
25 | steps:
26 | - name: Checkout repository
27 | uses: actions/checkout@v2
28 |
29 | # Initializes the CodeQL tools for scanning.
30 | - name: Initialize CodeQL
31 | uses: github/codeql-action/init@v1
32 | with:
33 | languages: ${{ matrix.language }}
34 | # If you wish to specify custom queries, you can do so here or in a config file.
35 | # By default, queries listed here will override any specified in a config file.
36 | # Prefix the list here with "+" to use these queries and those in the config file.
37 | # queries: ./path/to/local/query, your-org/your-repo/queries@main
38 |
39 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
40 | # If this step fails, then you should remove it and run the build manually (see below)
41 | - name: Autobuild
42 | uses: github/codeql-action/autobuild@v1
43 |
44 | # ℹ️ Command-line programs to run using the OS shell.
45 | # 📚 https://git.io/JvXDl
46 |
47 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
48 | # and modify them (or add more) to build your code if your project
49 | # uses a compiled language
50 |
51 | #- run: |
52 | # make bootstrap
53 | # make release
54 |
55 | - name: Perform CodeQL Analysis
56 | uses: github/codeql-action/analyze@v1
57 |
--------------------------------------------------------------------------------
/Tests/ML/utils/test_layer_utils.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 | # ------------------------------------------------------------------------------------------
5 | import pytest
6 |
7 | from InnerEye.ML.config import PaddingMode
8 | from InnerEye.ML.utils.layer_util import get_padding_from_kernel_size
9 |
10 |
11 | @pytest.mark.parametrize("padding_mode", [PaddingMode.NoPadding, PaddingMode.Zero, PaddingMode.Edge])
12 | def test_get_padding_from_kernel_size(padding_mode: PaddingMode) -> None:
13 | def check_padding(kernel_size, dilation, num_dimensions, expected) -> None: # type: ignore
14 | actual = get_padding_from_kernel_size(padding_mode, kernel_size, dilation, num_dimensions)
15 | if padding_mode == PaddingMode.NoPadding:
16 | assert actual == tuple(0 for _ in expected), "No padding should return all zeros."
17 | else:
18 | assert actual == expected
19 |
20 | # Scalar values for kernel size and dilation: Should expand to the given number of dimensions
21 | check_padding(kernel_size=1, dilation=1, num_dimensions=3, expected=(0, 0, 0))
22 | check_padding(kernel_size=3, dilation=1, num_dimensions=3, expected=(1, 1, 1))
23 | # If either kernel_size or dilation are sized, the number of dimensions should be ignored,
24 | # and number of dimensions should come from whatever argument has size
25 | check_padding(kernel_size=(3, 3), dilation=1, num_dimensions=10, expected=(1, 1))
26 | check_padding(kernel_size=3, dilation=(1, 1), num_dimensions=10, expected=(1, 1))
27 | # Non-isotropic kernels
28 | check_padding(kernel_size=(3, 3, 1), dilation=1, num_dimensions=10, expected=(1, 1, 0))
29 | # With dilation: Dimension where the kernel size is 1 should not be padded, because
30 | # no reduction in size is happening along that dimension (see test_degenerate_conv_with_dilation)
31 | check_padding(kernel_size=(3, 3, 1), dilation=5, num_dimensions=3, expected=(5, 5, 0))
32 |
--------------------------------------------------------------------------------