├── DeepCSR └── deepcsr │ └── 1.0 │ ├── src │ ├── __init__.py │ ├── metrics.py │ └── utils.py │ ├── CBSI.tar.gz │ ├── weights │ └── best_model.pth │ ├── spec.yaml │ ├── Dockerfile │ ├── requirements.yml │ └── model_card.yaml ├── .datalad ├── config └── .gitattributes ├── .github ├── workflows │ ├── package.json │ ├── getFileExtension.py │ ├── assignIssue.yml │ ├── change_label.yml │ ├── getModelInfo.py │ ├── getPythonScripts.py │ ├── add_model.yml │ ├── update_yaml_info.py │ ├── package-lock.json │ ├── get_model_data.yml │ └── get_model_data.js ├── issue-branch.yml └── ISSUE_TEMPLATE │ ├── config.yml │ └── addModel.yml ├── lcn └── parcnet │ └── 1.0.0 │ ├── requirements.txt │ ├── weights │ └── dktatlas_identity_0.000_0.000_unet2d_320_0.050_60_pos_20_1.0.0.ckpt │ ├── Dockerfile │ ├── model_card.yaml │ ├── spec.yaml │ ├── predict.py │ └── parc.py ├── DDIG ├── SynthStrip │ └── 1.0.0 │ │ ├── requirements.txt │ │ ├── weights │ │ └── synthstrip.1.pt │ │ ├── Dockerfile │ │ ├── spec.yaml │ │ └── model_card.yaml ├── VoxelMorph │ └── 1.0.0 │ │ ├── weights │ │ └── vxm_dense_brain_T1_3D_mse.h5 │ │ ├── requirements.txt │ │ ├── Dockerfile │ │ ├── spec.yaml │ │ ├── model_card.yaml │ │ └── register.py └── SynthMorph │ └── 1.0.0 │ ├── brains │ ├── weights │ │ └── brains-dice-vel-0.5-res-16-256f.h5 │ ├── requirements.txt │ ├── Dockerfile │ ├── spec.yaml │ ├── model_card.yaml │ └── register.py │ └── shapes │ ├── weights │ └── shapes-dice-vel-3-res-8-16-32-256f.h5 │ ├── requirements.txt │ ├── Dockerfile │ ├── spec.yaml │ ├── model_card.yaml │ └── register.py ├── images ├── brain-extraction │ ├── unet-best-prediction.png │ └── unet-worst-prediction.png └── brain-generation │ ├── progressivegan_generation_axial.png │ ├── progressivegan_generation_coronal.png │ └── progressivegan_generation_sagittal.png ├── UCL ├── SynthSeg │ └── 1.0.0 │ │ ├── weights │ │ └── SynthSeg.h5 │ │ ├── Dockerfile │ │ ├── requirements.txt │ │ ├── spec.yaml │ │ ├── model_card.yaml │ │ └── predict.py └── SynthSR │ └── 1.0.0 │ ├── general │ ├── weights │ │ └── SynthSR_v10_210712.h5 │ ├── Dockerfile │ ├── requirements.txt │ ├── model_card.yaml │ ├── spec.yaml │ └── predict.py │ └── hyperfine │ ├── weights │ └── SynthSR_v10_210712_hyperfine.h5 │ ├── Dockerfile │ ├── requirements.txt │ ├── spec.yaml │ └── model_card.yaml ├── neuronets ├── kwyk │ └── 0.4.1 │ │ ├── bwn │ │ ├── weights │ │ │ ├── saved_model.pb │ │ │ └── variables │ │ │ │ ├── variables.index │ │ │ │ └── variables.data-00000-of-00001 │ │ ├── Dockerfile │ │ ├── spec.yaml │ │ └── model_card.yaml │ │ ├── bwn_multi │ │ ├── weights │ │ │ ├── saved_model.pb │ │ │ └── variables │ │ │ │ ├── variables.index │ │ │ │ └── variables.data-00000-of-00001 │ │ ├── Dockerfile │ │ ├── spec.yaml │ │ └── model_card.yaml │ │ └── bvwn_multi_prior │ │ ├── weights │ │ ├── saved_model.pb │ │ └── variables │ │ │ ├── variables.index │ │ │ └── variables.data-00000-of-00001 │ │ ├── Dockerfile │ │ ├── spec.yaml │ │ └── model_card.yaml ├── braingen │ └── 0.1.0 │ │ ├── generator_res_16 │ │ ├── weights │ │ │ ├── saved_model.pb │ │ │ └── variables │ │ │ │ ├── variables.index │ │ │ │ └── variables.data-00000-of-00001 │ │ ├── Dockerfile │ │ ├── model_card.yaml │ │ ├── spec.yaml │ │ └── generate.py │ │ ├── generator_res_32 │ │ ├── weights │ │ │ ├── saved_model.pb │ │ │ └── variables │ │ │ │ ├── variables.index │ │ │ │ └── variables.data-00000-of-00001 │ │ ├── Dockerfile │ │ ├── model_card.yaml │ │ ├── spec.yaml │ │ └── generate.py │ │ ├── generator_res_64 │ │ ├── weights │ │ │ ├── saved_model.pb │ │ │ └── variables │ │ 
│ │ ├── variables.index │ │ │ │ └── variables.data-00000-of-00001 │ │ ├── Dockerfile │ │ ├── model_card.yaml │ │ ├── spec.yaml │ │ └── generate.py │ │ ├── generator_res_8 │ │ ├── weights │ │ │ ├── saved_model.pb │ │ │ └── variables │ │ │ │ ├── variables.index │ │ │ │ └── variables.data-00000-of-00001 │ │ ├── Dockerfile │ │ ├── model_card.yaml │ │ ├── spec.yaml │ │ └── generate.py │ │ ├── generator_res_128 │ │ ├── weights │ │ │ ├── saved_model.pb │ │ │ └── variables │ │ │ │ ├── variables.index │ │ │ │ └── variables.data-00000-of-00001 │ │ ├── Dockerfile │ │ ├── model_card.yaml │ │ ├── spec.yaml │ │ └── generate.py │ │ └── generator_res_256 │ │ ├── weights │ │ ├── saved_model.pb │ │ └── variables │ │ │ ├── variables.index │ │ │ └── variables.data-00000-of-00001 │ │ ├── Dockerfile │ │ ├── model_card.yaml │ │ ├── spec.yaml │ │ └── generate.py ├── ams │ └── 0.1.0 │ │ ├── weights │ │ └── meningioma_T1wc_128iso_v1.h5 │ │ ├── Dockerfile │ │ ├── model_card.yaml │ │ └── spec.yaml └── brainy │ └── 0.1.0 │ ├── weights │ └── brain-extraction-unet-128iso-model.h5 │ ├── Dockerfile │ ├── model_card.yaml │ └── spec.yaml ├── CHANGELOG.MD ├── .gitattributes ├── docs ├── model_card.yaml └── spec.yaml ├── .gitignore ├── README.md ├── add_model_instructions.md └── Schema └── model_card-schema.yaml /DeepCSR/deepcsr/1.0/src/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.datalad/config: -------------------------------------------------------------------------------- 1 | [datalad "dataset"] 2 | id = cde07100-2efb-4826-9401-d811de6fd6b9 3 | -------------------------------------------------------------------------------- /.github/workflows/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "dependencies": { 3 | "js-yaml": "^4.1.0" 4 | } 5 | } -------------------------------------------------------------------------------- /lcn/parcnet/1.0.0/requirements.txt: -------------------------------------------------------------------------------- 1 | pytorch >= 1.9.0 2 | torchvision >= 0.10.1 3 | nibabel >= 3.2.2 4 | -------------------------------------------------------------------------------- /DDIG/SynthStrip/1.0.0/requirements.txt: -------------------------------------------------------------------------------- 1 | # pytorch==1.10.2 2 | scipy==1.8.1 3 | surfa==0.2.0 4 | PyYAML 5 | -------------------------------------------------------------------------------- /DeepCSR/deepcsr/1.0/CBSI.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuronets/trained-models/HEAD/DeepCSR/deepcsr/1.0/CBSI.tar.gz -------------------------------------------------------------------------------- /DeepCSR/deepcsr/1.0/weights/best_model.pth: -------------------------------------------------------------------------------- 1 | /annex/objects/URL--https&c%%drive.google.com%file%d-8c029761c103d42076917e4b13d91754 2 | -------------------------------------------------------------------------------- /images/brain-extraction/unet-best-prediction.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuronets/trained-models/HEAD/images/brain-extraction/unet-best-prediction.png -------------------------------------------------------------------------------- /images/brain-extraction/unet-worst-prediction.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuronets/trained-models/HEAD/images/brain-extraction/unet-worst-prediction.png -------------------------------------------------------------------------------- /.datalad/.gitattributes: -------------------------------------------------------------------------------- 1 | 2 | config annex.largefiles=nothing 3 | metadata/aggregate* annex.largefiles=nothing 4 | metadata/objects/** annex.largefiles=(anything) -------------------------------------------------------------------------------- /UCL/SynthSeg/1.0.0/weights/SynthSeg.h5: -------------------------------------------------------------------------------- 1 | ../../../../.git/annex/objects/WQ/Z8/MD5E-s53079056--5a563a6f527f2f2d534424885f65da33.h5/MD5E-s53079056--5a563a6f527f2f2d534424885f65da33.h5 -------------------------------------------------------------------------------- /images/brain-generation/progressivegan_generation_axial.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuronets/trained-models/HEAD/images/brain-generation/progressivegan_generation_axial.png -------------------------------------------------------------------------------- /images/brain-generation/progressivegan_generation_coronal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuronets/trained-models/HEAD/images/brain-generation/progressivegan_generation_coronal.png -------------------------------------------------------------------------------- /images/brain-generation/progressivegan_generation_sagittal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuronets/trained-models/HEAD/images/brain-generation/progressivegan_generation_sagittal.png -------------------------------------------------------------------------------- /neuronets/kwyk/0.4.1/bwn/weights/saved_model.pb: -------------------------------------------------------------------------------- 1 | ../../../../../.git/annex/objects/Kk/j6/MD5E-s186828--47acbe875596b17390605bd04ffdf733.pb/MD5E-s186828--47acbe875596b17390605bd04ffdf733.pb -------------------------------------------------------------------------------- /DDIG/SynthStrip/1.0.0/weights/synthstrip.1.pt: -------------------------------------------------------------------------------- 1 | ../../../../.git/annex/objects/xZ/jM/MD5E-s30851709--27f86a16f396ac6db3023c2f70b97ae1.1.pt/MD5E-s30851709--27f86a16f396ac6db3023c2f70b97ae1.1.pt -------------------------------------------------------------------------------- /neuronets/kwyk/0.4.1/bwn/weights/variables/variables.index: -------------------------------------------------------------------------------- 1 | ../../../../../../.git/annex/objects/5Q/KM/MD5E-s1713--79a271ef4214fb189dea16809a1be013/MD5E-s1713--79a271ef4214fb189dea16809a1be013 -------------------------------------------------------------------------------- /neuronets/kwyk/0.4.1/bwn_multi/weights/saved_model.pb: -------------------------------------------------------------------------------- 1 | ../../../../../.git/annex/objects/Kw/gq/MD5E-s195234--50d0e6fbeb3d84be1dbfd241726ee2e8.pb/MD5E-s195234--50d0e6fbeb3d84be1dbfd241726ee2e8.pb -------------------------------------------------------------------------------- /DDIG/VoxelMorph/1.0.0/weights/vxm_dense_brain_T1_3D_mse.h5: 
-------------------------------------------------------------------------------- 1 | ../../../../.git/annex/objects/5f/9K/MD5E-s1323288--59d7563cca2bc1df4060d7d18e2091da.h5/MD5E-s1323288--59d7563cca2bc1df4060d7d18e2091da.h5 -------------------------------------------------------------------------------- /UCL/SynthSR/1.0.0/general/weights/SynthSR_v10_210712.h5: -------------------------------------------------------------------------------- 1 | ../../../../../.git/annex/objects/3W/6Z/MD5E-s53075984--21857e965d9d71a32dc0c35ba5c593d2.h5/MD5E-s53075984--21857e965d9d71a32dc0c35ba5c593d2.h5 -------------------------------------------------------------------------------- /neuronets/kwyk/0.4.1/bwn_multi/weights/variables/variables.index: -------------------------------------------------------------------------------- 1 | ../../../../../../.git/annex/objects/V6/Wq/MD5E-s1713--1c6854c1d0277c78c3b05ba6db1d3546/MD5E-s1713--1c6854c1d0277c78c3b05ba6db1d3546 -------------------------------------------------------------------------------- /.github/issue-branch.yml: -------------------------------------------------------------------------------- 1 | branchName: 'issue-${issue.number}' 2 | openDraftPR: true 3 | prSkipCI: true 4 | autoCloseIssue: true 5 | commentMessage: 'Branch ${branchName} created for issue: ${issue.title}' -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_16/weights/saved_model.pb: -------------------------------------------------------------------------------- 1 | ../../../../../.git/annex/objects/p5/v9/MD5E-s453518--2a957fb65d79250558556d5ae466d914.pb/MD5E-s453518--2a957fb65d79250558556d5ae466d914.pb -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_32/weights/saved_model.pb: -------------------------------------------------------------------------------- 1 | ../../../../../.git/annex/objects/7x/wq/MD5E-s616502--310a6d8d7e7aa7f174df0ade2287d6c7.pb/MD5E-s616502--310a6d8d7e7aa7f174df0ade2287d6c7.pb -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_64/weights/saved_model.pb: -------------------------------------------------------------------------------- 1 | ../../../../../.git/annex/objects/jV/Xj/MD5E-s848675--3d2ff98466b3c35565b44706899251f6.pb/MD5E-s848675--3d2ff98466b3c35565b44706899251f6.pb -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_8/weights/saved_model.pb: -------------------------------------------------------------------------------- 1 | ../../../../../.git/annex/objects/FZ/ZZ/MD5E-s324156--4a559439b4172899ca14e555b17b7f3b.pb/MD5E-s324156--4a559439b4172899ca14e555b17b7f3b.pb -------------------------------------------------------------------------------- /neuronets/kwyk/0.4.1/bvwn_multi_prior/weights/saved_model.pb: -------------------------------------------------------------------------------- 1 | ../../../../../.git/annex/objects/10/WK/MD5E-s247307--7ba6904627f91ed8fe6e270e7fcb1d88.pb/MD5E-s247307--7ba6904627f91ed8fe6e270e7fcb1d88.pb -------------------------------------------------------------------------------- /neuronets/kwyk/0.4.1/bvwn_multi_prior/weights/variables/variables.index: -------------------------------------------------------------------------------- 1 | 
../../../../../../.git/annex/objects/z9/4M/MD5E-s2005--3abe2db9aa9164338dd912e02a57cb46/MD5E-s2005--3abe2db9aa9164338dd912e02a57cb46 -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_128/weights/saved_model.pb: -------------------------------------------------------------------------------- 1 | ../../../../../.git/annex/objects/10/w3/MD5E-s1219469--69aebf26b05c9aadf9cda5fb353468d4.pb/MD5E-s1219469--69aebf26b05c9aadf9cda5fb353468d4.pb -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_128/weights/variables/variables.index: -------------------------------------------------------------------------------- 1 | ../../../../../../.git/annex/objects/8P/f3/MD5E-s1312--abb3670f7da6cae289daae9555e76d3e/MD5E-s1312--abb3670f7da6cae289daae9555e76d3e -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_16/weights/variables/variables.index: -------------------------------------------------------------------------------- 1 | ../../../../../../.git/annex/objects/qq/60/MD5E-s931--86cc0cf95b14557eb5cb57932f85dfc0/MD5E-s931--86cc0cf95b14557eb5cb57932f85dfc0 -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_256/weights/saved_model.pb: -------------------------------------------------------------------------------- 1 | ../../../../../.git/annex/objects/26/kZ/MD5E-s1874107--b88b143b75bf13fe14157c5d2807261d.pb/MD5E-s1874107--b88b143b75bf13fe14157c5d2807261d.pb -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_256/weights/variables/variables.index: -------------------------------------------------------------------------------- 1 | ../../../../../../.git/annex/objects/FZ/f7/MD5E-s1451--162e397048cfafa0ab733f29ddd67750/MD5E-s1451--162e397048cfafa0ab733f29ddd67750 -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_32/weights/variables/variables.index: -------------------------------------------------------------------------------- 1 | ../../../../../../.git/annex/objects/4q/vG/MD5E-s1056--a452d526f842ce95b6e37b5b6bddf90a/MD5E-s1056--a452d526f842ce95b6e37b5b6bddf90a -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_64/weights/variables/variables.index: -------------------------------------------------------------------------------- 1 | ../../../../../../.git/annex/objects/58/9m/MD5E-s1192--964380e52f08235519debb92d2a5b73d/MD5E-s1192--964380e52f08235519debb92d2a5b73d -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_8/weights/variables/variables.index: -------------------------------------------------------------------------------- 1 | ../../../../../../.git/annex/objects/2v/ZK/MD5E-s806--1af8deaacd0e989e3f6202902a9bb8a4/MD5E-s806--1af8deaacd0e989e3f6202902a9bb8a4 -------------------------------------------------------------------------------- /UCL/SynthSR/1.0.0/hyperfine/weights/SynthSR_v10_210712_hyperfine.h5: -------------------------------------------------------------------------------- 1 | ../../../../../.git/annex/objects/7X/Kx/MD5E-s53078720--76a217175bababc7ec3d7e5bfbaa3faf.h5/MD5E-s53078720--76a217175bababc7ec3d7e5bfbaa3faf.h5 
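Note: the weight files in this dataset are not the binaries themselves but git-annex pointer symlinks into .git/annex/objects; after cloning, the content has to be fetched from a sibling (the add_model workflow further below publishes to an OSF sibling named osf-storage). A minimal sketch of fetching one file with DataLad's Python API — the path is illustrative, assuming the dataset was cloned with DataLad:

import datalad.api as dl

# Retrieve the annexed content so the pointer symlink resolves to a real
# file; DataLad pulls the object from whichever sibling provides it
# (e.g. osf-storage) and places it under .git/annex/objects.
dl.get("UCL/SynthSR/1.0.0/hyperfine/weights/SynthSR_v10_210712_hyperfine.h5")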
-------------------------------------------------------------------------------- /neuronets/kwyk/0.4.1/bwn/weights/variables/variables.data-00000-of-00001: -------------------------------------------------------------------------------- 1 | ../../../../../../.git/annex/objects/55/3q/MD5E-s12011744--9f55f44619dceef130a9a3ef882d39f9/MD5E-s12011744--9f55f44619dceef130a9a3ef882d39f9 -------------------------------------------------------------------------------- /DDIG/SynthMorph/1.0.0/brains/weights/brains-dice-vel-0.5-res-16-256f.h5: -------------------------------------------------------------------------------- 1 | ../../../../../.git/annex/objects/5j/px/MD5E-s85150080--b94594327100b662ea82a4a0fa53d6a8.h5/MD5E-s85150080--b94594327100b662ea82a4a0fa53d6a8.h5 -------------------------------------------------------------------------------- /DDIG/SynthMorph/1.0.0/shapes/weights/shapes-dice-vel-3-res-8-16-32-256f.h5: -------------------------------------------------------------------------------- 1 | ../../../../../.git/annex/objects/mP/W9/MD5E-s85150080--a8d15a638650adfb0438a455e72df776.h5/MD5E-s85150080--a8d15a638650adfb0438a455e72df776.h5 -------------------------------------------------------------------------------- /neuronets/kwyk/0.4.1/bwn_multi/weights/variables/variables.data-00000-of-00001: -------------------------------------------------------------------------------- 1 | ../../../../../../.git/annex/objects/Gq/Xm/MD5E-s12011744--010efeb20ae5d9b443852fe5d328955d/MD5E-s12011744--010efeb20ae5d9b443852fe5d328955d -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_8/weights/variables/variables.data-00000-of-00001: -------------------------------------------------------------------------------- 1 | ../../../../../../.git/annex/objects/X1/g1/MD5E-s41960678--1b1b728e8dc9db22075d316e6a68fc9f/MD5E-s41960678--1b1b728e8dc9db22075d316e6a68fc9f -------------------------------------------------------------------------------- /neuronets/kwyk/0.4.1/bvwn_multi_prior/weights/variables/variables.data-00000-of-00001: -------------------------------------------------------------------------------- 1 | ../../../../../../.git/annex/objects/7P/Xj/MD5E-s12014432--fd9b2da9b73e8ebc4c9d3d0b090b6d91/MD5E-s12014432--fd9b2da9b73e8ebc4c9d3d0b090b6d91 -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_128/weights/variables/variables.data-00000-of-00001: -------------------------------------------------------------------------------- 1 | ../../../../../../.git/annex/objects/qZ/K7/MD5E-s69755394--ee8c428ea5b5548634c2f1e87e55e2eb/MD5E-s69755394--ee8c428ea5b5548634c2f1e87e55e2eb -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_16/weights/variables/variables.data-00000-of-00001: -------------------------------------------------------------------------------- 1 | ../../../../../../.git/annex/objects/QG/Wk/MD5E-s58740720--8d8b9accd76af6e36375afaff3a62201/MD5E-s58740720--8d8b9accd76af6e36375afaff3a62201 -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_256/weights/variables/variables.data-00000-of-00001: -------------------------------------------------------------------------------- 1 | ../../../../../../.git/annex/objects/4M/kP/MD5E-s69888277--7f6f491b9e44f28d8ef3e824a4ff785e/MD5E-s69888277--7f6f491b9e44f28d8ef3e824a4ff785e 
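Note: each annex key above encodes the file's byte size and checksum — MD5E-s&lt;size&gt;--&lt;md5hex&gt;[.&lt;ext&gt;] for the MD5E backend configured in .gitattributes (the ams and brainy weights use the analogous SHA256E form). A minimal sketch, assuming only the key format visible in these paths, for verifying a fetched file against its key:

import hashlib
import os
import re

def verify_md5e(key: str, local_path: str) -> bool:
    """Check a local file against a git-annex MD5E key."""
    m = re.match(r"MD5E-s(\d+)--([0-9a-f]{32})", key)
    if m is None:
        raise ValueError(f"not an MD5E key: {key}")
    size, digest = int(m.group(1)), m.group(2)
    if os.path.getsize(local_path) != size:  # cheap size check first
        return False
    md5 = hashlib.md5()
    with open(local_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # stream large weights
            md5.update(chunk)
    return md5.hexdigest() == digest

# e.g. verify_md5e("MD5E-s85150080--b94594327100b662ea82a4a0fa53d6a8.h5",
#                  "DDIG/SynthMorph/1.0.0/brains/weights/brains-dice-vel-0.5-res-16-256f.h5")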
-------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_32/weights/variables/variables.data-00000-of-00001: -------------------------------------------------------------------------------- 1 | ../../../../../../.git/annex/objects/0J/f5/MD5E-s67131128--7c26a317d2e56bf03a2f0c21348210dd/MD5E-s67131128--7c26a317d2e56bf03a2f0c21348210dd -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_64/weights/variables/variables.data-00000-of-00001: -------------------------------------------------------------------------------- 1 | ../../../../../../.git/annex/objects/vk/fK/MD5E-s69229565--cd2902466f680ec43e5d64eb5999e3e8/MD5E-s69229565--cd2902466f680ec43e5d64eb5999e3e8 -------------------------------------------------------------------------------- /lcn/parcnet/1.0.0/weights/dktatlas_identity_0.000_0.000_unet2d_320_0.050_60_pos_20_1.0.0.ckpt: -------------------------------------------------------------------------------- 1 | ../../../../.git/annex/objects/Kq/41/MD5E-s28357229--e9dfbe332cdb2702249793d745c43b44.0.ckpt/MD5E-s28357229--e9dfbe332cdb2702249793d745c43b44.0.ckpt -------------------------------------------------------------------------------- /neuronets/ams/0.1.0/weights/meningioma_T1wc_128iso_v1.h5: -------------------------------------------------------------------------------- 1 | ../../../../.git/annex/objects/zw/07/SHA256E-s57400024--1c76ac1d0c99458d6c0302908a555d390cbf6d34be468e7ac604311f1b4eb9d4.h5/SHA256E-s57400024--1c76ac1d0c99458d6c0302908a555d390cbf6d34be468e7ac604311f1b4eb9d4.h5 -------------------------------------------------------------------------------- /neuronets/brainy/0.1.0/weights/brain-extraction-unet-128iso-model.h5: -------------------------------------------------------------------------------- 1 | ../../../../.git/annex/objects/3X/Gm/SHA256E-s19169888--17d946f977134b70cb48ce3960d634517d9e0c6f09ca40fb7de5b86faeb94eb9.h5/SHA256E-s19169888--17d946f977134b70cb48ce3960d634517d9e0c6f09ca40fb7de5b86faeb94eb9.h5 -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: true 2 | contact_links: 3 | - name: "🤔 How to add a model?" 
4 | url: "https://github.com/neuronets/trained-models/blob/master/add_model_instructions.md" 5 | about: Documentation on how to add a model to the zoo 6 | -------------------------------------------------------------------------------- /DDIG/VoxelMorph/1.0.0/requirements.txt: -------------------------------------------------------------------------------- 1 | git+https://github.com/adalca/pystrum.git@8cd5c483195971c0c51e9809f33aa04777aa35c8 2 | git+https://github.com/adalca/neurite.git@c735164ca973afc5c46b2814194f708762f73fec 3 | git+https://github.com/voxelmorph/voxelmorph.git@67bc9d209c2eb42dee09276b5d1cb77eb8587c54 4 | -------------------------------------------------------------------------------- /DDIG/SynthMorph/1.0.0/brains/requirements.txt: -------------------------------------------------------------------------------- 1 | git+https://github.com/adalca/pystrum.git@8cd5c483195971c0c51e9809f33aa04777aa35c8 2 | git+https://github.com/adalca/neurite.git@c735164ca973afc5c46b2814194f708762f73fec 3 | git+https://github.com/voxelmorph/voxelmorph.git@67bc9d209c2eb42dee09276b5d1cb77eb8587c54 4 | -------------------------------------------------------------------------------- /DDIG/SynthMorph/1.0.0/shapes/requirements.txt: -------------------------------------------------------------------------------- 1 | git+https://github.com/adalca/pystrum.git@8cd5c483195971c0c51e9809f33aa04777aa35c8 2 | git+https://github.com/adalca/neurite.git@c735164ca973afc5c46b2814194f708762f73fec 3 | git+https://github.com/voxelmorph/voxelmorph.git@67bc9d209c2eb42dee09276b5d1cb77eb8587c54 4 | -------------------------------------------------------------------------------- /lcn/parcnet/1.0.0/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | FROM pytorch/pytorch:1.10.0-cuda11.3-cudnn8-runtime 4 | 5 | RUN apt-get update \ 6 | && apt-get install --yes --quiet --no-install-recommends \ 7 | ca-certificates \ 8 | git \ 9 | libgomp1 \ 10 | && rm -rf /var/lib/apt/lists/* 11 | 12 | ENV LC_ALL=C.UTF-8 \ 13 | LANG=C.UTF-8 14 | 15 | RUN pip install --no-cache-dir nibabel scipy PyYAML 16 | 17 | WORKDIR /work 18 | LABEL maintainer="Hoda Rajaei " 19 | -------------------------------------------------------------------------------- /.github/workflows/getFileExtension.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import sys 3 | 4 | # Get URL from standard input when the python file is run 5 | url = sys.argv[1] 6 | 7 | response = requests.get(url) 8 | 9 | # Get the filename from the response headers 10 | content_type = response.headers["Content-Disposition"] 11 | # Get the file extension 12 | extension = content_type.split("=")[1].split(";")[0] 13 | 14 | # Return everything after the first "." 
and without the last quote 15 | extension = extension[1:].strip('"') 16 | extension = extension[extension.find(".") + 1:] 17 | 18 | print(extension) -------------------------------------------------------------------------------- /.github/workflows/assignIssue.yml: -------------------------------------------------------------------------------- 1 | name: Assign Issue to User 2 | 3 | on: 4 | issues: 5 | types: [opened] 6 | 7 | jobs: 8 | assignUser: 9 | if: ${{ startsWith(github.event.issue.title, 'New Model:') || startsWith(github.event.issue.title, 'Update Model:') }} 10 | 11 | runs-on: ubuntu-latest 12 | 13 | permissions: 14 | issues: write 15 | 16 | steps: 17 | - name: 'Auto-assign issue' 18 | uses: pozil/auto-assign-issue@v1 19 | with: 20 | assignees: ${{ github.event.issue.user.login }} 21 | repo-token: ${{ secrets.GH_TOKEN }} -------------------------------------------------------------------------------- /UCL/SynthSeg/1.0.0/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | FROM tensorflow/tensorflow:2.0.1-gpu-jupyter 4 | 5 | RUN apt-get update \ 6 | && apt-get install --yes --quiet --no-install-recommends \ 7 | ca-certificates \ 8 | git \ 9 | libgomp1 \ 10 | && rm -rf /var/lib/apt/lists/* 11 | 12 | ENV LC_ALL=C.UTF-8 \ 13 | LANG=C.UTF-8 14 | 15 | COPY requirements.txt requirements.txt 16 | RUN pip install --no-cache-dir -r requirements.txt 17 | RUN pip install --no-cache-dir PyYAML 18 | 19 | WORKDIR /work 20 | LABEL maintainer="Hoda Rajaei " 21 | -------------------------------------------------------------------------------- /DDIG/VoxelMorph/1.0.0/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | FROM tensorflow/tensorflow:2.6.0-gpu-jupyter 4 | 5 | RUN apt-get update \ 6 | && apt-get install --yes --quiet --no-install-recommends \ 7 | ca-certificates \ 8 | git \ 9 | libgomp1 \ 10 | && rm -rf /var/lib/apt/lists/* 11 | 12 | ENV LC_ALL=C.UTF-8 \ 13 | LANG=C.UTF-8 14 | 15 | COPY requirements.txt requirements.txt 16 | RUN pip install --no-cache-dir -r requirements.txt 17 | RUN pip install --no-cache-dir PyYAML 18 | 19 | WORKDIR /work 20 | LABEL maintainer="Hoda Rajaei " 21 | -------------------------------------------------------------------------------- /DDIG/SynthMorph/1.0.0/brains/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | FROM tensorflow/tensorflow:2.6.0-gpu-jupyter 4 | 5 | RUN apt-get update \ 6 | && apt-get install --yes --quiet --no-install-recommends \ 7 | ca-certificates \ 8 | git \ 9 | libgomp1 \ 10 | && rm -rf /var/lib/apt/lists/* 11 | 12 | ENV LC_ALL=C.UTF-8 \ 13 | LANG=C.UTF-8 14 | 15 | COPY requirements.txt requirements.txt 16 | RUN pip install --no-cache-dir -r requirements.txt 17 | RUN pip install --no-cache-dir PyYAML 18 | 19 | WORKDIR /work 20 | LABEL maintainer="Hoda Rajaei " 21 | -------------------------------------------------------------------------------- /DDIG/SynthMorph/1.0.0/shapes/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | FROM tensorflow/tensorflow:2.6.0-gpu-jupyter 4 | 5 | RUN apt-get update \ 6 | && apt-get install --yes --quiet --no-install-recommends \ 7 | ca-certificates \ 8 | git \ 9 | libgomp1 \ 10 | && rm -rf /var/lib/apt/lists/* 11 | 12 | ENV LC_ALL=C.UTF-8 \ 13 | LANG=C.UTF-8 14 | 15 | COPY 
requirements.txt requirements.txt 16 | RUN pip install --no-cache-dir -r requirements.txt 17 | RUN pip install --no-cache-dir PyYAML 18 | 19 | WORKDIR /work 20 | LABEL maintainer="Hoda Rajaei " 21 | -------------------------------------------------------------------------------- /UCL/SynthSR/1.0.0/general/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | FROM tensorflow/tensorflow:2.0.1-gpu-jupyter 4 | 5 | RUN apt-get update \ 6 | && apt-get install --yes --quiet --no-install-recommends \ 7 | ca-certificates \ 8 | git \ 9 | libgomp1 \ 10 | && rm -rf /var/lib/apt/lists/* 11 | 12 | ENV LC_ALL=C.UTF-8 \ 13 | LANG=C.UTF-8 14 | 15 | COPY requirements.txt requirements.txt 16 | RUN pip install --no-cache-dir -r requirements.txt 17 | RUN pip install --no-cache-dir PyYAML 18 | 19 | WORKDIR /work 20 | LABEL maintainer="Hoda Rajaei " 21 | -------------------------------------------------------------------------------- /UCL/SynthSR/1.0.0/hyperfine/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | FROM tensorflow/tensorflow:2.0.1-gpu-jupyter 4 | 5 | RUN apt-get update \ 6 | && apt-get install --yes --quiet --no-install-recommends \ 7 | ca-certificates \ 8 | git \ 9 | libgomp1 \ 10 | && rm -rf /var/lib/apt/lists/* 11 | 12 | ENV LC_ALL=C.UTF-8 \ 13 | LANG=C.UTF-8 14 | 15 | COPY requirements.txt requirements.txt 16 | RUN pip install --no-cache-dir -r requirements.txt 17 | RUN pip install --no-cache-dir PyYAML 18 | 19 | WORKDIR /work 20 | LABEL maintainer="Hoda Rajaei " 21 | -------------------------------------------------------------------------------- /.github/workflows/change_label.yml: -------------------------------------------------------------------------------- 1 | name: Update label for issues 2 | 3 | on: 4 | issues: 5 | types: 6 | - edited 7 | 8 | jobs: 9 | update_label: 10 | if: ${{ (startsWith(github.event.issue.title, 'New Model:') || startsWith(github.event.issue.title, 'Update Model:')) }} 11 | 12 | runs-on: ubuntu-latest 13 | 14 | steps: 15 | - name: Set labels 16 | uses: actions-cool/issues-helper@v3 17 | with: 18 | actions: 'set-labels' 19 | token: ${{ secrets.GH_TOKEN }} 20 | issue-number: ${{ github.event.issue.number }} 21 | labels: 'Ready-to-test' -------------------------------------------------------------------------------- /.github/workflows/getModelInfo.py: -------------------------------------------------------------------------------- 1 | import os 2 | import re 3 | 4 | # Get the python_scripts environment variable 5 | python_scripts = os.environ.get("model_info") 6 | 7 | # Split the URLs into a list and strip whitespace 8 | urls = [url.strip() for url in python_scripts.strip().split('\n')] 9 | 10 | svn_urls = [] 11 | # Loop through each URL 12 | for url in urls: 13 | # Replace /tree/branchName or /blob/branchName with /trunk 14 | svn_url = re.sub(r'/tree/[^/]+|/blob/[^/]+', '/trunk', url) 15 | 16 | svn_urls.append(svn_url) 17 | 18 | # Print the generated SVN URLs separated by newline 19 | print('\n'.join(svn_urls)) -------------------------------------------------------------------------------- /.github/workflows/getPythonScripts.py: -------------------------------------------------------------------------------- 1 | import os 2 | import re 3 | 4 | # Get the python_scripts environment variable 5 | python_scripts = os.environ.get("pythons") 6 | 7 | # Split the URLs into a list and strip whitespace 8 | urls = 
[url.strip() for url in python_scripts.strip().split('\n')] 9 | 10 | svn_urls = [] 11 | # Loop through each URL 12 | for url in urls: 13 | # Replace /tree/branchName or /blob/branchName with /trunk 14 | svn_url = re.sub(r'/tree/[^/]+|/blob/[^/]+', '/trunk', url) 15 | 16 | svn_urls.append(svn_url) 17 | 18 | # Print the generated SVN URLs separated by newline 19 | print('\n'.join(svn_urls)) -------------------------------------------------------------------------------- /DDIG/SynthStrip/1.0.0/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | FROM pytorch/pytorch:1.11.0-cuda11.3-cudnn8-runtime 4 | 5 | RUN apt-get update \ 6 | && apt-get install --yes --quiet --no-install-recommends \ 7 | ca-certificates \ 8 | git \ 9 | libgomp1 \ 10 | gcc \ 11 | && rm -rf /var/lib/apt/lists/* 12 | 13 | ENV LC_ALL=C.UTF-8 \ 14 | LANG=C.UTF-8 15 | 16 | # python packages 17 | COPY requirements.txt requirements.txt 18 | RUN pip install --no-cache-dir -r requirements.txt 19 | 20 | # clean up 21 | RUN rm -rf /root/.cache/pip 22 | 23 | WORKDIR /work 24 | LABEL maintainer="Hoda Rajaei " 25 | -------------------------------------------------------------------------------- /CHANGELOG.MD: -------------------------------------------------------------------------------- 1 | ## Changelog 2 | 3 | ### Version 1.0.0 🚀 (2023-07-27) 4 | 5 | - **Feature** (minor): Added a LinkML schema and validator for creating model_card and spec yaml files. 6 | - **Feature** (minor): Created a PR template to get data for the workflow. 7 | - **Feature** (minor): Added a workflow for adding a new model to the repo. This includes building and pushing the docker image, and validating card and spec information. 8 | - **Fix** (patch): Fixed issues with DeepCSR image creation. 9 | - **Feature** (minor): Added the docs folder containing model_card and spec templates. 10 | - **Feature** (minor): Added a schema folder containing the linkml schema and the validator. 11 | - **Docs** (patch): Improved the current docs on adding a new model to the repo. 
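Note: the two workflow helpers above (getModelInfo.py and getPythonScripts.py) share one trick: rewriting a GitHub /tree/&lt;branch&gt; or /blob/&lt;branch&gt; web URL into the /trunk form used by GitHub's SVN interface, where /trunk maps to the default branch. A worked example of that substitution, with an illustrative input URL:

import re

url = "https://github.com/neuronets/trained-models/blob/master/docs/spec.yaml"
svn_url = re.sub(r'/tree/[^/]+|/blob/[^/]+', '/trunk', url)
print(svn_url)
# https://github.com/neuronets/trained-models/trunk/docs/spec.yaml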
-------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | 2 | * annex.backend=MD5E 3 | **/.git* annex.largefiles=nothing 4 | images/brain-extraction annex.largefiles=nothing 5 | images/brain-generation annex.largefiles=nothing 6 | docs/*.yaml annex.largefiles=nothing 7 | * annex.largefiles=((mimeencoding=binary)and(largerthan=0)) 8 | **/**/**/*.py annex.largefiles=nothing 9 | **/**/**/*.yaml annex.largefiles=nothing 10 | **/**/**/Dockerfile annex.largefiles=nothing 11 | **/**/**/*.txt annex.largefiles=nothing 12 | **/**/**/*.md annex.largefiles=nothing 13 | **/**/**/**/*.py annex.largefiles=nothing 14 | **/**/**/**/*.txt annex.largefiles=nothing 15 | **/**/**/**/*.yaml annex.largefiles=nothing 16 | **/**/**/**/*.md annex.largefiles=nothing 17 | **/**/**/**/Dockerfile annex.largefiles=nothing 18 | **/.git* annex.largefiles=nothing 19 | -------------------------------------------------------------------------------- /docs/model_card.yaml: -------------------------------------------------------------------------------- 1 | Model_details: 2 | Organization: 3 | Model_date: 4 | Model_version: 5 | Model_type: 6 | More_information: 7 | Citation_details: 8 | Contact_info: 9 | 10 | Intended_use: 11 | Primary_intended_uses: 12 | Primary_intended_users: 13 | Out_of_scope_use_cases: 14 | 15 | Factors: 16 | Relevant_factors: 17 | Evaluation_factors: 18 | Model_performance_measures: 19 | 20 | Metrics: 21 | Model Performance Measures: 22 | Decision Thresholds: 23 | Variation Approaches: 24 | 25 | Evaluation Data: 26 | Datasets: 27 | Motivation: 28 | Preprocessing: 29 | 30 | Training Data: 31 | Datasets: 32 | Motivation: 33 | Preprocessing: 34 | 35 | Quantitative Analyses: 36 | Unitary Results: 37 | Intersectional Results: 38 | 39 | Ethical Considerations: 40 | 41 | Caveats and Recommendations: 42 | -------------------------------------------------------------------------------- /lcn/parcnet/1.0.0/model_card.yaml: -------------------------------------------------------------------------------- 1 | Model_details: 2 | Organization: "lcn" 3 | Model_date: "2023" 4 | Model_version: 1.0.0 5 | Model_type: 6 | More_information: "parcnet" 7 | Citation_details: 8 | Contact_info: 9 | 10 | Intended_use: 11 | Primary_intended_uses: 12 | Primary_intended_users: 13 | Out_of_scope_use_cases: 14 | 15 | Factors: 16 | Relevant_factors: 17 | Evaluation_factors: 18 | Model_performance_measures: 19 | 20 | Metrics: 21 | Model Performance Measures: 22 | Decision Thresholds: 23 | Variation Approaches: 24 | 25 | Evaluation Data: 26 | Datasets: 27 | Motivation: 28 | Preprocessing: 29 | 30 | Training Data: 31 | Datasets: 32 | Motivation: 33 | Preprocessing: 34 | 35 | Quantitative Analyses: 36 | Unitary Results: 37 | Intersectional Results: 38 | 39 | Ethical Considerations: 40 | 41 | Caveats and Recommendations: 42 | -------------------------------------------------------------------------------- /neuronets/ams/0.1.0/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM tensorflow/tensorflow:2.5.0-gpu-jupyter 2 | 3 | RUN curl -sSL http://neuro.debian.net/lists/bionic.us-nh.full | tee /etc/apt/sources.list.d/neurodebian.sources.list \ 4 | && export GNUPGHOME="$(mktemp -d)" \ 5 | && echo "disable-ipv6" >> ${GNUPGHOME}/dirmngr.conf \ 6 | && apt-key adv --homedir $GNUPGHOME --recv-keys --keyserver hkp://pgpkeys.eu:80 0xA5D32F012649A5A9 \ 7 | && apt-get update \ 8 | &&
apt-get install -y git-annex-standalone git \ 9 | && rm -rf /tmp/* 10 | 11 | RUN git config user.name "nobrainerzoo" \ 12 | && git config user.email "nobrainerzoo" 13 | 14 | RUN python3 -m pip install --no-cache-dir nobrainer datalad datalad-osf PyYAML 15 | 16 | ENV LC_ALL=C.UTF-8 \ 17 | LANG=C.UTF-8 18 | 19 | WORKDIR "/work" 20 | LABEL maintainer="Jakub Kaczmarzyk , Hoda Rajaei " 21 | -------------------------------------------------------------------------------- /neuronets/brainy/0.1.0/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM tensorflow/tensorflow:2.5.0-gpu-jupyter 2 | 3 | RUN curl -sSL http://neuro.debian.net/lists/bionic.us-nh.full | tee /etc/apt/sources.list.d/neurodebian.sources.list \ 4 | && export GNUPGHOME="$(mktemp -d)" \ 5 | && echo "disable-ipv6" >> ${GNUPGHOME}/dirmngr.conf \ 6 | && apt-key adv --homedir $GNUPGHOME --recv-keys --keyserver hkp://pgpkeys.eu:80 0xA5D32F012649A5A9 \ 7 | && apt-get update \ 8 | && apt-get install -y git-annex-standalone git \ 9 | && rm -rf /tmp/* 10 | 11 | RUN git config user.name "nobrainerzoo" \ 12 | && git config user.email "nobrainerzoo" 13 | 14 | RUN python3 -m pip install --no-cache-dir nobrainer datalad datalad-osf PyYAML 15 | 16 | ENV LC_ALL=C.UTF-8 \ 17 | LANG=C.UTF-8 18 | 19 | WORKDIR "/work" 20 | LABEL maintainer="Jakub Kaczmarzyk , Hoda Rajaei " 21 | -------------------------------------------------------------------------------- /neuronets/kwyk/0.4.1/bwn/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM tensorflow/tensorflow:2.5.0-gpu-jupyter 2 | 3 | RUN curl -sSL http://neuro.debian.net/lists/bionic.us-nh.full | tee /etc/apt/sources.list.d/neurodebian.sources.list \ 4 | && export GNUPGHOME="$(mktemp -d)" \ 5 | && echo "disable-ipv6" >> ${GNUPGHOME}/dirmngr.conf \ 6 | && apt-key adv --homedir $GNUPGHOME --recv-keys --keyserver hkp://pgpkeys.eu:80 0xA5D32F012649A5A9 \ 7 | && apt-get update \ 8 | && apt-get install -y git-annex-standalone git \ 9 | && rm -rf /tmp/* 10 | 11 | RUN git config user.name "nobrainerzoo" \ 12 | && git config user.email "nobrainerzoo" 13 | 14 | RUN python3 -m pip install --no-cache-dir nobrainer datalad datalad-osf PyYAML 15 | 16 | ENV LC_ALL=C.UTF-8 \ 17 | LANG=C.UTF-8 18 | 19 | WORKDIR "/work" 20 | LABEL maintainer="Jakub Kaczmarzyk , Hoda Rajaei " 21 | -------------------------------------------------------------------------------- /neuronets/kwyk/0.4.1/bwn_multi/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM tensorflow/tensorflow:2.5.0-gpu-jupyter 2 | 3 | RUN curl -sSL http://neuro.debian.net/lists/bionic.us-nh.full | tee /etc/apt/sources.list.d/neurodebian.sources.list \ 4 | && export GNUPGHOME="$(mktemp -d)" \ 5 | && echo "disable-ipv6" >> ${GNUPGHOME}/dirmngr.conf \ 6 | && apt-key adv --homedir $GNUPGHOME --recv-keys --keyserver hkp://pgpkeys.eu:80 0xA5D32F012649A5A9 \ 7 | && apt-get update \ 8 | && apt-get install -y git-annex-standalone git \ 9 | && rm -rf /tmp/* 10 | 11 | RUN git config user.name "nobrainerzoo" \ 12 | && git config user.email "nobrainerzoo" 13 | 14 | RUN python3 -m pip install --no-cache-dir nobrainer datalad datalad-osf PyYAML 15 | 16 | ENV LC_ALL=C.UTF-8 \ 17 | LANG=C.UTF-8 18 | 19 | WORKDIR "/work" 20 | LABEL maintainer="Jakub Kaczmarzyk , Hoda Rajaei " 21 | -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_16/Dockerfile:
-------------------------------------------------------------------------------- 1 | FROM tensorflow/tensorflow:2.5.0-gpu-jupyter 2 | 3 | RUN curl -sSL http://neuro.debian.net/lists/bionic.us-nh.full | tee /etc/apt/sources.list.d/neurodebian.sources.list \ 4 | && export GNUPGHOME="$(mktemp -d)" \ 5 | && echo "disable-ipv6" >> ${GNUPGHOME}/dirmngr.conf \ 6 | && apt-key adv --homedir $GNUPGHOME --recv-keys --keyserver hkp://pgpkeys.eu:80 0xA5D32F012649A5A9 \ 7 | && apt-get update \ 8 | && apt-get install -y git-annex-standalone git \ 9 | && rm -rf /tmp/* 10 | 11 | RUN git config user.name "nobrainerzoo" \ 12 | && git config user.email "nobrainerzoo" 13 | 14 | RUN python3 -m pip install --no-cache-dir nobrainer datalad datalad-osf PyYAML 15 | 16 | ENV LC_ALL=C.UTF-8 \ 17 | LANG=C.UTF-8 18 | 19 | WORKDIR "/work" 20 | LABEL maintainer="Jakub Kaczmarzyk , Hoda Rajaei " 21 | -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_32/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM tensorflow/tensorflow:2.5.0-gpu-jupyter 2 | 3 | RUN curl -sSL http://neuro.debian.net/lists/bionic.us-nh.full | tee /etc/apt/sources.list.d/neurodebian.sources.list \ 4 | && export GNUPGHOME="$(mktemp -d)" \ 5 | && echo "disable-ipv6" >> ${GNUPGHOME}/dirmngr.conf \ 6 | && apt-key adv --homedir $GNUPGHOME --recv-keys --keyserver hkp://pgpkeys.eu:80 0xA5D32F012649A5A9 \ 7 | && apt-get update \ 8 | && apt-get install -y git-annex-standalone git \ 9 | && rm -rf /tmp/* 10 | 11 | RUN git config user.name "nobrainerzoo" \ 12 | && git config user.email "nobrainerzoo" 13 | 14 | RUN python3 -m pip install --no-cache-dir nobrainer datalad datalad-osf PyYAML 15 | 16 | ENV LC_ALL=C.UTF-8 \ 17 | LANG=C.UTF-8 18 | 19 | WORKDIR "/work" 20 | LABEL maintainer="Jakub Kaczmarzyk , Hoda Rajaei " 21 | -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_64/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM tensorflow/tensorflow:2.5.0-gpu-jupyter 2 | 3 | RUN curl -sSL http://neuro.debian.net/lists/bionic.us-nh.full | tee /etc/apt/sources.list.d/neurodebian.sources.list \ 4 | && export GNUPGHOME="$(mktemp -d)" \ 5 | && echo "disable-ipv6" >> ${GNUPGHOME}/dirmngr.conf \ 6 | && apt-key adv --homedir $GNUPGHOME --recv-keys --keyserver hkp://pgpkeys.eu:80 0xA5D32F012649A5A9 \ 7 | && apt-get update \ 8 | && apt-get install -y git-annex-standalone git \ 9 | && rm -rf /tmp/* 10 | 11 | RUN git config user.name "nobrainerzoo" \ 12 | && git config user.email "nobrainerzoo" 13 | 14 | RUN python3 -m pip install --no-cache-dir nobrainer datalad datalad-osf PyYAML 15 | 16 | ENV LC_ALL=C.UTF-8 \ 17 | LANG=C.UTF-8 18 | 19 | WORKDIR "/work" 20 | LABEL maintainer="Jakub Kaczmarzyk , Hoda Rajaei " 21 | -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_8/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM tensorflow/tensorflow:2.5.0-gpu-jupyter 2 | 3 | RUN curl -sSL http://neuro.debian.net/lists/bionic.us-nh.full | tee /etc/apt/sources.list.d/neurodebian.sources.list \ 4 | && export GNUPGHOME="$(mktemp -d)" \ 5 | && echo "disable-ipv6" >> ${GNUPGHOME}/dirmngr.conf \ 6 | && apt-key adv --homedir $GNUPGHOME --recv-keys --keyserver hkp://pgpkeys.eu:80 0xA5D32F012649A5A9 \ 7 | && apt-get update \ 8 | && apt-get install
-y git-annex-standalone git \ 9 | && rm -rf /tmp/* 10 | 11 | RUN git config user.name "nobrainerzoo" \ 12 | && git config user.email "nobrainerzoo" 13 | 14 | RUN python3 -m pip install --no-cache-dir nobrainer datalad datalad-osf PyYAML 15 | 16 | ENV LC_ALL=C.UTF-8 \ 17 | LANG=C.UTF-8 18 | 19 | WORKDIR "/work" 20 | LABEL maintainer="Jakub Kaczmarzyk , Hoda Rajaei " 21 | -------------------------------------------------------------------------------- /neuronets/kwyk/0.4.1/bvwn_multi_prior/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM tensorflow/tensorflow:2.5.0-gpu-jupyter 2 | 3 | RUN curl -sSL http://neuro.debian.net/lists/bionic.us-nh.full | tee /etc/apt/sources.list.d/neurodebian.sources.list \ 4 | && export GNUPGHOME="$(mktemp -d)" \ 5 | && echo "disable-ipv6" >> ${GNUPGHOME}/dirmngr.conf \ 6 | && apt-key adv --homedir $GNUPGHOME --recv-keys --keyserver hkp://pgpkeys.eu:80 0xA5D32F012649A5A9 \ 7 | && apt-get update \ 8 | && apt-get install -y git-annex-standalone git \ 9 | && rm -rf /tmp/* 10 | 11 | RUN git config user.name "nobrainerzoo" \ 12 | && git config user.email "nobrainerzoo" 13 | 14 | RUN python3 -m pip install --no-cache-dir nobrainer datalad datalad-osf PyYAML 15 | 16 | ENV LC_ALL=C.UTF-8 \ 17 | LANG=C.UTF-8 18 | 19 | WORKDIR "/work" 20 | LABEL maintainer="Jakub Kaczmarzyk , Hoda Rajaei " 21 | -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_128/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM tensorflow/tensorflow:2.5.0-gpu-jupyter 2 | 3 | RUN curl -sSL http://neuro.debian.net/lists/bionic.us-nh.full | tee /etc/apt/sources.list.d/neurodebian.sources.list \ 4 | && export GNUPGHOME="$(mktemp -d)" \ 5 | && echo "disable-ipv6" >> ${GNUPGHOME}/dirmngr.conf \ 6 | && apt-key adv --homedir $GNUPGHOME --recv-keys --keyserver hkp://pgpkeys.eu:80 0xA5D32F012649A5A9 \ 7 | && apt-get update \ 8 | && apt-get install -y git-annex-standalone git \ 9 | && rm -rf /tmp/* 10 | 11 | RUN git config user.name "nobrainerzoo" \ 12 | && git config user.email "nobrainerzoo" 13 | 14 | RUN python3 -m pip install --no-cache-dir nobrainer datalad datalad-osf PyYAML 15 | 16 | ENV LC_ALL=C.UTF-8 \ 17 | LANG=C.UTF-8 18 | 19 | WORKDIR "/work" 20 | LABEL maintainer="Jakub Kaczmarzyk , Hoda Rajaei " 21 | -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_256/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM tensorflow/tensorflow:2.5.0-gpu-jupyter 2 | 3 | RUN curl -sSL http://neuro.debian.net/lists/bionic.us-nh.full | tee /etc/apt/sources.list.d/neurodebian.sources.list \ 4 | && export GNUPGHOME="$(mktemp -d)" \ 5 | && echo "disable-ipv6" >> ${GNUPGHOME}/dirmngr.conf \ 6 | && apt-key adv --homedir $GNUPGHOME --recv-keys --keyserver hkp://pgpkeys.eu:80 0xA5D32F012649A5A9 \ 7 | && apt-get update \ 8 | && apt-get install -y git-annex-standalone git \ 9 | && rm -rf /tmp/* 10 | 11 | RUN git config user.name "nobrainerzoo" \ 12 | && git config user.email "nobrainerzoo" 13 | 14 | RUN python3 -m pip install --no-cache-dir nobrainer datalad datalad-osf PyYAML 15 | 16 | ENV LC_ALL=C.UTF-8 \ 17 | LANG=C.UTF-8 18 | 19 | WORKDIR "/work" 20 | LABEL maintainer="Jakub Kaczmarzyk , Hoda Rajaei " 21 | --------------------------------------------------------------------------------
/neuronets/braingen/0.1.0/generator_res_128/model_card.yaml: -------------------------------------------------------------------------------- 1 | Model_details: 2 | Organization: "neuronets" 3 | Model_date: "2020" 4 | Model_version: 0.1.0 5 | Model_type: "GAN" 6 | More_information: "braingen_res_128" 7 | Citation_details: "https://github.com/neuronets/progressivegan3d" 8 | Contact_info: "https://github.com/neuronets/progressivegan3d/issues/new" 9 | 10 | Intended_use: 11 | Primary_intended_uses: 12 | Primary_intended_users: 13 | Out_of_scope_use_cases: 14 | 15 | Factors: 16 | Relevant_factors: 17 | Evaluation_factors: 18 | Model_performance_measures: 19 | 20 | Metrics: 21 | Model Performance Measures: 22 | Decision Thresholds: 23 | Variation Approaches: 24 | 25 | Evaluation Data: 26 | Datasets: 27 | Motivation: 28 | Preprocessing: 29 | 30 | Training Data: 31 | Datasets: 32 | Motivation: 33 | Preprocessing: 34 | 35 | Quantitative Analyses: 36 | Unitary Results: 37 | Intersectional Results: 38 | 39 | Ethical Considerations: 40 | 41 | Caveats and Recommendations: 42 | -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_16/model_card.yaml: -------------------------------------------------------------------------------- 1 | Model_details: 2 | Organization: "neuronets" 3 | Model_date: "2020" 4 | Model_version: 0.1.0 5 | Model_type: "GAN" 6 | More_information: "braingen_res_16" 7 | Citation_details: "https://github.com/neuronets/progressivegan3d" 8 | Contact_info: "https://github.com/neuronets/progressivegan3d/issues/new" 9 | 10 | Intended_use: 11 | Primary_intended_uses: 12 | Primary_intended_users: 13 | Out_of_scope_use_cases: 14 | 15 | Factors: 16 | Relevant_factors: 17 | Evaluation_factors: 18 | Model_performance_measures: 19 | 20 | Metrics: 21 | Model Performance Measures: 22 | Decision Thresholds: 23 | Variation Approaches: 24 | 25 | Evaluation Data: 26 | Datasets: 27 | Motivation: 28 | Preprocessing: 29 | 30 | Training Data: 31 | Datasets: 32 | Motivation: 33 | Preprocessing: 34 | 35 | Quantitative Analyses: 36 | Unitary Results: 37 | Intersectional Results: 38 | 39 | Ethical Considerations: 40 | 41 | Caveats and Recommendations: 42 | -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_256/model_card.yaml: -------------------------------------------------------------------------------- 1 | Model_details: 2 | Organization: "neuronets" 3 | Model_date: "2020" 4 | Model_version: 0.1.0 5 | Model_type: "GAN" 6 | More_information: "braingen_res_256" 7 | Citation_details: "https://github.com/neuronets/progressivegan3d" 8 | Contact_info: "https://github.com/neuronets/progressivegan3d/issues/new" 9 | 10 | Intended_use: 11 | Primary_intended_uses: 12 | Primary_intended_users: 13 | Out_of_scope_use_cases: 14 | 15 | Factors: 16 | Relevant_factors: 17 | Evaluation_factors: 18 | Model_performance_measures: 19 | 20 | Metrics: 21 | Model Performance Measures: 22 | Decision Thresholds: 23 | Variation Approaches: 24 | 25 | Evaluation Data: 26 | Datasets: 27 | Motivation: 28 | Preprocessing: 29 | 30 | Training Data: 31 | Datasets: 32 | Motivation: 33 | Preprocessing: 34 | 35 | Quantitative Analyses: 36 | Unitary Results: 37 | Intersectional Results: 38 | 39 | Ethical Considerations: 40 | 41 | Caveats and Recommendations: 42 | -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_32/model_card.yaml: 
-------------------------------------------------------------------------------- 1 | Model_details: 2 | Organization: "neuronets" 3 | Model_date: "2020" 4 | Model_version: 0.1.0 5 | Model_type: "GAN" 6 | More_information: "braingen_res_32" 7 | Citation_details: "https://github.com/neuronets/progressivegan3d" 8 | Contact_info: "https://github.com/neuronets/progressivegan3d/issues/new" 9 | 10 | Intended_use: 11 | Primary_intended_uses: 12 | Primary_intended_users: 13 | Out_of_scope_use_cases: 14 | 15 | Factors: 16 | Relevant_factors: 17 | Evaluation_factors: 18 | Model_performance_measures: 19 | 20 | Metrics: 21 | Model Performance Measures: 22 | Decision Thresholds: 23 | Variation Approaches: 24 | 25 | Evaluation Data: 26 | Datasets: 27 | Motivation: 28 | Preprocessing: 29 | 30 | Training Data: 31 | Datasets: 32 | Motivation: 33 | Preprocessing: 34 | 35 | Quantitative Analyses: 36 | Unitary Results: 37 | Intersectional Results: 38 | 39 | Ethical Considerations: 40 | 41 | Caveats and Recommendations: 42 | -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_64/model_card.yaml: -------------------------------------------------------------------------------- 1 | Model_details: 2 | Organization: "neuronets" 3 | Model_date: "2020" 4 | Model_version: 0.1.0 5 | Model_type: "GAN" 6 | More_information: "braingen_res_64" 7 | Citation_details: "https://github.com/neuronets/progressivegan3d" 8 | Contact_info: "https://github.com/neuronets/progressivegan3d/issues/new" 9 | 10 | Intended_use: 11 | Primary_intended_uses: 12 | Primary_intended_users: 13 | Out_of_scope_use_cases: 14 | 15 | Factors: 16 | Relevant_factors: 17 | Evaluation_factors: 18 | Model_performance_measures: 19 | 20 | Metrics: 21 | Model Performance Measures: 22 | Decision Thresholds: 23 | Variation Approaches: 24 | 25 | Evaluation Data: 26 | Datasets: 27 | Motivation: 28 | Preprocessing: 29 | 30 | Training Data: 31 | Datasets: 32 | Motivation: 33 | Preprocessing: 34 | 35 | Quantitative Analyses: 36 | Unitary Results: 37 | Intersectional Results: 38 | 39 | Ethical Considerations: 40 | 41 | Caveats and Recommendations: 42 | -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_8/model_card.yaml: -------------------------------------------------------------------------------- 1 | Model_details: 2 | Organization: "neuronets" 3 | Model_date: "2020" 4 | Model_version: 0.1.0 5 | Model_type: "GAN" 6 | More_information: "braingen_res_8" 7 | Citation_details: "https://github.com/neuronets/progressivegan3d" 8 | Contact_info: "https://github.com/neuronets/progressivegan3d/issues/new" 9 | 10 | Intended_use: 11 | Primary_intended_uses: 12 | Primary_intended_users: 13 | Out_of_scope_use_cases: 14 | 15 | Factors: 16 | Relevant_factors: 17 | Evaluation_factors: 18 | Model_performance_measures: 19 | 20 | Metrics: 21 | Model Performance Measures: 22 | Decision Thresholds: 23 | Variation Approaches: 24 | 25 | Evaluation Data: 26 | Datasets: 27 | Motivation: 28 | Preprocessing: 29 | 30 | Training Data: 31 | Datasets: 32 | Motivation: 33 | Preprocessing: 34 | 35 | Quantitative Analyses: 36 | Unitary Results: 37 | Intersectional Results: 38 | 39 | Ethical Considerations: 40 | 41 | Caveats and Recommendations: 42 | -------------------------------------------------------------------------------- /DeepCSR/deepcsr/1.0/spec.yaml: -------------------------------------------------------------------------------- 1 | image: 2 | docker: 
neuronets/deepcsr 3 | singularity: nobrainer-zoo_deepcsr.sif 4 | repository: 5 | repo_url: None 6 | committish: None 7 | repo_download: 'False' 8 | repo_download_location: None 9 | inference: 10 | prediction_script: trained-models/DeepCSR/deepcsr/1.0/predict.py 11 | command: f"python {MODELS_PATH}/{model}/predict.py --conf_path {conf} --model_checkpoint {infile[0]} --dataset {infile[1]}" 12 | data_spec: 13 | infile: 14 | n_files: 1 15 | outfile: 16 | n_files: 1 17 | training_data_info: 18 | data_number: 19 | total: 1 20 | train: 1 21 | evaluate: 1 22 | test: 1 23 | biological_sex: 24 | male: 25 | female: 26 | age_histogram: '1' 27 | race: '1' 28 | imaging_contrast_info: '1' 29 | dataset_sources: '1' 30 | data_sites: 31 | number_of_sites: 1 32 | sites: '1' 33 | scanner_models: '1' 34 | hardware: '1' 35 | training_parameters: 36 | input_shape: '1' 37 | block_shape: '1' 38 | n_classes: 1 39 | lr: '1' 40 | n_epochs: 1 41 | total_batch_size: 1 42 | number_of_gpus: 1 43 | loss_function: '1' 44 | metrics: '1' 45 | data_preprocessing: '1' 46 | data_augmentation: '1' 47 | -------------------------------------------------------------------------------- /.github/workflows/add_model.yml: -------------------------------------------------------------------------------- 1 | name: Add Model 2 | 3 | on: 4 | push: 5 | branches: ["master"] 6 | 7 | jobs: 8 | build: 9 | # created matrix for future os runs 10 | runs-on: ${{ matrix.os }} 11 | strategy: 12 | matrix: 13 | os: [ ubuntu-22.04 ] 14 | python-version: ["3.9"] 15 | 16 | steps: 17 | # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it 18 | - uses: actions/checkout@v3 19 | with: 20 | fetch-depth: 0 21 | ref: master 22 | 23 | - name: Set up Python ${{ matrix.python-version }} 24 | uses: actions/setup-python@v4.2.0 25 | with: 26 | python-version: ${{ matrix.python-version }} 27 | 28 | - name: Install dependencies 29 | run: | 30 | python3 -m pip install --upgrade pip 31 | sudo apt-get install datalad 32 | python3 -m pip install datalad-osf 33 | git config --global user.name "trained_models" 34 | git config --global user.email "trained_models" 35 | - name: Add model 36 | env: 37 | OSF_TOKEN: ${{secrets.OSF_TOKEN}} 38 | run: | 39 | datalad siblings 40 | datalad siblings configure -s origin --publish-depends osf-storage 41 | # Sanity check 42 | # cat .git/config 43 | datalad push --to origin -d $GITHUB_WORKSPACE 44 | -------------------------------------------------------------------------------- /DeepCSR/deepcsr/1.0/src/metrics.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | from torch.nn import functional as F 4 | 5 | 6 | class OCCBCELogits(nn.Module): 7 | def __init__(self): 8 | super(OCCBCELogits, self).__init__() 9 | 10 | def forward(self, logits, occ): 11 | pos_weights = (torch.sum(1. 
- occ.view(-1, occ.size(-1)), dim=0) + 1e-12) 12 | pos_weights = pos_weights / (torch.sum(occ.view(-1, occ.size(-1)), dim=0) + 1e-12) 13 | loss = F.binary_cross_entropy_with_logits(logits, occ, pos_weight=pos_weights, reduction='none') 14 | return loss.sum(dim=[-1,-2]).mean() 15 | 16 | 17 | class SDFL1Loss(nn.Module): 18 | def __init__(self): 19 | super(SDFL1Loss, self).__init__() 20 | 21 | def forward(self, logits, sdf): 22 | loss = F.l1_loss(logits, sdf, reduction='none') 23 | return loss.sum(dim=[-1,-2]).mean() 24 | 25 | 26 | def intersection_over_union(pred_bin, gt_bin): 27 | assert pred_bin.shape == gt_bin.shape 28 | 29 | batch_size = gt_bin.shape[0] 30 | pred_bin = pred_bin.reshape(batch_size, -1).bool() 31 | gt_bin = gt_bin.reshape(batch_size, -1).bool() 32 | 33 | # Compute IOU 34 | area_union = torch.logical_or(pred_bin, gt_bin).float().sum(dim=-1) 35 | area_intersect = torch.logical_and(pred_bin, gt_bin).float().sum(dim=-1) 36 | iou = (area_intersect / area_union) 37 | return iou.mean() -------------------------------------------------------------------------------- /.github/workflows/update_yaml_info.py: -------------------------------------------------------------------------------- 1 | import oyaml 2 | import os 3 | import sys 4 | 5 | 6 | def edit_spec_yaml(path, model_name): 7 | spec_file = f"{path}/spec.yaml" 8 | 9 | # Check if the spec.yaml file exists 10 | if not os.path.isfile(spec_file): 11 | print(f"Error: spec.yaml file not found in {path}") 12 | return False 13 | 14 | # Read the content of the spec.yaml file 15 | with open(spec_file, "r") as f: 16 | spec_data = oyaml.safe_load(f) 17 | 18 | # Update the container info keys 19 | # The key is "image" 20 | container_info = spec_data.get("image", {}) 21 | container_info["singularity"] = f"nobrainer-zoo_{model_name}.sif" 22 | container_info["docker"] = f"neuronets/{model_name}" 23 | 24 | # Update the spec.yaml content with the modified data 25 | spec_data["image"] = container_info 26 | 27 | # Write the updated data back to the spec.yaml file 28 | with open(spec_file, "w") as f: 29 | oyaml.dump(spec_data, f) 30 | 31 | return True 32 | 33 | if __name__ == "__main__": 34 | model_folder = sys.argv[1] 35 | spec_dir = sys.argv[2] 36 | 37 | success = edit_spec_yaml(spec_dir, model_folder) 38 | 39 | if success: 40 | print("Spec.yaml file updated successfully!") 41 | else: 42 | print("Failed to update spec.yaml file.") -------------------------------------------------------------------------------- /UCL/SynthSeg/1.0.0/requirements.txt: -------------------------------------------------------------------------------- 1 | absl-py==0.9.0 2 | astor==0.8.1 3 | backcall==0.1.0 4 | cachetools==4.1.0 5 | certifi==2020.4.5.1 6 | chardet==3.0.4 7 | cloudpickle==1.1.1 8 | cycler==0.10.0 9 | decorator==4.4.2 10 | gast==0.2.2 11 | google-auth==1.14.0 12 | google-auth-oauthlib==0.4.1 13 | google-pasta==0.2.0 14 | grpcio==1.28.1 15 | h5py==2.10.0 16 | idna==2.9 17 | imageio==2.8.0 18 | ipython==7.13.0 19 | ipython-genutils==0.2.0 20 | jedi==0.17.0 21 | joblib==0.14.1 22 | Keras==2.3.1 23 | Keras-Applications==1.0.8 24 | Keras-Preprocessing==1.1.0 25 | kiwisolver==1.2.0 26 | Markdown==3.2.1 27 | matplotlib==3.2.1 28 | nibabel==3.1.0 29 | numpy==1.18.2 30 | oauthlib==3.1.0 31 | opt-einsum==3.2.1 32 | packaging==20.3 33 | pandas==1.0.3 34 | parso==0.7.0 35 | pexpect==4.8.0 36 | pickleshare==0.7.5 37 | Pillow==7.1.1 38 | prompt-toolkit==3.0.5 39 | protobuf==3.11.3 40 | ptyprocess==0.6.0 41 |
pyasn1==0.4.8 42 | pyasn1-modules==0.2.8 43 | Pygments==2.6.1 44 | pylab-sdk==1.1.2 45 | pyparsing==2.4.7 46 | python-dateutil==2.8.1 47 | pytz==2019.3 48 | PyYAML==5.3.1 49 | requests==2.23.0 50 | requests-oauthlib==1.3.0 51 | rsa==4.0 52 | scikit-learn==0.22.2.post1 53 | scipy==1.4.1 54 | seaborn==0.10.0 55 | six==1.14.0 56 | sklearn==0.0 57 | tensorboard==2.0.2 58 | tensorflow-estimator==2.0.1 59 | tensorflow-gpu==2.0.1 60 | tensorflow-probability==0.8.0 61 | termcolor==1.1.0 62 | tqdm==4.45.0 63 | traitlets==4.3.3 64 | urllib3==1.25.9 65 | wcwidth==0.1.9 66 | Werkzeug==1.0.1 67 | wrapt==1.12.1 68 | -------------------------------------------------------------------------------- /UCL/SynthSR/1.0.0/general/requirements.txt: -------------------------------------------------------------------------------- 1 | absl-py==0.9.0 2 | astor==0.8.1 3 | backcall==0.1.0 4 | cachetools==4.1.0 5 | certifi==2020.4.5.1 6 | chardet==3.0.4 7 | cloudpickle==1.1.1 8 | cycler==0.10.0 9 | decorator==4.4.2 10 | gast==0.2.2 11 | google-auth==1.14.0 12 | google-auth-oauthlib==0.4.1 13 | google-pasta==0.2.0 14 | grpcio==1.28.1 15 | h5py==2.10.0 16 | idna==2.9 17 | imageio==2.8.0 18 | ipython==7.13.0 19 | ipython-genutils==0.2.0 20 | jedi==0.17.0 21 | joblib==0.14.1 22 | Keras==2.3.1 23 | Keras-Applications==1.0.8 24 | Keras-Preprocessing==1.1.0 25 | kiwisolver==1.2.0 26 | Markdown==3.2.1 27 | matplotlib==3.2.1 28 | nibabel==3.1.0 29 | numpy==1.18.2 30 | oauthlib==3.1.0 31 | opt-einsum==3.2.1 32 | packaging==20.3 33 | pandas==1.0.3 34 | parso==0.7.0 35 | pexpect==4.8.0 36 | pickleshare==0.7.5 37 | Pillow==7.1.1 38 | prompt-toolkit==3.0.5 39 | protobuf==3.11.3 40 | ptyprocess==0.6.0 41 | pyasn1==0.4.8 42 | pyasn1-modules==0.2.8 43 | Pygments==2.6.1 44 | pylab-sdk==1.1.2 45 | pyparsing==2.4.7 46 | python-dateutil==2.8.1 47 | pytz==2019.3 48 | PyYAML==5.3.1 49 | requests==2.23.0 50 | requests-oauthlib==1.3.0 51 | rsa==4.0 52 | scikit-learn==0.22.2.post1 53 | scipy==1.4.1 54 | seaborn==0.10.0 55 | six==1.14.0 56 | sklearn==0.0 57 | tensorboard==2.0.2 58 | tensorflow-estimator==2.0.1 59 | tensorflow-gpu==2.0.1 60 | tensorflow-probability==0.8.0 61 | termcolor==1.1.0 62 | tqdm==4.45.0 63 | traitlets==4.3.3 64 | urllib3==1.25.9 65 | wcwidth==0.1.9 66 | Werkzeug==1.0.1 67 | wrapt==1.12.1 68 | -------------------------------------------------------------------------------- /UCL/SynthSR/1.0.0/hyperfine/requirements.txt: -------------------------------------------------------------------------------- 1 | absl-py==0.9.0 2 | astor==0.8.1 3 | backcall==0.1.0 4 | cachetools==4.1.0 5 | certifi==2020.4.5.1 6 | chardet==3.0.4 7 | cloudpickle==1.1.1 8 | cycler==0.10.0 9 | decorator==4.4.2 10 | gast==0.2.2 11 | google-auth==1.14.0 12 | google-auth-oauthlib==0.4.1 13 | google-pasta==0.2.0 14 | grpcio==1.28.1 15 | h5py==2.10.0 16 | idna==2.9 17 | imageio==2.8.0 18 | ipython==7.13.0 19 | ipython-genutils==0.2.0 20 | jedi==0.17.0 21 | joblib==0.14.1 22 | Keras==2.3.1 23 | Keras-Applications==1.0.8 24 | Keras-Preprocessing==1.1.0 25 | kiwisolver==1.2.0 26 | Markdown==3.2.1 27 | matplotlib==3.2.1 28 | nibabel==3.1.0 29 | numpy==1.18.2 30 | oauthlib==3.1.0 31 | opt-einsum==3.2.1 32 | packaging==20.3 33 | pandas==1.0.3 34 | parso==0.7.0 35 | pexpect==4.8.0 36 | pickleshare==0.7.5 37 | Pillow==7.1.1 38 | prompt-toolkit==3.0.5 39 | protobuf==3.11.3 40 | ptyprocess==0.6.0 41 | pyasn1==0.4.8 42 | pyasn1-modules==0.2.8 43 | Pygments==2.6.1 44 | pylab-sdk==1.1.2 45 | pyparsing==2.4.7 46 | python-dateutil==2.8.1 47 | pytz==2019.3 48 | PyYAML==5.3.1 
49 | requests==2.23.0 50 | requests-oauthlib==1.3.0 51 | rsa==4.0 52 | scikit-learn==0.22.2.post1 53 | scipy==1.4.1 54 | seaborn==0.10.0 55 | six==1.14.0 56 | sklearn==0.0 57 | tensorboard==2.0.2 58 | tensorflow-estimator==2.0.1 59 | tensorflow-gpu==2.0.1 60 | tensorflow-probability==0.8.0 61 | termcolor==1.1.0 62 | tqdm==4.45.0 63 | traitlets==4.3.3 64 | urllib3==1.25.9 65 | wcwidth==0.1.9 66 | Werkzeug==1.0.1 67 | wrapt==1.12.1 68 | -------------------------------------------------------------------------------- /.github/workflows/package-lock.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ngustafson2023.github.io", 3 | "lockfileVersion": 2, 4 | "requires": true, 5 | "packages": { 6 | "": { 7 | "dependencies": { 8 | "js-yaml": "^4.1.0" 9 | } 10 | }, 11 | "node_modules/argparse": { 12 | "version": "2.0.1", 13 | "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", 14 | "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" 15 | }, 16 | "node_modules/js-yaml": { 17 | "version": "4.1.0", 18 | "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", 19 | "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", 20 | "dependencies": { 21 | "argparse": "^2.0.1" 22 | }, 23 | "bin": { 24 | "js-yaml": "bin/js-yaml.js" 25 | } 26 | } 27 | }, 28 | "dependencies": { 29 | "argparse": { 30 | "version": "2.0.1", 31 | "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", 32 | "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" 33 | }, 34 | "js-yaml": { 35 | "version": "4.1.0", 36 | "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", 37 | "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", 38 | "requires": { 39 | "argparse": "^2.0.1" 40 | } 41 | } 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /.github/workflows/get_model_data.yml: -------------------------------------------------------------------------------- 1 | name: Get Model Data 2 | 3 | on: 4 | push: 5 | branches: ["master"] 6 | 7 | jobs: 8 | build: 9 | runs-on: ubuntu-latest 10 | 11 | steps: 12 | - name: Checkout master 13 | uses: actions/checkout@v3 14 | with: 15 | fetch-depth: 0 16 | ref: master 17 | 18 | - name: Clone trained-models-template 19 | uses: GuillaumeFalourd/clone-github-repo-action@v2 20 | with: 21 | owner: 'neuronets' 22 | repository: 'trained-models-template' 23 | 24 | - name: Move package.json and package-lock.json to root 25 | run: | 26 | mv ./.github/workflows/package-lock.json ./package-lock.json 27 | mv ./.github/workflows/package.json ./package.json 28 | 29 | - name: Use node.js 30 | uses: actions/setup-node@v3 31 | with: 32 | node-version: '16.x' 33 | - run: npm ci 34 | 35 | - name: Run get_model_data.js 36 | run: node ./.github/workflows/get_model_data.js 37 | 38 | - name: Extract files from clone and remove clone 39 | run: | 40 | mv ./trained-models-template/docs . 
41 | rm -r ./trained-models-template 42 | 43 | - name: Use GitHub Actions' cache to shorten build times and decrease load on servers 44 | uses: actions/cache@v2 45 | with: 46 | path: vendor/bundle 47 | key: ${{ runner.os }}-gems-${{ hashFiles('**/Gemfile') }} 48 | restore-keys: | 49 | ${{ runner.os }}-gems- 50 | 51 | - name: Build and publish Jekyll site to gh-pages 52 | uses: helaili/jekyll-action@v2 53 | with: 54 | token: ${{ secrets.GITHUB_TOKEN }} 55 | target_branch: 'gh-pages' -------------------------------------------------------------------------------- /lcn/parcnet/1.0.0/spec.yaml: -------------------------------------------------------------------------------- 1 | #### container info 2 | image: 3 | singularity: nobrainer-zoo_torch_1.10.0.sif 4 | docker: neuronets/nobrainer-zoo:torch_1.10.0 5 | 6 | #### repository info 7 | repository: 8 | repo_url: "None" 9 | committish: "None" 10 | repo_download: False 11 | repo_download_location: "None" 12 | 13 | #### required fields for prediction 14 | inference: 15 | prediction_script: "trained-models/lcn/parcnet/1.0.0/predict.py" 16 | command: f"python3 {MODELS_PATH}/{model}/predict.py --model {model_path} {infile[0]} {outfile}" 17 | # TODO: we should add help for options. 18 | options: 19 | cpu: {mandatory: False, argstr: "--cpu", is_flag: true} 20 | #### input data characteristics 21 | data_spec: 22 | infile: {n_files: 1} 23 | outfile: {n_files: 1} 24 | 25 | #### required fields for model training 26 | train: 27 | #TODO: train spec to be added here 28 | 29 | #### training data characteristics 30 | training_data_info: 31 | data_number: 32 | total: None 33 | train: None 34 | evaluate: None 35 | test: None 36 | biological_sex: 37 | male: None 38 | female: None 39 | age_histogram: None 40 | race: None 41 | imaging_contrast_info: None 42 | dataset_sources: None 43 | data_sites: 44 | number_of_sites: None 45 | sites: None 46 | scanner_models: None 47 | hardware: None 48 | training_parameters: 49 | input_shape: None 50 | block_shape: None 51 | n_classes: None 52 | lr: None 53 | n_epochs: None 54 | total_batch_size: None 55 | number_of_gpus: None 56 | loss_function: None 57 | metrics: None 58 | data_preprocessing: None 59 | data_augmentation: None 60 | -------------------------------------------------------------------------------- /docs/spec.yaml: -------------------------------------------------------------------------------- 1 | #### container info 2 | image: 3 | singularity: [Singularity image] 4 | docker: [docker image path] 5 | 6 | #### repository info 7 | repository: 8 | repo_url: 9 | committish: 10 | repo_download: 11 | repo_download_location: 12 | 13 | #### Training Data Characteristics 14 | training_data_info: 15 | data_number: 16 | total: [Total number of data goes here] 17 | train: [Number of training data goes here] 18 | evaluate: [Number of evaluation data goes here] 19 | test: [Number of test data goes here] 20 | biological_sex: 21 | male: [Number of male data goes here] 22 | female: [Number of female data goes here] 23 | age_histogram: [Age histogram goes here] 24 | race: [Race information goes here] 25 | imaging_contrast_info: [Imaging contrast information goes here] 26 | dataset_sources: [Dataset sources go here] 27 | data_sites: 28 | number_of_sites: [Number of data sites goes here] 29 | sites: [List of data site names goes here] 30 | scanner_models: [Scanner models go here] 31 | hardware: [Hardware information goes here] 32 | training_parameters: 33 | input_shape: [Input shape used for training goes here] 34 | block_shape: 
[Block shape used for training goes here] 35 | n_classes: [Number of classes used for training goes here] 36 | lr: [Learning rate used for training goes here] 37 | n_epochs: [Number of epochs used for training goes here] 38 | total_batch_size: [Total batch size used for training goes here] 39 | number_of_gpus: [Number of GPUs used for training goes here] 40 | loss_function: [Loss function used for training goes here] 41 | metrics: [List of evaluation metrics used for training goes here] 42 | data_preprocessing: [Description or details about the data preprocessing steps goes here] 43 | data_augmentation: [Description or details about the data augmentation techniques used goes here] -------------------------------------------------------------------------------- /UCL/SynthSR/1.0.0/hyperfine/spec.yaml: -------------------------------------------------------------------------------- 1 | #### container info 2 | image: 3 | singularity: nobrainer-zoo_ucl.sif 4 | docker: neuronets/nobrainer-zoo:ucl 5 | 6 | #### repository info 7 | repository: 8 | repo_url: "https://github.com/BBillot/SynthSR.git" 9 | committish: "59b92b54" 10 | repo_download: True 11 | repo_download_location: f"{REPO_PATH}/{model_nm}-{ver}" 12 | 13 | #### required fields for prediction 14 | inference: 15 | prediction_script: "trained-models/UCL/SynthSR/1.0.0/hyperfine/predict.py" 16 | command: f"python3 {MODELS_PATH}/{model}/{model_type}/predict.py --repo_path {repo_dest} --model_path {model_path} {infile[0]} {infile[1]} {outfile}" 17 | # TODO: we should add help for options. 18 | options: 19 | threads: {mandatory: False, argstr: "--threads", type: "int", default: 1} 20 | cpu: {mandatory: False, argstr: "--cpu", is_flag: true} 21 | #### input data characteristics 22 | data_spec: 23 | infile: {n_files: 2} 24 | outfile: {n_files: 1} 25 | 26 | #### training data characteristics 27 | training_data_info: 28 | data_number: 29 | total: None 30 | train: None 31 | evaluate: None 32 | test: None 33 | biological_sex: 34 | male: None 35 | female: None 36 | age_histogram: None 37 | race: None 38 | imaging_contrast_info: "any contrast" 39 | dataset_sources: None 40 | data_sites: 41 | number_of_sites: None 42 | sites: None 43 | scanner_models: None 44 | hardware: None 45 | training_parameters: 46 | input_shape: None 47 | block_shape: None 48 | n_classes: None 49 | lr: None 50 | n_epochs: None 51 | total_batch_size: None 52 | number_of_gpus: None 53 | loss_function: None 54 | metrics: None 55 | data_preprocessing: None 56 | data_augmentation: "spatial transform" 57 | -------------------------------------------------------------------------------- /DDIG/VoxelMorph/1.0.0/spec.yaml: -------------------------------------------------------------------------------- 1 | #### container info 2 | image: 3 | singularity: nobrainer-zoo_ddig.sif 4 | docker: neuronets/nobrainer-zoo:ddig 5 | 6 | #### repository info 7 | repository: 8 | repo_url: "https://github.com/voxelmorph/voxelmorph.git" 9 | committish: "67bc9d20" 10 | repo_download: False 11 | repo_download_location: "None" 12 | 13 | #### required fields for prediction 14 | inference: 15 | prediction_script: "trained-models/DDIG/VoxelMorph/1.0.0/register.py" 16 | command: f"python3 {MODELS_PATH}/{model}/register.py --moving {moving} --fixed {fixed} --model {model_path} --moved {moved}" 17 | # TODO: we should add help for model options 18 | options: 19 | warp: {mandatory: False, argstr: "--warp", type: "str"} 20 | gpu: {mandatory: False, default: "None", argstr: "-g", type: "int"} 21 | multichannel: 
{mandatory: False, is_flag: true} 22 | #### input data characteristics 23 | data_spec: 24 | moving: {n_files: 1} 25 | fixed: {n_files: 1} 26 | moved: {n_files: 1} 27 | 28 | #### required fields for model training 29 | train: 30 | # TODO: train spec to be added 31 | 32 | #### training data characteristics 33 | training_data_info: 34 | data_number: 35 | total: None 36 | train: None 37 | evaluate: None 38 | test: None 39 | biological_sex: 40 | male: None 41 | female: None 42 | age_histogram: None 43 | race: None 44 | imaging_contrast_info: None 45 | dataset_sources: None 46 | data_sites: 47 | number_of_sites: None 48 | sites: None 49 | scanner_models: None 50 | hardware: None 51 | training_parameters: 52 | input_shape: None 53 | block_shape: None 54 | n_classes: None 55 | lr: None 56 | n_epochs: None 57 | total_batch_size: None 58 | number_of_gpus: None 59 | loss_function: None 60 | metrics: None 61 | data_preprocessing: None 62 | data_augmentation: None 63 | 64 | -------------------------------------------------------------------------------- /DDIG/SynthMorph/1.0.0/brains/spec.yaml: -------------------------------------------------------------------------------- 1 | #### container info 2 | image: 3 | singularity: nobrainer-zoo_ddig.sif 4 | docker: neuronets/nobrainer-zoo:ddig 5 | 6 | #### repository info 7 | repository: 8 | repo_url: "https://github.com/voxelmorph/voxelmorph.git" 9 | committish: "67bc9d20" 10 | repo_download: False 11 | repo_download_location: "None" 12 | 13 | #### required fields for prediction 14 | inference: 15 | prediction_script: "trained-models/DDIG/SynthMorph/1.0.0/brains/register.py" 16 | command: f"python3 {MODELS_PATH}/{model}/{model_type}/register.py --moving {moving} --fixed {fixed} --model {model_path} --moved {moved}" 17 | # TODO: we should add help for model options 18 | options: 19 | warp: {mandatory: False, argstr: "--warp", type: "str"} 20 | gpu: {mandatory: False, default: "None", argstr: "--gpu", type: "int"} 21 | multichannel: {mandatory: False, is_flag: true} 22 | #### input data characteristics 23 | data_spec: 24 | moving: {n_files: 1} 25 | fixed: {n_files: 1} 26 | moved: {n_files: 1} 27 | 28 | #### required fields for model training 29 | train: 30 | # TODO: train spec to be added 31 | 32 | #### training data characteristics 33 | training_data_info: 34 | data_number: 35 | total: None 36 | train: None 37 | evaluate: None 38 | test: None 39 | biological_sex: 40 | male: None 41 | female: None 42 | age_histogram: None 43 | race: None 44 | imaging_contrast_info: None 45 | dataset_sources: None 46 | data_sites: 47 | number_of_sites: None 48 | sites: None 49 | scanner_models: None 50 | hardware: None 51 | training_parameters: 52 | input_shape: None 53 | block_shape: None 54 | n_classes: None 55 | lr: None 56 | n_epochs: None 57 | total_batch_size: None 58 | number_of_gpus: None 59 | loss_function: None 60 | metrics: None 61 | data_preprocessing: None 62 | data_augmentation: None 63 | -------------------------------------------------------------------------------- /DDIG/SynthMorph/1.0.0/shapes/spec.yaml: -------------------------------------------------------------------------------- 1 | #### container info 2 | image: 3 | singularity: nobrainer-zoo_ddig.sif 4 | docker: neuronets/nobrainer-zoo:ddig 5 | 6 | #### repository info 7 | repository: 8 | repo_url: "https://github.com/voxelmorph/voxelmorph.git" 9 | committish: "67bc9d20" 10 | repo_download: False 11 | repo_download_location: "None" 12 | 13 | #### required fields for prediction 14 | inference: 15 | 
prediction_script: "trained-models/DDIG/SynthMorph/1.0.0/shapes/register.py" 16 | command: f"python3 {MODELS_PATH}/{model}/{model_type}/register.py --moving {moving} --fixed {fixed} --model {model_path} --moved {moved}" 17 | # TODO: we should add help for model options 18 | options: 19 | warp: {mandatory: False, argstr: "--warp", type: "str"} 20 | gpu: {mandatory: False, default: "None", argstr: "--gpu", type: "int"} 21 | multichannel: {mandatory: False, is_flag: true} 22 | #### input data characteristics 23 | data_spec: 24 | moving: {n_files: 1} 25 | fixed: {n_files: 1} 26 | moved: {n_files: 1} 27 | 28 | #### required fields for model training 29 | train: 30 | # TODO: train spec to be added 31 | 32 | #### training data characteristics 33 | training_data_info: 34 | data_number: 35 | total: None 36 | train: None 37 | evaluate: None 38 | test: None 39 | biological_sex: 40 | male: None 41 | female: None 42 | age_histogram: None 43 | race: None 44 | imaging_contrast_info: None 45 | dataset_sources: None 46 | data_sites: 47 | number_of_sites: None 48 | sites: None 49 | scanner_models: None 50 | hardware: None 51 | training_parameters: 52 | input_shape: None 53 | block_shape: None 54 | n_classes: None 55 | lr: None 56 | n_epochs: None 57 | total_batch_size: None 58 | number_of_gpus: None 59 | loss_function: None 60 | metrics: None 61 | data_preprocessing: None 62 | data_augmentation: None 63 | -------------------------------------------------------------------------------- /DeepCSR/deepcsr/1.0/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nvidia/cuda:11.3.1-cudnn8-devel-ubuntu20.04 2 | ENV LANG=C.UTF-8 3 | ENV LC_ALL=C.UTF-8 4 | ENV PATH=/opt/miniconda3/bin:$PATH 5 | ENV PYTHONDONTWRITEBYTECODE=1 6 | ENV PYTHONUNBUFFERED=1 7 | ENV PYTHONIOENCODING=UTF-8 8 | ENV PIPENV_VENV_IN_PROJECT=1 9 | ENV JCC_JDK=/usr/lib/jvm/java-8-openjdk-amd64 10 | RUN USE_CUDA=1 11 | RUN CUDA_VERSION=11.3.1 12 | RUN CUDNN_VERSION=8 13 | RUN LINUX_DISTRO=ubuntu 14 | RUN DISTRO_VERSION=20.04 15 | RUN TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6" 16 | RUN rm -f /etc/apt/apt.conf.d/docker-clean; \ 17 | echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' \ 18 | > /etc/apt/apt.conf.d/keep-cache 19 | RUN apt-get update && DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt-get -y install tzdata && apt-get install -y --no-install-recommends \ 20 | build-essential \ 21 | ca-certificates \ 22 | ccache \ 23 | curl \ 24 | git \ 25 | wget \ 26 | cmake \ 27 | openjdk-8-jdk \ 28 | libaio-dev && \ 29 | apt-get install -y --no-install-recommends llvm-10 lldb-10 llvm-10-dev libllvm10 llvm-10-runtime 30 | RUN rm -rf /var/lib/apt/lists/* 31 | ENV PYTHON_VERSION=3.8 32 | ENV CONDA_URL=https://repo.anaconda.com/miniconda/Miniconda3-py38_4.10.3-Linux-x86_64.sh 33 | RUN curl -fsSL -v -o ~/miniconda.sh -O ${CONDA_URL} && \ 34 | chmod +x ~/miniconda.sh && \ 35 | ~/miniconda.sh -b -p /opt/miniconda3 36 | 37 | WORKDIR /app 38 | COPY requirements.yml . 39 | COPY CBSI.tar.gz . 
40 | RUN tar -xf CBSI.tar.gz 41 | RUN mkdir niftyreg-build 42 | ENV CMAKE_BUILD_TYPE=Release 43 | WORKDIR /app/niftyreg-build 44 | RUN cmake /app/niftyreg-CBSI 45 | RUN make 46 | RUN make install 47 | WORKDIR /app 48 | RUN conda env create -f requirements.yml 49 | SHELL ["conda", "run", "-n", "deepcsr", "/bin/bash", "-c"] 50 | RUN git clone https://github.com/neuroneural/DeepCSR-fork.git 51 | WORKDIR /app/DeepCSR-fork/docker/nighres 52 | RUN pip install jcc 53 | RUN git checkout tags/docker1 54 | RUN ./build.sh 55 | RUN python3 -m pip install . 56 | RUN apt-get clean 57 | RUN pip cache purge 58 | RUN conda clean -a 59 | ENTRYPOINT ["/bin/bash", "-l", "-c"] 60 | 61 | -------------------------------------------------------------------------------- /DDIG/SynthStrip/1.0.0/spec.yaml: -------------------------------------------------------------------------------- 1 | #### container info 2 | image: 3 | singularity: nobrainer-zoo_ddig_torch1.11.0.sif 4 | docker: neuronets/nobrainer-zoo:ddig_torch1.11.0 5 | 6 | #### repository info 7 | repository: 8 | repo_url: "https://github.com/freesurfer/freesurfer/tree/dev/mri_synthstrip" 9 | committish: "e935059a" 10 | repo_download: False 11 | repo_download_location: "None" 12 | 13 | #### required fields for prediction 14 | inference: 15 | prediction_script: "trained-models/DDIG/SynthStrip/1.0.0/predict.py" 16 | command: f"python3 {MODELS_PATH}/{model}/predict.py --model {model_path} -i {infile} -o {outfile}" 17 | 18 | options: 19 | mask: {mandatory: False, argstr: "-m", type: "str", help: "Save binary brain mask to path."} 20 | gpu: {mandatory: False, argstr: "-g", is_flag: true, help: "Use the GPU."} 21 | border: {mandatory: False, argstr: "-b", type: "int", default: 1, help: "Mask border threshold in mm. Default is 1."} 22 | #### input data characteristics 23 | data_spec: 24 | infile: {n_files: 1} 25 | outfile: {n_files: 1} 26 | 27 | #### required fields for model training 28 | train: 29 | #### TODO: Add the train spec here 30 | 31 | #### training data characteristics 32 | training_data_info: 33 | data_number: 34 | total: None 35 | train: None 36 | evaluate: None 37 | test: None 38 | biological_sex: 39 | male: None 40 | female: None 41 | age_histogram: None 42 | race: None 43 | imaging_contrast_info: "any contrast" 44 | dataset_sources: None 45 | data_sites: 46 | number_of_sites: None 47 | sites: None 48 | scanner_models: None 49 | hardware: None 50 | training_parameters: 51 | input_shape: None 52 | block_shape: None 53 | n_classes: None 54 | lr: None 55 | n_epochs: None 56 | total_batch_size: None 57 | number_of_gpus: None 58 | loss_function: None 59 | metrics: None 60 | data_preprocessing: None 61 | data_augmentation: "domain randomization using a generator" 62 | -------------------------------------------------------------------------------- /UCL/SynthSeg/1.0.0/spec.yaml: -------------------------------------------------------------------------------- 1 | #### container info 2 | image: 3 | singularity: nobrainer-zoo_ucl.sif 4 | docker: neuronets/nobrainer-zoo:ucl 5 | 6 | #### repository info 7 | repository: 8 | repo_url: "https://github.com/BBillot/SynthSeg.git" 9 | committish: "cd597b08" 10 | repo_download: True 11 | repo_download_location: f"{REPO_PATH}/{model_nm}-{ver}" 12 | 13 | #### required fields for prediction 14 | inference: 15 | prediction_script: "trained-models/UCL/SynthSeg/1.0.0/predict.py" 16 | command: f"python3 {MODELS_PATH}/{model}/predict.py --repo_path {repo_dest} --model_path {model_path} --i {infile[0]} --o {outfile}" 17 | # TODO: 
we should add help for options. 18 | options: 19 | post: {mandatory: False, argstr: "--post", type: "str"} 20 | resample: {mandatory: False, argstr: "--resample", type: "str"} 21 | vol: {mandatory: False, argstr: "--vol", type: "str"} 22 | crop: {mandatory: False, argstr: "--crop", type: "int", default: 192} 23 | threads: {mandatory: False, argstr: "--threads", type: "int", default: 1} 24 | cpu: {mandatory: False, argstr: "--cpu", is_flag: true} 25 | #### input data characteristics 26 | data_spec: 27 | infile: {n_files: 1} 28 | outfile: {n_files: 1} 29 | 30 | #### training data characteristics 31 | training_data_info: 32 | data_number: 33 | total: None 34 | train: None 35 | evaluate: None 36 | test: None 37 | biological_sex: 38 | male: None 39 | female: None 40 | age_histogram: None 41 | race: None 42 | imaging_contrast_info: "any contrast" 43 | dataset_sources: None 44 | data_sites: 45 | number_of_sites: None 46 | sites: None 47 | scanner_models: None 48 | hardware: None 49 | training_parameters: 50 | input_shape: None 51 | block_shape: None 52 | n_classes: None 53 | lr: None 54 | n_epochs: None 55 | total_batch_size: None 56 | number_of_gpus: None 57 | loss_function: None 58 | metrics: None 59 | data_preprocessing: None 60 | data_augmentation: "domain randomization using a generator" -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_32/spec.yaml: -------------------------------------------------------------------------------- 1 | #### container info 2 | image: 3 | singularity: nobrainer-zoo_nobrainer.sif 4 | docker: neuronets/nobrainer-zoo:nobrainer 5 | 6 | #### repository info 7 | repository: 8 | repo_url: "git@github.com:neuronets/nobrainer.git" 9 | committish: "72aa211b" 10 | repo_download: False 11 | repo_download_location: "None" 12 | 13 | #### required fields for prediction 14 | inference: 15 | prediction_script: "trained-models/neuronets/braingen/0.1.0/generator_res_32/generate.py" 16 | command: f"python3 {MODELS_PATH}/{model}/{model_type}/generate.py -m {model_path} {outfile}" 17 | # TODO: we should add help for model options 18 | options: 19 | latent-size: {mandatory: False, default: 1024, argstr: "-l", type: "int"} 20 | drange-in: {mandatory: False, default: (-1, 1), argstr: "--drange-in", type: "int"} 21 | drange-out: {mandatory: False, default: (0, 255), argstr: "--drange-out", type: "int"} 22 | output-shape: {mandatory: False, default: (128, 128, 128), argstr: "-o", type: "int"} 23 | verbose: {argstr: "-v", is_flag: true} 24 | #### input data characteristics 25 | data_spec: 26 | outfile: {n_files: 1} 27 | 28 | #### required fields for model training 29 | train: 30 | #### TODO: Add the train spec here 31 | #### training data characteristics 32 | training_data_info: 33 | data_number: 34 | total: None 35 | train: None 36 | evaluate: None 37 | test: None 38 | biological_sex: 39 | male: None 40 | female: None 41 | age_histogram: None 42 | race: None 43 | imaging_contrast_info: "T1-weighted" 44 | dataset_sources: None 45 | data_sites: 46 | number_of_sites: None 47 | sites: None 48 | scanner_models: None 49 | hardware: None 50 | training_parameters: 51 | input_shape: None 52 | block_shape: "32x32x32" 53 | n_classes: None 54 | lr: None 55 | n_epochs: None 56 | total_batch_size: None 57 | number_of_gpus: None 58 | loss_function: None 59 | metrics: None 60 | data_preprocessing: None 61 | data_augmentation: None --------------------------------------------------------------------------------
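The `command` entries in the spec files above are stored as literal Python f-string templates rather than runnable strings: a consuming CLI is expected to substitute placeholders such as {MODELS_PATH}, {model}, {model_type}, {model_path}, and the names declared under data_spec before executing the command. The following is a minimal sketch of such a renderer; the function name, the /opt/zoo paths, and the quote-stripping convention are illustrative assumptions, not the actual nobrainer-zoo implementation.

import yaml

def render_command(spec_path, context):
    """Fill the f-string-style command template stored in a spec.yaml."""
    with open(spec_path) as f:
        spec = yaml.safe_load(f)
    template = spec["inference"]["command"]
    # Stored values look like: f"python3 {MODELS_PATH}/{model}/... {outfile}"
    # Drop the f-prefix and surrounding quotes, then let str.format fill the
    # placeholders; format also resolves indexed fields such as {infile[0]}.
    if template.startswith('f"') and template.endswith('"'):
        template = template[2:-1]
    return template.format(**context)

cmd = render_command(
    "neuronets/braingen/0.1.0/generator_res_32/spec.yaml",
    {
        "MODELS_PATH": "/opt/zoo/trained-models",  # hypothetical cache root
        "model": "neuronets/braingen/0.1.0",
        "model_type": "generator_res_32",
        "model_path": "/opt/zoo/weights/generator_res_32",  # hypothetical
        "outfile": "generated.nii.gz",
    },
)
# cmd == "python3 /opt/zoo/trained-models/neuronets/braingen/0.1.0/generator_res_32/generate.py -m /opt/zoo/weights/generator_res_32 generated.nii.gz"

One useful property of str.format here is that a placeholder missing from the context raises KeyError, so an incomplete spec fails loudly instead of producing a broken shell command.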
/neuronets/braingen/0.1.0/generator_res_8/spec.yaml: -------------------------------------------------------------------------------- 1 | #### container info 2 | image: 3 | singularity: nobrainer-zoo_nobrainer.sif 4 | docker: neuronets/nobrainer-zoo:nobrainer 5 | 6 | #### repository info 7 | repository: 8 | repo_url: "git@github.com:neuronets/nobrainer.git" 9 | committish: "72aa211b" 10 | repo_download: False 11 | repo_download_location: "None" 12 | 13 | #### required fields for prediction 14 | inference: 15 | prediction_script: "trained-models/neuronets/braingen/0.1.0/generator_res_8/generate.py" 16 | command: f"python3 {MODELS_PATH}/{model}/{model_type}/generate.py -m {model_path} {outfile}" 17 | # TODO: we should add help for model options 18 | options: 19 | latent-size: {mandatory: False, default: 1024, argstr: "-l", type: "int"} 20 | drange-in: {mandatory: False, default: (-1, 1), argstr: "--drange-in", type: "int"} 21 | drange-out: {mandatory: False, default: (0, 255), argstr: "--drange-out", type: "int"} 22 | output-shape: {mandatory: False, default: (128, 128, 128), argstr: "-o", type: "int"} 23 | verbose: {argstr: "-v", is_flag: true} 24 | #### input data characteristics 25 | data_spec: 26 | outfile: {n_files: 1} 27 | 28 | #### required fields for model training 29 | train: 30 | #### TODO: Add the train spec here 31 | #### training data characteristics 32 | training_data_info: 33 | data_number: 34 | total: None 35 | train: None 36 | evaluate: None 37 | test: None 38 | biological_sex: 39 | male: None 40 | female: None 41 | age_histogram: None 42 | race: None 43 | imaging_contrast_info: "T1-weighted" 44 | dataset_sources: None 45 | data_sites: 46 | number_of_sites: None 47 | sites: None 48 | scanner_models: None 49 | hardware: None 50 | training_parameters: 51 | input_shape: None 52 | block_shape: "8x8x8" 53 | n_classes: None 54 | lr: None 55 | n_epochs: None 56 | total_batch_size: None 57 | number_of_gpus: None 58 | loss_function: None 59 | metrics: None 60 | data_preprocessing: None 61 | data_augmentation: None 62 | -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_128/spec.yaml: -------------------------------------------------------------------------------- 1 | #### container info 2 | image: 3 | singularity: nobrainer-zoo_nobrainer.sif 4 | docker: neuronets/nobrainer-zoo:nobrainer 5 | 6 | #### repository info 7 | repository: 8 | repo_url: "git@github.com:neuronets/nobrainer.git" 9 | committish: "72aa211b" 10 | repo_download: False 11 | repo_download_location: "None" 12 | 13 | #### required fields for prediction 14 | inference: 15 | prediction_script: "trained-models/neuronets/braingen/0.1.0/generator_res_128/generate.py" 16 | command: f"python3 {MODELS_PATH}/{model}/{model_type}/generate.py -m {model_path} {outfile}" 17 | # TODO: we should add help for model options 18 | options: 19 | latent-size: {mandatory: False, default: 1024, argstr: "-l", type: "int"} 20 | drange-in: {mandatory: False, default: (-1, 1), argstr: "--drange-in", type: "int"} 21 | drange-out: {mandatory: False, default: (0, 255), argstr: "--drange-out", type: "int"} 22 | output-shape: {mandatory: False, default: (128, 128, 128), argstr: "-o", type: "int"} 23 | verbose: {argstr: "-v", is_flag: true} 24 | #### input data characteristics 25 | data_spec: 26 | outfile: {n_files: 1} 27 | 28 | #### required fields for model training 29 | train: 30 | #### TODO: Add the train spec here 31 | #### training data characteristics 32 | training_data_info: 
33 | data_number: 34 | total: None 35 | train: None 36 | evaluate: None 37 | test: None 38 | biological_sex: 39 | male: None 40 | female: None 41 | age_histogram: None 42 | race: None 43 | imaging_contrast_info: "T1-weighted" 44 | dataset_sources: None 45 | data_sites: 46 | number_of_sites: None 47 | sites: None 48 | scanner_models: None 49 | hardware: None 50 | training_parameters: 51 | input_shape: None 52 | block_shape: "128x128x128" 53 | n_classes: None 54 | lr: None 55 | n_epochs: None 56 | total_batch_size: None 57 | number_of_gpus: None 58 | loss_function: None 59 | metrics: None 60 | data_preprocessing: None 61 | data_augmentation: None -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_16/spec.yaml: -------------------------------------------------------------------------------- 1 | #### container info 2 | image: 3 | singularity: nobrainer-zoo_nobrainer.sif 4 | docker: neuronets/nobrainer-zoo:nobrainer 5 | 6 | #### repository info 7 | repository: 8 | repo_url: "git@github.com:neuronets/nobrainer.git" 9 | committish: "72aa211b" 10 | repo_download: False 11 | repo_download_location: "None" 12 | 13 | #### required fields for prediction 14 | inference: 15 | prediction_script: "trained-models/neuronets/braingen/0.1.0/generator_res_16/generate.py" 16 | command: f"python3 {MODELS_PATH}/{model}/{model_type}/generate.py -m {model_path} {outfile}" 17 | # TODO: we should add help for model options 18 | options: 19 | latent-size: {mandatory: False, default: 1024, argstr: "-l", type: "int"} 20 | drange-in: {mandatory: False, default: (-1, 1), argstr: "--drange-in", type: "int"} 21 | drange-out: {mandatory: False, default: (0, 255), argstr: "--drange-out", type: "int"} 22 | output-shape: {mandatory: False, default: (128, 128, 128), argstr: "-o", type: "int"} 23 | verbose: {argstr: "-v", is_flag: true} 24 | #### input data characteristics 25 | data_spec: 26 | outfile: {n_files: 1} 27 | 28 | #### required fields for model training 29 | train: 30 | #### TODO: Add the train spec here 31 | #### training data characteristics 32 | training_data_info: 33 | data_number: 34 | total: None 35 | train: None 36 | evaluate: None 37 | test: None 38 | biological_sex: 39 | male: None 40 | female: None 41 | age_histogram: None 42 | race: None 43 | imaging_contrast_info: "T1-weighted" 44 | dataset_sources: None 45 | data_sites: 46 | number_of_sites: None 47 | sites: None 48 | scanner_models: None 49 | hardware: None 50 | training_parameters: 51 | input_shape: None 52 | block_shape: "16x16x16" 53 | n_classes: None 54 | lr: None 55 | n_epochs: None 56 | total_batch_size: None 57 | number_of_gpus: None 58 | loss_function: None 59 | metrics: None 60 | data_preprocessing: None 61 | data_augmentation: None 62 | -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_256/spec.yaml: -------------------------------------------------------------------------------- 1 | #### container info 2 | image: 3 | singularity: nobrainer-zoo_nobrainer.sif 4 | docker: neuronets/nobrainer-zoo:nobrainer 5 | 6 | #### repository info 7 | repository: 8 | repo_url: "git@github.com:neuronets/nobrainer.git" 9 | committish: "72aa211b" 10 | repo_download: False 11 | repo_download_location: "None" 12 | 13 | #### required fields for prediction 14 | inference: 15 | prediction_script: "trained-models/neuronets/braingen/0.1.0/generator_res_256/generate.py" 16 | command: f"python3 
{MODELS_PATH}/{model}/{model_type}/generate.py -m {model_path} {outfile}" 17 | # TODO: we should add help for model options 18 | options: 19 | latent-size: {mandatory: False, default: 1024, argstr: "-l", type: "int"} 20 | drange-in: {mandatory: False, default: (-1, 1), argstr: "--drange-in", type: "int"} 21 | drange-out: {mandatory: False, default: (0, 255), argstr: "--drange-out", type: "int"} 22 | output-shape: {mandatory: False, default: (128, 128, 128), argstr: "-o", type: "int"} 23 | verbose: {argstr: "-v", is_flag: true} 24 | #### input data characteristics 25 | data_spec: 26 | outfile: {n_files: 1} 27 | 28 | #### required fields for model training 29 | train: 30 | #### TODO: Add the train spec here 31 | #### training data characteristics 32 | training_data_info: 33 | data_number: 34 | total: None 35 | train: None 36 | evaluate: None 37 | test: None 38 | biological_sex: 39 | male: None 40 | female: None 41 | age_histogram: None 42 | race: None 43 | imaging_contrast_info: "T1-weighted" 44 | dataset_sources: None 45 | data_sites: 46 | number_of_sites: None 47 | sites: None 48 | scanner_models: None 49 | hardware: None 50 | training_parameters: 51 | input_shape: None 52 | block_shape: "256x256x256" 53 | n_classes: None 54 | lr: None 55 | n_epochs: None 56 | total_batch_size: None 57 | number_of_gpus: None 58 | loss_function: None 59 | metrics: None 60 | data_preprocessing: None 61 | data_augmentation: None -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_64/spec.yaml: -------------------------------------------------------------------------------- 1 | #### container info 2 | image: 3 | singularity: nobrainer-zoo_nobrainer.sif 4 | docker: neuronets/nobrainer-zoo:nobrainer 5 | 6 | #### repository info 7 | repository: 8 | repo_url: "git@github.com:neuronets/nobrainer.git" 9 | committish: "72aa211b" 10 | repo_download: False 11 | repo_download_location: "None" 12 | 13 | #### required fields for prediction 14 | inference: 15 | prediction_script: "trained-models/neuronets/braingen/0.1.0/generator_res_64/generate.py" 16 | command: f"python3 {MODELS_PATH}/{model}/{model_type}/generate.py -m {model_path} {outfile}" 17 | # TODO: we should add help for model options 18 | options: 19 | latent-size: {mandatory: False, default: 1024, argstr: "-l", type: "int"} 20 | drange-in: {mandatory: False, default: (-1, 1), argstr: "--drange-in", type: "int"} 21 | drange-out: {mandatory: False, default: (0, 255), argstr: "--drange-out", type: "int"} 22 | output-shape: {mandatory: False, default: (128, 128, 128), argstr: "-o", type: "int"} 23 | verbose: {argstr: "-v", is_flag: true} 24 | #### input data characteristics 25 | data_spec: 26 | outfile: {n_files: 1} 27 | 28 | #### required fields for model training 29 | train: 30 | #### TODO: Add the train spec here 31 | #### training data characteristics 32 | training_data_info: 33 | data_number: 34 | total: None 35 | train: None 36 | evaluate: None 37 | test: None 38 | biological_sex: 39 | male: None 40 | female: None 41 | age_histogram: None 42 | race: None 43 | imaging_contrast_info: "T1-weighted" 44 | dataset_sources: None 45 | data_sites: 46 | number_of_sites: None 47 | sites: None 48 | scanner_models: None 49 | hardware: None 50 | training_parameters: 51 | input_shape: None 52 | block_shape: "64x64x64" 53 | n_classes: None 54 | lr: None 55 | n_epochs: None 56 | total_batch_size: None 57 | number_of_gpus: None 58 | loss_function: None 59 | metrics: None 60 | data_preprocessing: None 61 | 
data_augmentation: None 62 | -------------------------------------------------------------------------------- /neuronets/brainy/0.1.0/model_card.yaml: -------------------------------------------------------------------------------- 1 | Model_details: 2 | Organization: "neuronets" 3 | Model_date: "2021" 4 | Model_version: 0.1.0 5 | Model_type: "U-Net" 6 | More_information: "brainy" 7 | Citation_details: "https://github.com/neuronets/brainy" 8 | Contact_info: "https://github.com/neuronets/brainy/issues/new" 9 | 10 | Intended_use: 11 | Primary_intended_uses: "Automated brain extraction from T1-weighted brain scans." 12 | Primary_intended_users: "Researchers and practitioners in neuroimaging and neuroscience." 13 | Out_of_scope_use_cases: "Use cases outside of T1-weighted brain MRI analysis." 14 | 15 | Factors: 16 | Relevant_factors: "Scan quality, subject demographics, and variation in brain anatomy." 17 | Evaluation_factors: "Accuracy and speed of brain extraction, robustness to image variability." 18 | Model_performance_measures: "Dice score, Jaccard index, efficiency of processing." 19 | 20 | Metrics: 21 | Model Performance Measures: "Median Dice score of 0.97, mean of 0.96, and range of 0.91 to 0.98." 22 | Decision Thresholds: "Thresholds applied in image segmentation and classification." 23 | Variation Approaches: "Data augmentation with random rigid transformations for robustness." 24 | 25 | Evaluation Data: 26 | Datasets: "99 T1-weighted brain scans with binarized FreeSurfer segmentations." 27 | Motivation: "To validate the model's brain extraction performance in realistic scenarios." 28 | Preprocessing: "Standard scoring (Z-scoring) of T1-weighted volumes." 29 | 30 | Training Data: 31 | Datasets: "10,000 T1-weighted brain scans with binarized FreeSurfer segmentations." 32 | Motivation: "To train the model for accurate and efficient brain extraction." 33 | Preprocessing: "Segmentation into blocks of 128x128x128, data augmentation with random transformations." 34 | 35 | Quantitative Analyses: 36 | Unitary Results: "Detailed performance metrics for individual datasets." 37 | Intersectional Results: "Analysis of performance across different conditions and augmentation types." 38 | 39 | Ethical Considerations: 40 | "This tool is intended for research purposes and not as a medical product." 41 | 42 | Caveats and Recommendations: 43 | "Best used on T1-weighted MRI data. Effectiveness may vary with scan quality and anatomical differences. Regular updates and performance checks recommended." 44 | -------------------------------------------------------------------------------- /UCL/SynthSeg/1.0.0/model_card.yaml: -------------------------------------------------------------------------------- 1 | Model_details: 2 | Organization: "UCL" 3 | Model_date: "2021" 4 | Model_version: 1.0.0 5 | Model_type: "U-Net" 6 | More_information: "SynthSeg" 7 | Citation_details: "Billot, B., Greve, D. N., Puonti, O., Thielscher, A., Van Leemput, K., Fischl, B., Dalca, A. V., & Iglesias, J. E. (2021). SynthSeg: Segmentation of brain MRI scans of any contrast and resolution without retraining. ArXiv. https://doi.org/10.1016/j.media.2023.102789" 8 | Contact_info: "https://github.com/BBillot/SynthSeg/issues/new" 9 | 10 | Intended_use: 11 | Primary_intended_uses: "Segmentation of brain MRI scans of any contrast and resolution without retraining." 12 | Primary_intended_users: "Researchers and clinicians in neuroimaging." 
13 | Out_of_scope_use_cases: "Use cases outside of brain MRI segmentation, especially for non-clinical or diagnostic purposes." 14 | 15 | Factors: 16 | Relevant_factors: "Scan contrast and resolution, patient population variability." 17 | Evaluation_factors: "Robustness to contrast and resolution changes, accuracy across diverse patient populations." 18 | Model_performance_measures: "Generalization ability, segmentation accuracy." 19 | 20 | Metrics: 21 | Model Performance Measures: "Dice scores, surface distances." 22 | Decision Thresholds: 23 | Variation Approaches: "Assessment across different MRI modalities and resolutions." 24 | 25 | Evaluation Data: 26 | Datasets: "A range of MRI datasets including T1-weighted images and CT scans." 27 | Motivation: "To assess robustness and generalization capabilities." 28 | Preprocessing: "Standard neuroimaging preprocessing steps." 29 | 30 | Training Data: 31 | Datasets: "Synthetic data generated from anatomical segmentations." 32 | Motivation: "To create a robust model for clinical MRI datasets." 33 | Preprocessing: "Synthetic data generation with randomized imaging parameters." 34 | 35 | Quantitative Analyses: 36 | Unitary Results: "Performance metrics such as Dice scores." 37 | Intersectional Results: "Performance analysis across modalities and resolutions." 38 | 39 | Ethical Considerations: 40 | "General ethical considerations in AI and medical imaging, such as data privacy and responsible use." 41 | 42 | Caveats and Recommendations: 43 | "Awareness of model's limitations and importance of supplementary clinical evaluation." 44 | -------------------------------------------------------------------------------- /lcn/parcnet/1.0.0/predict.py: -------------------------------------------------------------------------------- 1 | from argparse import ArgumentParser 2 | import os 3 | import torch 4 | import punet 5 | import parc 6 | 7 | # print information 8 | print('\n') 9 | print('ParcNet cortical parcellation') 10 | print('\n') 11 | 12 | # parse arguments 13 | parser = ArgumentParser() 14 | parser.add_argument("path_images", type=str, help="images to parcellate. Can be the path to a single image or to a folder") 15 | parser.add_argument("path_predictions", type=str, 16 | help="path where to save the parcellation maps.
Must be the same type " 17 | "as path_images (path to a single image or to a folder)") 18 | parser.add_argument("--model", type=str, help="path to saved weights") 19 | parser.add_argument("--cpu", action="store_true", help="enforce running with CPU rather than GPU.") 20 | args = vars(parser.parse_args()) 21 | 22 | # enforce CPU processing if necessary 23 | if args['cpu']: 24 | print('using CPU, hiding all CUDA_VISIBLE_DEVICES') 25 | os.environ['CUDA_VISIBLE_DEVICES'] = '-1' 26 | 27 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 28 | # Prepare list of images to process 29 | path_images = os.path.abspath(args['path_images']) 30 | basename = os.path.basename(path_images) 31 | path_predictions = os.path.abspath(args['path_predictions']) 32 | 33 | dataset = parc.PARC(root=path_images, subset='.', split=None, mode='image', labels='', in_channels=3, num_classes=32, labeled=False) 34 | model = punet.unet2d_320_dktatlas_positional_20_1_0_0(loadpath=args["model"]).to(device) 35 | percentile = 0.02 36 | 37 | print('Found %d subjects' % len(dataset)) 38 | for idx in range(len(dataset)): 39 | print(' Working on subject %d ' % (idx+1)) 40 | img = dataset.__getitem__(idx)[0].to(device) 41 | minvals = torch.kthvalue(img.flatten(1),round(img.flatten(1).shape[1]*(percentile-0)),dim=1)[0][:,None,None] 42 | maxvals = torch.kthvalue(img.flatten(1),round(img.flatten(1).shape[1]*(1-percentile)),dim=1)[0][:,None,None] 43 | img = torch.min(torch.max(img,minvals),maxvals) 44 | img = (img - img.flatten(1).mean(1).reshape(3,1,1)) * (1 / img.flatten(1).std(1).reshape(3,1,1)) 45 | 46 | dataset.save_output(path_predictions, [model(img[None]).detach().cpu()[0].argmax(0, keepdims=True)], [idx]) 47 | 48 | print(' ') 49 | print('All done!') 50 | print(' ') 51 | 52 | -------------------------------------------------------------------------------- /DeepCSR/deepcsr/1.0/src/utils.py: -------------------------------------------------------------------------------- 1 | import time 2 | import torch 3 | import numpy as np 4 | import nibabel as nib 5 | 6 | 7 | class TicToc: 8 | """ 9 | TicToc class for timing pieces of code. 10 | """ 11 | 12 | def __init__(self): 13 | self._TIC_TIME = {} 14 | self._TOC_TIME = {} 15 | 16 | def tic(self, tag=None): 17 | """ 18 | Timer start function 19 | :param tag: Label to save time 20 | :return: current time 21 | """ 22 | if tag is None: 23 | tag = 'default' 24 | self._TIC_TIME[tag] = time.time() 25 | return self._TIC_TIME[tag] 26 | 27 | def toc(self, tag=None): 28 | """ 29 | Timer ending function 30 | :param tag: Label of the saved time 31 | :return: elapsed time in seconds, or None if no 32 | tic() was recorded for the tag 33 | """ 34 | if tag is None: 35 | tag = 'default' 36 | self._TOC_TIME[tag] = time.time() 37 | 38 | if tag in self._TIC_TIME: 39 | d = (self._TOC_TIME[tag] - self._TIC_TIME[tag]) 40 | return d 41 | else: 42 | print("No tic() start time available for tag {}.".format(tag)) 43 | 44 | # Timer as python context manager 45 | def __enter__(self): 46 | self.tic('CONTEXT') 47 | 48 | def __exit__(self, type, value, traceback): 49 | self.toc('CONTEXT') 50 | 51 | 52 | 53 | def make_3d_grid(bb_min, bb_max, shape): 54 | ''' Makes a 3D grid.
55 | 56 | Args: 57 | bb_min (tuple): bounding box minimum 58 | bb_max (tuple): bounding box maximum 59 | shape (tuple): output shape 60 | ''' 61 | size = shape[0] * shape[1] * shape[2] 62 | 63 | pxs = torch.linspace(bb_min[0], bb_max[0], shape[0]) 64 | pys = torch.linspace(bb_min[1], bb_max[1], shape[1]) 65 | pzs = torch.linspace(bb_min[2], bb_max[2], shape[2]) 66 | 67 | pxs = pxs.view(-1, 1, 1).expand(*shape).contiguous().view(size) 68 | pys = pys.view(1, -1, 1).expand(*shape).contiguous().view(size) 69 | pzs = pzs.view(1, 1, -1).expand(*shape).contiguous().view(size) 70 | p = torch.stack([pxs, pys, pzs], dim=1) 71 | 72 | return p 73 | 74 | 75 | def save_nib_image(path, voxel_grid, affine=np.eye(4), header=None): 76 | nib_img = nib.Nifti1Image(voxel_grid, affine, header) 77 | nib.save(nib_img, path) -------------------------------------------------------------------------------- /DeepCSR/deepcsr/1.0/requirements.yml: -------------------------------------------------------------------------------- 1 | name: deepcsr 2 | channels: 3 | - defaults 4 | dependencies: 5 | - _libgcc_mutex=0.1=main 6 | - _openmp_mutex=4.5=1_gnu 7 | - ca-certificates=2022.4.26=h06a4308_0 8 | - certifi=2021.10.8=py37h06a4308_2 9 | - ld_impl_linux-64=2.38=h1181459_0 10 | - libffi=3.3=he6710b0_2 11 | - libgcc-ng=9.3.0=h5101ec6_17 12 | - libgomp=9.3.0=h5101ec6_17 13 | - libstdcxx-ng=9.3.0=hd4cf53a_17 14 | - ncurses=6.3=h7f8727e_2 15 | - openssl=1.1.1o=h7f8727e_0 16 | - pip=21.2.2=py37h06a4308_0 17 | - python=3.7.13=h12debd9_0 18 | - readline=8.1.2=h7f8727e_1 19 | - setuptools=61.2.0=py37h06a4308_0 20 | - sqlite=3.38.3=hc218d9a_0 21 | - tk=8.6.11=h1ccaba5_1 22 | - wheel=0.37.1=pyhd3eb1b0_0 23 | - xz=5.2.5=h7f8727e_1 24 | - zlib=1.2.12=h7f8727e_2 25 | - pip: 26 | - click==8.1.5 27 | - absl-py==1.0.0 28 | - antlr4-python3-runtime==4.8 29 | - cachetools==5.1.0 30 | - chart-studio==1.1.0 31 | - cycler==0.11.0 32 | - cython==0.29.29 33 | - dataclasses==0.6 34 | - fonttools==4.33.3 35 | - future==0.18.2 36 | - google-auth==2.6.6 37 | - google-auth-oauthlib==0.4.6 38 | - grpcio==1.46.1 39 | - hydra-core==1.1.2 40 | - imageio==2.19.2 41 | - importlib-metadata==4.11.3 42 | - importlib-resources==5.2.3 43 | - jcc==3.12 44 | - joblib==1.1.0 45 | - kiwisolver==1.4.3 46 | - markdown==3.3.7 47 | - matplotlib==3.5.2 48 | - networkx==2.6.3 49 | - oauthlib==3.2.0 50 | - omegaconf==2.1.2 51 | - packaging==21.3 52 | - patsy==0.5.2 53 | - pillow==9.1.0 54 | - plotly==5.9.0 55 | - protobuf==3.20.1 56 | - psutil==5.9.1 57 | - pyasn1==0.4.8 58 | - pyasn1-modules==0.2.8 59 | - pyparsing==3.0.9 60 | - python-dateutil==2.8.2 61 | - pywavelets==1.3.0 62 | - pyyaml==6.0 63 | - requests-oauthlib==1.3.1 64 | - retrying==1.3.3 65 | - rsa==4.8 66 | - rtree==1.0.0 67 | - scikit-image==0.19.2 68 | - scikit-learn==1.0.2 69 | - scipy==1.7.3 70 | - six==1.16.0 71 | - statsmodels==0.13.2 72 | - tenacity==8.0.1 73 | - tensorboard==2.9.0 74 | - tensorboard-data-server==0.6.1 75 | - tensorboard-plugin-wit==1.8.1 76 | - threadpoolctl==3.1.0 77 | - tifffile==2021.11.2 78 | - torch==1.7.0+cu101 79 | - torchaudio==0.7.0 80 | - torchvision==0.8.1+cu101 81 | - trimesh==3.12.0 82 | - typing-extensions==4.2.0 83 | - webcolors==1.12 84 | - werkzeug==2.1.2 85 | - zipp==3.8.0 86 | -------------------------------------------------------------------------------- /neuronets/kwyk/0.4.1/bwn_multi/spec.yaml: -------------------------------------------------------------------------------- 1 | #### container info 2 | image: 3 | singularity: nobrainer-zoo_nobrainer.sif 4 | docker: 
neuronets/nobrainer-zoo:nobrainer 5 | 6 | #### repository info 7 | repository: 8 | repo_url: "git@github.com:neuronets/nobrainer.git" 9 | committish: "72aa211b" 10 | repo_download: False 11 | repo_download_location: "None" 12 | 13 | #### required fields for prediction 14 | inference: 15 | prediction_script: "trained-models/neuronets/kwyk/0.4.1/bwn_multi/predict.py" 16 | command: f"python3 {MODELS_PATH}/{model}/{model_type}/predict.py -m {model_type} {infile[0]} {outfile}" 17 | # TODO: we should add help for model options 18 | options: 19 | n-samples: {mandatory: False, default: 1, argstr: "-n", type: "int"} 20 | batch-size: {mandatory: False, default: 8, argstr: "-b", type: "int"} 21 | save-variance: {argstr: "--save-variance", is_flag: true} 22 | save-entropy: {argstr: "--save-entropy", is_flag: true} 23 | overwrite: {mandatory: False, argstr: "--overwrite", type: "str"} 24 | #atlocation: {argstr: "--atlocation", is_flag: true} # not supported with zoo cli 25 | #### input data characteristics 26 | data_spec: 27 | infile: {n_files: "any"} 28 | outfile: {n_files: 1} 29 | 30 | #### required fields for model training 31 | train: 32 | #### Not implemented #### 33 | 34 | #### training data characteristics 35 | training_data_info: 36 | data_number: 37 | total: 11480 38 | train: 9184 39 | evaluate: 1148 40 | test: 1148 41 | held_out_data: 418 42 | biological_sex: 43 | male: None 44 | female: None 45 | age_histogram: None 46 | race: None 47 | imaging_contrast_info: "T1-weighted" 48 | dataset_sources: ["CoRR", "OpenfMRI", "NKI", "SLIM", "ABIDE", "HCP", "ADHD200", "CMI", "SALD", "Buckner", "HBNSSI", "GSP", "Haxby", "Gobbini", "ICBM", "Barrios"] 49 | data_sites: 50 | number_of_sites: None 51 | sites: None 52 | scanner_models: None 53 | hardware: None 54 | training_parameters: 55 | input_shape: "256x256x256" 56 | block_shape: "32x32x32" 57 | n_classes: 50 58 | lr: 0.0001 59 | n_epochs: None 60 | total_batch_size: 32 61 | number_of_gpus: 4 62 | loss_function: "ELBO" 63 | metrics: "Dice" 64 | data_preprocessing: "intensity normalization using freesurfer" 65 | data_augmentation: "No augmentation" 66 | -------------------------------------------------------------------------------- /DDIG/SynthMorph/1.0.0/brains/model_card.yaml: -------------------------------------------------------------------------------- 1 | Model_details: 2 | Organization: "DDIG" 3 | Model_date: "2020" 4 | Model_version: 1.0.0 5 | Model_type: "U-Net" 6 | More_information: "brains" 7 | Citation_details: "Hoffmann, M., Billot, B., Greve, D. N., Iglesias, J. E., Fischl, B., & Dalca, A. V. (2020). SynthMorph: Learning contrast-invariant registration without acquired images. ArXiv. https://doi.org/10.1109/TMI.2021.3116879" 8 | Contact_info: "https://github.com/voxelmorph/voxelmorph/issues/new" 9 | 10 | Intended_use: 11 | Primary_intended_uses: "Learning contrast-invariant registration without relying on acquired imaging data." 12 | Primary_intended_users: "Researchers and clinicians in neuroimaging and medical image analysis." 13 | Out_of_scope_use_cases: "Not intended for direct clinical diagnosis or treatment planning." 14 | 15 | Factors: 16 | Relevant_factors: "MRI contrasts, neural network training strategies." 17 | Evaluation_factors: "Generalization to a broad array of MRI contrasts, robustness, accuracy." 18 | Model_performance_measures: "Registration accuracy, contrast invariance, computational efficiency." 19 | 20 | Metrics: 21 | Model Performance Measures: "Dice scores, symmetric surface distances, warp folding proportion." 
22 | Decision Thresholds: 23 | Variation Approaches: "Assessment across various MRI contrasts and processing levels." 24 | 25 | Evaluation Data: 26 | Datasets: "Includes brain MRI datasets from OASIS, HCP-A, BIRN, UK Biobank, and more." 27 | Motivation: "To evaluate robustness to contrast variations and generalizability." 28 | Preprocessing: "Standard neuroimaging preprocessing steps, including skull-stripping and normalization." 29 | 30 | Training Data: 31 | Datasets: "Synthetic data generated from label maps using a generative model." 32 | Motivation: "To achieve contrast-invariant registration capabilities." 33 | Preprocessing: "Synthesis of images from label maps to create training data with wide-ranging variability." 34 | 35 | Quantitative Analyses: 36 | Unitary Results: "Performance metrics like Dice scores and surface distances." 37 | Intersectional Results: "Analysis across different MRI contrasts and datasets." 38 | 39 | Ethical Considerations: 40 | "Adherence to ethical standards in AI and medical imaging research, particularly regarding data privacy and responsible use." 41 | 42 | Caveats and Recommendations: 43 | "Recognition of the model's limitations in clinical settings and the importance of validation with real-world data." 44 | -------------------------------------------------------------------------------- /DDIG/SynthMorph/1.0.0/shapes/model_card.yaml: -------------------------------------------------------------------------------- 1 | Model_details: 2 | Organization: "DDIG" 3 | Model_date: "2020" 4 | Model_version: 1.0.0 5 | Model_type: "U-Net" 6 | More_information: "shapes" 7 | Citation_details: "Hoffmann, M., Billot, B., Greve, D. N., Iglesias, J. E., Fischl, B., & Dalca, A. V. (2020). SynthMorph: Learning contrast-invariant registration without acquired images. ArXiv. https://doi.org/10.1109/TMI.2021.3116879" 8 | Contact_info: "https://github.com/voxelmorph/voxelmorph/issues/new" 9 | 10 | Intended_use: 11 | Primary_intended_uses: "Learning contrast-invariant registration without relying on acquired imaging data." 12 | Primary_intended_users: "Researchers and clinicians in neuroimaging and medical image analysis." 13 | Out_of_scope_use_cases: "Not intended for direct clinical diagnosis or treatment planning." 14 | 15 | Factors: 16 | Relevant_factors: "MRI contrasts, neural network training strategies." 17 | Evaluation_factors: "Generalization to a broad array of MRI contrasts, robustness, accuracy." 18 | Model_performance_measures: "Registration accuracy, contrast invariance, computational efficiency." 19 | 20 | Metrics: 21 | Model Performance Measures: "Dice scores, symmetric surface distances, warp folding proportion." 22 | Decision Thresholds: 23 | Variation Approaches: "Assessment across various MRI contrasts and processing levels." 24 | 25 | Evaluation Data: 26 | Datasets: "Includes brain MRI datasets from OASIS, HCP-A, BIRN, UK Biobank, and more." 27 | Motivation: "To evaluate robustness to contrast variations and generalizability." 28 | Preprocessing: "Standard neuroimaging preprocessing steps, including skull-stripping and normalization." 29 | 30 | Training Data: 31 | Datasets: "Synthetic data generated from label maps using a generative model." 32 | Motivation: "To achieve contrast-invariant registration capabilities." 33 | Preprocessing: "Synthesis of images from label maps to create training data with wide-ranging variability." 34 | 35 | Quantitative Analyses: 36 | Unitary Results: "Performance metrics like Dice scores and surface distances." 
37 | Intersectional Results: "Analysis across different MRI contrasts and datasets." 38 | 39 | Ethical Considerations: 40 | "Adherence to ethical standards in AI and medical imaging research, particularly regarding data privacy and responsible use." 41 | 42 | Caveats and Recommendations: 43 | "Recognition of the model's limitations in clinical settings and the importance of validation with real-world data." 44 | -------------------------------------------------------------------------------- /UCL/SynthSR/1.0.0/general/model_card.yaml: -------------------------------------------------------------------------------- 1 | Model_details: 2 | Organization: "UCL" 3 | Model_date: "2021" 4 | Model_version: 1.0.0 5 | Model_type: "U-Net" 6 | More_information: "general" 7 | Citation_details: "Billot, B., Greve, D. N., Puonti, O., Thielscher, A., Van Leemput, K., Fischl, B., Dalca, A. V., & Iglesias, J. E. (2021). SynthSeg: Segmentation of brain MRI scans of any contrast and resolution without retraining. ArXiv. https://doi.org/10.1016/j.media.2023.102789" 8 | Contact_info: "https://github.com/BBillot/SynthSR/issues/new" 9 | 10 | Intended_use: 11 | Primary_intended_uses: "Super-resolution and synthesis of MRI scans to 1 mm isotropic MP-RAGE volumes, lesion inpainting." 12 | Primary_intended_users: "Researchers and clinicians in neuroimaging." 13 | Out_of_scope_use_cases: "Non-MRI based imaging applications and non-clinical uses." 14 | 15 | Factors: 16 | Relevant_factors: "MRI scan orientation, resolution, and contrast; lesion presence." 17 | Evaluation_factors: "Accuracy in super-resolution and synthesis, lesion inpainting effectiveness." 18 | Model_performance_measures: "Quality of generated 1 mm isotropic MP-RAGE volumes, lesion inpainting accuracy." 19 | 20 | Metrics: 21 | Model Performance Measures: "Quality assessment of synthesized MP-RAGE volumes, lesion inpainting effectiveness." 22 | Decision Thresholds: 23 | Variation Approaches: "Assessment across different MRI modalities and resolutions." 24 | 25 | Evaluation Data: 26 | Datasets: "Clinical MRI and CT scans with different orientations, resolutions, and contrasts." 27 | Motivation: "To evaluate the model's ability to generate high-quality MP-RAGE volumes from diverse clinical scans." 28 | Preprocessing: "Handling various MRI and CT scan formats and conditions." 29 | 30 | Training Data: 31 | Datasets: "Synthetic data generated using a generative model based on SynthSeg." 32 | Motivation: "To train a network for effective super-resolution and synthesis of MRI scans." 33 | Preprocessing: "Synthetic data generation with randomized imaging parameters." 34 | 35 | Quantitative Analyses: 36 | Unitary Results: "Performance analysis on individual datasets." 37 | Intersectional Results: "Analysis across different scan types and conditions." 38 | 39 | Ethical Considerations: 40 | "General ethical considerations in AI and medical imaging, such as data privacy and responsible use." 41 | 42 | Caveats and Recommendations: 43 | "Awareness of model's limitations and importance of supplementary clinical evaluation." 
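[Note] The model cards above (SynthMorph brains, SynthMorph shapes, SynthSR general) follow one shared schema: Model_details, Intended_use, Factors, Metrics, Evaluation Data, Training Data, Quantitative Analyses, Ethical Considerations, and Caveats and Recommendations. Below is a minimal sketch, assuming PyYAML is installed, of how such a card could be loaded and validated programmatically; the path and the required-section list are illustrative and are not part of the zoo's actual tooling. Note that several keys (e.g. "Evaluation Data", "Model Performance Measures") contain spaces, so they must be accessed as quoted dictionary keys.

    # Sketch only: load a model card and verify the sections shared by the
    # cards in this repository. Assumes PyYAML; the path is an example.
    import yaml

    REQUIRED_SECTIONS = [
        "Model_details", "Intended_use", "Factors", "Metrics",
        "Evaluation Data", "Training Data", "Quantitative Analyses",
        "Ethical Considerations", "Caveats and Recommendations",
    ]

    def load_model_card(path):
        with open(path) as f:
            card = yaml.safe_load(f)
        missing = [s for s in REQUIRED_SECTIONS if s not in card]
        if missing:
            raise ValueError(f"{path} is missing sections: {missing}")
        return card

    card = load_model_card("UCL/SynthSR/1.0.0/general/model_card.yaml")
    print(card["Model_details"]["Citation_details"])
    print(card["Metrics"]["Model Performance Measures"])  # key contains spaces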
44 | -------------------------------------------------------------------------------- /neuronets/kwyk/0.4.1/bvwn_multi_prior/spec.yaml: -------------------------------------------------------------------------------- 1 | #### container info 2 | image: 3 | singularity: nobrainer-zoo_nobrainer.sif 4 | docker: neuronets/nobrainer-zoo:nobrainer 5 | 6 | #### repository info 7 | repository: 8 | repo_url: "git@github.com:neuronets/nobrainer.git" 9 | committish: "72aa211b" 10 | repo_download: False 11 | repo_download_location: "None" 12 | 13 | #### required fields for prediction 14 | inference: 15 | prediction_script: "trained-models/neuronets/kwyk/0.4.1/bvwn_multi_prior/predict.py" 16 | command: f"python3 {MODELS_PATH}/{model}/{model_type}/predict.py -m {model_type} {infile[0]} {outfile}" 17 | # TODO: we should add help for model options 18 | options: 19 | n-samples: {mandatory: False, default: 1, argstr: "-n", type: "int"} 20 | batch-size: {mandatory: False, default: 8, argstr: "-b", type: "int"} 21 | save-variance: {argstr: "--save-variance", is_flag: true} 22 | save-entropy: {argstr: "--save-entropy", is_flag: true} 23 | overwrite: {mandatory: False, argstr: "--overwrite", type: "str"} 24 | #atlocation: {argstr: "--atlocation", is_flag: true} # not supported with zoo cli 25 | #### input data characteristics 26 | data_spec: 27 | infile: {n_files: "any"} 28 | outfile: {n_files: 1} 29 | 30 | #### required fields for model training 31 | train: 32 | #### Not implemented #### 33 | 34 | #### training data characteristics 35 | training_data_info: 36 | data_number: 37 | total: 11480 38 | train: 9184 39 | evaluate: 1148 40 | test: 1148 41 | held_out_data: 418 42 | biological_sex: 43 | male: None 44 | female: None 45 | age_histogram: None 46 | race: None 47 | imaging_contrast_info: "T1-weighted" 48 | dataset_sources: ["CoRR", "OpenfMRI", "NKI", "SLIM", "ABIDE", "HCP", "ADHD200", "CMI", "SALD", "Buckner", "HBNSSI", "GSP", "Haxby", "Gobbini", "ICBM", "Barrios"] 49 | data_sites: 50 | number_of_sites: None 51 | sites: None 52 | scanner_models: None 53 | hardware: None 54 | training_parameters: 55 | input_shape: "256x256x256" 56 | block_shape: "32x32x32" 57 | n_classes: 50 58 | lr: 0.0001 59 | n_epochs: None 60 | total_batch_size: 32 61 | number_of_gpus: 4 62 | loss_function: "ELBO" 63 | metrics: "Dice" 64 | data_preprocessing: "intensity normalization using freesurfer" 65 | data_augmentation: "No augmentation" 66 | -------------------------------------------------------------------------------- /UCL/SynthSR/1.0.0/hyperfine/model_card.yaml: -------------------------------------------------------------------------------- 1 | Model_details: 2 | Organization: "UCL" 3 | Model_date: "2021" 4 | Model_version: 1.0.0 5 | Model_type: "U-Net" 6 | More_information: "hyperfine" 7 | Citation_details: "Billot, B., Greve, D. N., Puonti, O., Thielscher, A., Van Leemput, K., Fischl, B., Dalca, A. V., & Iglesias, J. E. (2021). SynthSeg: Segmentation of brain MRI scans of any contrast and resolution without retraining. ArXiv. https://doi.org/10.1016/j.media.2023.102789" 8 | Contact_info: "https://github.com/BBillot/SynthSR/issues/new" 9 | 10 | Intended_use: 11 | Primary_intended_uses: "Super-resolution and synthesis of MRI scans to 1 mm isotropic MP-RAGE volumes, lesion inpainting." 12 | Primary_intended_users: "Researchers and clinicians in neuroimaging." 13 | Out_of_scope_use_cases: "Non-MRI based imaging applications and non-clinical uses." 
14 | 15 | Factors: 16 | Relevant_factors: "MRI scan orientation, resolution, and contrast; lesion presence." 17 | Evaluation_factors: "Accuracy in super-resolution and synthesis, lesion inpainting effectiveness." 18 | Model_performance_measures: "Quality of generated 1 mm isotropic MP-RAGE volumes, lesion inpainting accuracy." 19 | 20 | Metrics: 21 | Model Performance Measures: "Quality assessment of synthesized MP-RAGE volumes, lesion inpainting effectiveness." 22 | Decision Thresholds: 23 | Variation Approaches: "Assessment across different MRI modalities and resolutions." 24 | 25 | Evaluation Data: 26 | Datasets: "Clinical MRI and CT scans with different orientations, resolutions, and contrasts." 27 | Motivation: "To evaluate the model's ability to generate high-quality MP-RAGE volumes from diverse clinical scans." 28 | Preprocessing: "Handling various MRI and CT scan formats and conditions." 29 | 30 | Training Data: 31 | Datasets: "Synthetic data generated using a generative model based on SynthSeg." 32 | Motivation: "To train a network for effective super-resolution and synthesis of MRI scans." 33 | Preprocessing: "Synthetic data generation with randomized imaging parameters." 34 | 35 | Quantitative Analyses: 36 | Unitary Results: "Performance analysis on individual datasets." 37 | Intersectional Results: "Analysis across different scan types and conditions." 38 | 39 | Ethical Considerations: 40 | "General ethical considerations in AI and medical imaging, such as data privacy and responsible use." 41 | 42 | Caveats and Recommendations: 43 | "Awareness of model's limitations and importance of supplementary clinical evaluation." 44 | -------------------------------------------------------------------------------- /neuronets/kwyk/0.4.1/bwn/spec.yaml: -------------------------------------------------------------------------------- 1 | #### container info 2 | image: 3 | singularity: nobrainer-zoo_nobrainer.sif 4 | docker: neuronets/nobrainer-zoo:nobrainer 5 | 6 | #### repository info 7 | repository: 8 | repo_url: "git@github.com:neuronets/nobrainer.git" 9 | committish: "72aa211b" 10 | repo_download: False 11 | repo_download_location: "None" 12 | 13 | #### required fields for prediction 14 | inference: 15 | prediction_script: "trained-models/neuronets/kwyk/0.4.1/bwn/predict.py" 16 | command: f"python3 {MODELS_PATH}/{model}/{model_type}/predict.py -m {model_type} {infile[0]} {outfile}" 17 | # TODO: we should add help for model options 18 | options: 19 | n-samples: {mandatory: False, default: 1, argstr: "-n", type: "int"} 20 | batch-size: {mandatory: False, default: 8, argstr: "-b", type: "int"} 21 | save-variance: {argstr: "--save-variance", is_flag: true} 22 | save-entropy: {argstr: "--save-entropy", is_flag: true} 23 | overwrite: {mandatory: False, argstr: "--overwrite", type: "str"} 24 | #atlocation: {argstr: "--atlocation", is_flag: true} # not supported with zoo cli 25 | #### input data characteristics 26 | data_spec: 27 | infile: {n_files: "any"} 28 | outfile: {n_files: 1} 29 | 30 | #### required fields for model training 31 | train: 32 | #### Not implemented #### 33 | 34 | #### training data characteristics 35 | training_data_info: 36 | data_number: 37 | total: 11480 38 | train: 9184 39 | evaluate: 1148 40 | test: 1148 41 | held_out_data: 418 42 | biological_sex: 43 | male: None 44 | female: None 45 | age_histogram: None 46 | race: None 47 | imaging_contrast_info: "T1-weighted" 48 | dataset_sources: ["CoRR", "OpenfMRI", "NKI", "SLIM", "ABIDE", "HCP", "ADHD200", "CMI", "SALD", 
"Buckner", "HBNSSI", "GSP", "Haxby", "Gobbini", "ICBM", "Barrios"] 49 | data_sites: 50 | number_of_sites: None 51 | sites: None 52 | scanner_models: None 53 | hardware: None 54 | training_parameters: 55 | input_shape: "256x256x256" 56 | block_shape: "32x32x32" 57 | n_classes: 50 58 | lr: 0.0001 59 | n_epochs: None 60 | total_batch_size: 32 61 | number_of_gpus: 4 62 | loss_function: "Softmax cross-entropy + L2 regularization" 63 | metrics: "Dice" 64 | data_preprocessing: "intensity normalization using freesurfer" 65 | data_augmentation: "No augmentation" -------------------------------------------------------------------------------- /DeepCSR/deepcsr/1.0/model_card.yaml: -------------------------------------------------------------------------------- 1 | Model_details: 2 | Organization: "DeepCSR" 3 | Model_date: "2020" 4 | Model_version: 1.0.0 5 | Model_type: "Convolutional Neural Network" 6 | More_information: "DeepCSR" 7 | Citation_details: "Cruz, R. S., Lebrat, L., Bourgeat, P., Fookes, C., Fripp, J., & Salvado, O. (2020). DeepCSR: A 3D Deep Learning Approach for Cortical Surface Reconstruction. ArXiv. /abs/2010.11423" 8 | Contact_info: "https://github.com/neuroneural/DeepCSR-fork/issues/new" 9 | 10 | Intended_use: 11 | Primary_intended_uses: "Cortical surface reconstruction from magnetic resonance imaging (MRI)." 12 | Primary_intended_users: "Researchers and practitioners in neuroimaging and neurodegenerative disease studies." 13 | Out_of_scope_use_cases: "Applications outside of MRI-based brain imaging." 14 | 15 | Factors: 16 | Relevant_factors: "Variability in MRI scans, subject demographics, and cortical surface complexity." 17 | Evaluation_factors: "Precision in cortical surface reconstruction, adaptability to different MRI datasets." 18 | Model_performance_measures: "Accuracy in cortical surface reconstruction, speed of processing." 19 | 20 | Metrics: 21 | Model Performance Measures: "Reconstruction accuracy, runtime efficiency." 22 | Decision Thresholds: "Thresholds in hypercolumn feature extraction and surface representation." 23 | Variation Approaches: "Adapting to different MRI resolutions and cortical surface complexities." 24 | 25 | Evaluation Data: 26 | Datasets: "Alzheimer’s Disease Neuroimaging Initiative (ADNI) study dataset." 27 | Motivation: "To assess performance in a clinically relevant context with a variety of brain images." 28 | Preprocessing: "Affine registration to a brain template, implicit surface representation computation." 29 | 30 | Training Data: 31 | Datasets: "MRI data and corresponding pseudo-ground truth surfaces generated with FreeSurfer V6.0." 32 | Motivation: "To develop a model capable of accurate and efficient cortical surface reconstruction." 33 | Preprocessing: "Co-registering MR images to a brain template, point sampling near the target surface." 34 | 35 | Quantitative Analyses: 36 | Unitary Results: "Comparison with traditional cortical reconstruction methods like FreeSurfer." 37 | Intersectional Results: "Performance analysis across different MRI scans and cortical surface types." 38 | 39 | Ethical Considerations: 40 | "Data privacy and ethical use of MRI scans, especially in the context of neurodegenerative diseases." 41 | 42 | Caveats and Recommendations: 43 | "Users should be aware of the model's limitations in extremely complex cortical surfaces. Continuous updates and validation with diverse datasets are recommended." 
44 | -------------------------------------------------------------------------------- /UCL/SynthSR/1.0.0/general/spec.yaml: -------------------------------------------------------------------------------- 1 | #### container info 2 | image: 3 | singularity: nobrainer-zoo_ucl.sif 4 | docker: neuronets/nobrainer-zoo:ucl 5 | 6 | #### repository info 7 | repository: 8 | repo_url: "https://github.com/BBillot/SynthSR.git" 9 | committish: "59b92b54" 10 | repo_download: True 11 | repo_download_location: f"{REPO_PATH}/{model_nm}-{ver}" 12 | 13 | #### required fields for prediction 14 | inference: 15 | prediction_script: "trained-models/UCL/SynthSR/1.0.0/general/predict.py" 16 | command: f"python3 {MODELS_PATH}/{model}/{model_type}/predict.py --repo_path {repo_dest} --model_path {model_path} {infile[0]} {outfile}" 17 | # TODO: we should add help for options. 18 | options: 19 | threads: {mandatory: False, argstr: "--threads", type: "int", default: 1} 20 | cpu: {mandatory: False, argstr: "--cpu", is_flag: true} 21 | ct: {mandatory: False, argstr: "--ct", is_flag: true} 22 | #### input data characteristics 23 | data_spec: 24 | infile: {n_files: 1} 25 | outfile: {n_files: 1} 26 | 27 | #### training data characteristics 28 | training_data_info: 29 | data_number: 30 | total: None 31 | train: None 32 | evaluate: None 33 | test: None 34 | biological_sex: 35 | male: None 36 | female: None 37 | age_histogram: None 38 | race: None 39 | imaging_contrast_info: "any contrast" 40 | dataset_sources: None 41 | data_sites: 42 | number_of_sites: None 43 | sites: None 44 | scanner_models: None 45 | hardware: None 46 | training_parameters: 47 | input_shape: None 48 | block_shape: None 49 | n_classes: None 50 | lr: None 51 | n_epochs: None 52 | total_batch_size: None 53 | number_of_gpus: None 54 | loss_function: None 55 | metrics: None 56 | data_preprocessing: None 57 | data_augmentation: "spatial transform" 58 | 59 | #### model information and help 60 | model: 61 | model_name: "SynthSR" 62 | description: "3D brain super-resolution model" 63 | structure: "U-Net" 64 | training_mode: None 65 | model_url: "https://github.com/BBillot/SynthSR" 66 | Zoo_function: "predict" 67 | example: "nobrainer-zoo predict -m UCL/SynthSR/1.0.0 --model_type general /out.nii.gz" 68 | note: "Please provide an output file name with an extension." 69 | input_file_type: "nii.gz" 70 | model_details: "" 71 | intended_use: "" 72 | factors: "" 73 | metrics: "" 74 | eval_data: "" 75 | training_data: "" 76 | quant_analyses: "" 77 | ethical_considerations: "" 78 | caveats_recs: "" -------------------------------------------------------------------------------- /DDIG/VoxelMorph/1.0.0/model_card.yaml: -------------------------------------------------------------------------------- 1 | Model_details: 2 | Organization: "DDIG" 3 | Model_date: "2018" 4 | Model_version: 1.0.0 5 | Model_type: "U-Net" 6 | More_information: "VoxelMorph" 7 | Citation_details: "Balakrishnan, G., Zhao, A., Sabuncu, M. R., Guttag, J., & Dalca, A. V. (2018). VoxelMorph: A Learning Framework for Deformable Medical Image Registration. ArXiv. https://doi.org/10.1109/TMI.2019.2897538" 8 | Contact_info: "https://github.com/voxelmorph/voxelmorph/issues/new?labels=voxelmorph" 9 | 10 | Intended_use: 11 | Primary_intended_uses: "Deformable medical image registration for various imaging studies." 12 | Primary_intended_users: "Researchers and professionals in medical imaging and computational anatomy."
13 | Out_of_scope_use_cases: "Non-medical image processing, applications outside deformable registration." 14 | 15 | Factors: 16 | Relevant_factors: "Image type variability, subject diversity, anatomical variations." 17 | Evaluation_factors: "Accuracy of registration, adaptability to different imaging conditions." 18 | Model_performance_measures: "Image matching objective functions, overlap of anatomical segmentations." 19 | 20 | Metrics: 21 | Model Performance Measures: "Dice coefficient for registration accuracy, runtime efficiency." 22 | Decision Thresholds: "Thresholds in loss functions for image similarity and deformation smoothness." 23 | Variation Approaches: "Adaptation to different datasets, integration of auxiliary data for improved registration." 24 | 25 | Evaluation Data: 26 | Datasets: "Used for atlas-based and subject-to-subject registration; includes 3731 T1-weighted MRI scans from various datasets." 27 | Motivation: "To validate the model's effectiveness across diverse imaging types and subject populations." 28 | Preprocessing: "Affine spatial normalization, brain extraction, and anatomical segmentation using FreeSurfer." 29 | 30 | Training Data: 31 | Datasets: "Training on a large-scale multi-site dataset including T1-weighted MRI scans." 32 | Motivation: "To develop a robust and efficient model for deformable image registration." 33 | Preprocessing: "Standard preprocessing steps including affine normalization and brain extraction." 34 | 35 | Quantitative Analyses: 36 | Unitary Results: "Registration accuracy and runtime comparisons with state-of-the-art methods." 37 | Intersectional Results: "Performance analysis across different datasets, subject groups, and imaging conditions." 38 | 39 | Ethical Considerations: 40 | "Ensuring the privacy and security of sensitive medical imaging data." 41 | 42 | Caveats and Recommendations: 43 | "Users should be aware of the model's limitations in challenging registration scenarios. Regular updates and validations are recommended for maintaining accuracy and efficiency." 44 | -------------------------------------------------------------------------------- /DDIG/SynthStrip/1.0.0/model_card.yaml: -------------------------------------------------------------------------------- 1 | Model_details: 2 | Organization: "DDIG" 3 | Model_date: "2022" 4 | Model_version: 1.0.0 5 | Model_type: "U-Net" 6 | More_information: "SynthStrip" 7 | Citation_details: "Andrew Hoopes, Jocelyn S. Mora, Adrian V. Dalca, Bruce Fischl, Malte Hoffmann, SynthStrip: skull-stripping for any brain image, NeuroImage, Volume 260, 2022, 119474, ISSN 1053-8119, https://doi.org/10.1016/j.neuroimage.2022.119474." 8 | Contact_info: "https://github.com/freesurfer/freesurfer/issues/new" 9 | 10 | Intended_use: 11 | Primary_intended_uses: "Skull-stripping in various imaging modalities, resolutions, and subject populations." 12 | Primary_intended_users: "Researchers and practitioners in medical imaging." 13 | Out_of_scope_use_cases: "Non-medical image processing, applications outside brain imaging." 14 | 15 | Factors: 16 | Relevant_factors: "Image quality, resolution, imaging modalities (MRI, CT, PET), subject age and health condition (infants to adults, including glioblastoma patients)" 17 | Evaluation_factors: "Robustness to variations in imaging conditions, accuracy of brain voxel extraction." 18 | Model_performance_measures: "Accuracy in brain voxel extraction, compatibility with different imaging modalities." 
19 | 20 | Metrics: 21 | Model Performance Measures: "Accuracy in skull-stripping across different imaging modalities and subject conditions." 22 | Decision Thresholds: "Thresholds for voxel classification as brain or non-brain tissue." 23 | Variation Approaches: "Adaptation to different imaging conditions and subject demographics." 24 | 25 | Evaluation Data: 26 | Datasets: "622 MRI, CT, and PET scans with corresponding ground-truth brain masks." 27 | Motivation: "To evaluate performance across diverse imaging types and subject populations." 28 | Preprocessing: "Standardization of image formats and resolution normalization." 29 | 30 | Training Data: 31 | Datasets: "131 adult MPRAGE scans with FreeSurfer brain labels and additional non-brain labels." 32 | Motivation: "To create a robust model capable of accurately segmenting brain tissue in diverse imaging contexts." 33 | Preprocessing: "Segmentation map synthesis, resolution normalization, data augmentation for robustness." 34 | 35 | Quantitative Analyses: 36 | Unitary Results: "Performance metrics for individual modalities and subject conditions." 37 | Intersectional Results: "Analysis across combinations of imaging modalities and subject demographics." 38 | 39 | Ethical Considerations: 40 | "Consideration of patient privacy and data security, especially given the sensitive nature of medical imaging data." 41 | 42 | Caveats and Recommendations: 43 | "Users should be aware of the model's limitations in extreme imaging conditions. Continuous validation with new data is recommended to maintain performance accuracy." 44 | -------------------------------------------------------------------------------- /neuronets/kwyk/0.4.1/bwn/model_card.yaml: -------------------------------------------------------------------------------- 1 | Model_details: 2 | Organization: "neuronets" 3 | Model_date: "2019" 4 | Model_version: 0.4.0 5 | Model_type: "Meshnet" 6 | More_information: "bwn" 7 | Citation_details: "McClure, P., Rho, N., Lee, J. A., Kaczmarzyk, J. R., Zheng, C. Y., Ghosh, S. S., Nielson, D. M., Thomas, A. G., Bandettini, P., & Pereira, F. (2019). Knowing What You Know in Brain Segmentation Using Bayesian Deep Neural Networks. Frontiers in Neuroinformatics, 13, 479876. https://doi.org/10.3389/fninf.2019.00067" 8 | Contact_info: "https://github.com/neuronets/kwyk/issues/new" 9 | 10 | Intended_use: 11 | Primary_intended_uses: "Brain segmentation from structural MRI volumes." 12 | Primary_intended_users: "Researchers in neuroimaging, clinicians, and those involved in brain structure studies." 13 | Out_of_scope_use_cases: "Non-MRI based brain imaging applications." 14 | 15 | Factors: 16 | Relevant_factors: "Variability in MRI scans, differences in brain structures, and noise in images." 17 | Evaluation_factors: "Segmentation accuracy, uncertainty estimation, and generalization across different datasets." 18 | Model_performance_measures: "Dice coefficient, true positive rate, false negative rate." 19 | 20 | Metrics: 21 | Model Performance Measures: "Average Dice scores of 0.8373 (in-site) and 0.7921 (out-of-site)." 22 | Decision Thresholds: "Thresholds in uncertainty estimation for predicting segmentation errors and scan quality." 23 | Variation Approaches: "Adaptation to diverse MRI datasets and scan qualities." 24 | 25 | Evaluation Data: 26 | Datasets: "Combination of several datasets including CoRR, OpenfMRI, NKI, and others totaling 11,480 T1 sMRI volumes." 
27 | Motivation: "To ensure robust performance across varied datasets and enhance generalizability." 28 | Preprocessing: "Resampling to 1mm isotropic cubic volumes, normalization, and z-scoring." 29 | 30 | Training Data: 31 | Datasets: "Large dataset of 9,184 sMRI volumes, with 50-class FreeSurfer segmentations as labels." 32 | Motivation: "To train a network that can rapidly and accurately predict FreeSurfer segmentations." 33 | Preprocessing: "Data was split into 32x32x32 sub-volumes, used as inputs for the neural network." 34 | 35 | Quantitative Analyses: 36 | Unitary Results: "Performance evaluation based on Dice scores for individual classes across test volumes." 37 | Intersectional Results: "Analysis of performance generalization on in-site and out-of-site test sets." 38 | 39 | Ethical Considerations: 40 | "The tool is designed for research purposes and not intended as a standalone diagnostic tool. Users should be cautious about its application in clinical settings." 41 | 42 | Caveats and Recommendations: 43 | "The model shows high accuracy in brain segmentation tasks, but users should be aware of potential variability in performance across different MRI datasets. It is recommended to evaluate the model's performance in the specific context of use." 44 | -------------------------------------------------------------------------------- /neuronets/kwyk/0.4.1/bwn_multi/model_card.yaml: -------------------------------------------------------------------------------- 1 | Model_details: 2 | Organization: "neuronets" 3 | Model_date: "2019" 4 | Model_version: 0.4.0 5 | Model_type: "Meshnet" 6 | More_information: "bwn_multi" 7 | Citation_details: "McClure, P., Rho, N., Lee, J. A., Kaczmarzyk, J. R., Zheng, C. Y., Ghosh, S. S., Nielson, D. M., Thomas, A. G., Bandettini, P., & Pereira, F. (2019). Knowing What You Know in Brain Segmentation Using Bayesian Deep Neural Networks. Frontiers in Neuroinformatics, 13, 479876. https://doi.org/10.3389/fninf.2019.00067" 8 | Contact_info: "https://github.com/neuronets/kwyk/issues/new" 9 | 10 | Intended_use: 11 | Primary_intended_uses: "Brain segmentation from structural MRI volumes." 12 | Primary_intended_users: "Researchers in neuroimaging, clinicians, and those involved in brain structure studies." 13 | Out_of_scope_use_cases: "Non-MRI based brain imaging applications." 14 | 15 | Factors: 16 | Relevant_factors: "Variability in MRI scans, differences in brain structures, and noise in images." 17 | Evaluation_factors: "Segmentation accuracy, uncertainty estimation, and generalization across different datasets." 18 | Model_performance_measures: "Dice coefficient, true positive rate, false negative rate." 19 | 20 | Metrics: 21 | Model Performance Measures: "Average Dice scores of 0.8373 (in-site) and 0.7921 (out-of-site)." 22 | Decision Thresholds: "Thresholds in uncertainty estimation for predicting segmentation errors and scan quality." 23 | Variation Approaches: "Adaptation to diverse MRI datasets and scan qualities." 24 | 25 | Evaluation Data: 26 | Datasets: "Combination of several datasets including CoRR, OpenfMRI, NKI, and others totaling 11,480 T1 sMRI volumes." 27 | Motivation: "To ensure robust performance across varied datasets and enhance generalizability." 28 | Preprocessing: "Resampling to 1mm isotropic cubic volumes, normalization, and z-scoring." 29 | 30 | Training Data: 31 | Datasets: "Large dataset of 9,184 sMRI volumes, with 50-class FreeSurfer segmentations as labels." 
32 | Motivation: "To train a network that can rapidly and accurately predict FreeSurfer segmentations." 33 | Preprocessing: "Data was split into 32x32x32 sub-volumes, used as inputs for the neural network." 34 | 35 | Quantitative Analyses: 36 | Unitary Results: "Performance evaluation based on Dice scores for individual classes across test volumes." 37 | Intersectional Results: "Analysis of performance generalization on in-site and out-of-site test sets." 38 | 39 | Ethical Considerations: 40 | "The tool is designed for research purposes and not intended as a standalone diagnostic tool. Users should be cautious about its application in clinical settings." 41 | 42 | Caveats and Recommendations: 43 | "The model shows high accuracy in brain segmentation tasks, but users should be aware of potential variability in performance across different MRI datasets. It is recommended to evaluate the model's performance in the specific context of use." 44 | -------------------------------------------------------------------------------- /neuronets/kwyk/0.4.1/bvwn_multi_prior/model_card.yaml: -------------------------------------------------------------------------------- 1 | Model_details: 2 | Organization: "neuronets" 3 | Model_date: "2019" 4 | Model_version: 0.4.0 5 | Model_type: "Meshnet" 6 | More_information: "bvwn_multi_prior" 7 | Citation_details: "McClure, P., Rho, N., Lee, J. A., Kaczmarzyk, J. R., Zheng, C. Y., Ghosh, S. S., Nielson, D. M., Thomas, A. G., Bandettini, P., & Pereira, F. (2019). Knowing What You Know in Brain Segmentation Using Bayesian Deep Neural Networks. Frontiers in Neuroinformatics, 13, 479876. https://doi.org/10.3389/fninf.2019.00067" 8 | Contact_info: "https://github.com/neuronets/kwyk/issues/new" 9 | 10 | Intended_use: 11 | Primary_intended_uses: "Brain segmentation from structural MRI volumes." 12 | Primary_intended_users: "Researchers in neuroimaging, clinicians, and those involved in brain structure studies." 13 | Out_of_scope_use_cases: "Non-MRI based brain imaging applications." 14 | 15 | Factors: 16 | Relevant_factors: "Variability in MRI scans, differences in brain structures, and noise in images." 17 | Evaluation_factors: "Segmentation accuracy, uncertainty estimation, and generalization across different datasets." 18 | Model_performance_measures: "Dice coefficient, true positive rate, false negative rate." 19 | 20 | Metrics: 21 | Model Performance Measures: "Average Dice scores of 0.8373 (in-site) and 0.7921 (out-of-site)." 22 | Decision Thresholds: "Thresholds in uncertainty estimation for predicting segmentation errors and scan quality." 23 | Variation Approaches: "Adaptation to diverse MRI datasets and scan qualities." 24 | 25 | Evaluation Data: 26 | Datasets: "Combination of several datasets including CoRR, OpenfMRI, NKI, and others totaling 11,480 T1 sMRI volumes." 27 | Motivation: "To ensure robust performance across varied datasets and enhance generalizability." 28 | Preprocessing: "Resampling to 1mm isotropic cubic volumes, normalization, and z-scoring." 29 | 30 | Training Data: 31 | Datasets: "Large dataset of 9,184 sMRI volumes, with 50-class FreeSurfer segmentations as labels." 32 | Motivation: "To train a network that can rapidly and accurately predict FreeSurfer segmentations." 33 | Preprocessing: "Data was split into 32x32x32 sub-volumes, used as inputs for the neural network." 34 | 35 | Quantitative Analyses: 36 | Unitary Results: "Performance evaluation based on Dice scores for individual classes across test volumes." 
37 | Intersectional Results: "Analysis of performance generalization on in-site and out-of-site test sets." 38 | 39 | Ethical Considerations: 40 | "The tool is designed for research purposes and not intended as a standalone diagnostic tool. Users should be cautious about its application in clinical settings." 41 | 42 | Caveats and Recommendations: 43 | "The model shows high accuracy in brain segmentation tasks, but users should be aware of potential variability in performance across different MRI datasets. It is recommended to evaluate the model's performance in the specific context of use." 44 | -------------------------------------------------------------------------------- /neuronets/ams/0.1.0/model_card.yaml: -------------------------------------------------------------------------------- 1 | Model_details: 2 | Organization: "neuronets" 3 | Model_date: "2020" 4 | Model_version: 0.1.0 5 | Model_type: "U-Net" 6 | More_information: "ams" 7 | Citation_details: "Boaro, A., Kaczmarzyk, J. R., Kavouridis, V. K., Harary, M., Mammi, M., Dawood, H., Shea, A., Cho, E. Y., Juvekar, P., Noh, T., Rana, A., Ghosh, S., & Arnaout, O. (2022). Deep neural networks allow expert-level brain meningioma segmentation and present potential for improvement of clinical practice. Scientific Reports, 12(1), 1-9. https://doi.org/10.1038/s41598-022-19356-5" 8 | Contact_info: "https://github.com/neuronets/ams/issues/new" 9 | 10 | Intended_use: 11 | Primary_intended_uses: "Expert-level automated segmentation and volume estimation of brain meningiomas on MRI scans." 12 | Primary_intended_users: "Neurologists, radiologists, and researchers in neuroimaging." 13 | Out_of_scope_use_cases: "Not intended for non-meningioma brain pathology segmentation or use outside clinical research settings." 14 | 15 | Factors: 16 | Relevant_factors: "Tumor size, location, and MRI characteristics." 17 | Evaluation_factors: "Segmentation accuracy, robustness to tumor variations, generalization to diverse MRI datasets." 18 | Model_performance_measures: "Dice score, Hausdorff distance, average Hausdorff distance." 19 | 20 | Metrics: 21 | Model Performance Measures: "Accuracy of tumor segmentation, volume estimation accuracy." 22 | Decision Thresholds: "Segmentation threshold optimized for meningiomas." 23 | Variation Approaches: "Performance evaluation across different sizes and locations of meningiomas." 24 | 25 | Evaluation Data: 26 | Datasets: "806 pre-surgery exams containing 936 tumors from radiological repositories of two major academic hospitals." 27 | Motivation: "To develop a robust model for meningioma segmentation in diverse clinical scenarios." 28 | Preprocessing: "Standardized image processing, conforming to 256 slices with 1mm3 isotropic voxels." 29 | 30 | Training Data: 31 | Datasets: "Dataset of 10099 high-resolution T1-weighted MRI scans of healthy brains and 806 contrast-enhanced T1-weighted meningioma MRIs." 32 | Motivation: "To create a reliable model for meningioma segmentation, capable of generalizing across various MRI types and tumor characteristics." 33 | Preprocessing: "MRI scans conformed to standard dimensions, tumors segmented by experts for training." 34 | 35 | Quantitative Analyses: 36 | Unitary Results: "Model achieved high segmentation accuracy, comparable to human experts." 37 | Intersectional Results: "Performance analysis showed consistency across different tumor sizes and MRI characteristics." 
38 | 39 | Ethical Considerations: 40 | "Ethical considerations include ensuring patient privacy and responsible use of AI in clinical settings." 41 | 42 | Caveats and Recommendations: 43 | "While the model shows high accuracy, its predictions should be used in conjunction with expert clinical judgment. Further validation in diverse clinical settings is recommended." 44 | -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_16/generate.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import os 3 | import click 4 | import logging 5 | import tensorflow as tf 6 | import numpy as np 7 | from pathlib import Path 8 | import nibabel as nib 9 | 10 | 11 | @click.command() 12 | @click.argument("outfile") 13 | @click.option( 14 | "-m", 15 | "--model", 16 | type=click.Path(exists=True), 17 | required=True, 18 | help="Path to saved models directory containing the HDF5 files.", 19 | ) 20 | @click.option( 21 | "-l", 22 | "--latent-size", 23 | type=int, 24 | default=1024, 25 | help=("Input latent size for the generator."), 26 | ) 27 | @click.option( 28 | "--drange-in", 29 | default=(-1, 1), 30 | type=int, 31 | nargs=2, 32 | help="Range of values of image generated by model.", 33 | ) 34 | @click.option( 35 | "--drange-out", 36 | default=(0, 255), 37 | type=int, 38 | nargs=2, 39 | help="Desired output range of values of image.", 40 | ) 41 | @click.option( 42 | "-o", 43 | "--output-shape", 44 | default=(128, 128, 128), 45 | type=int, 46 | nargs=3, 47 | help="Shape of sub-volumes to generate.", 48 | ) 49 | @click.option( 50 | "-v", "--verbose", is_flag=True, help="Print progress bar.", 51 | ) 52 | def generate( 53 | *, 54 | outfile, 55 | model, 56 | latent_size, 57 | drange_in, 58 | drange_out, 59 | output_shape, 60 | verbose, 61 | ): 62 | """Generate images from latents using a trained GAN model. 63 | The generated image is saved to OUTFILE. 64 | """ 65 | 66 | if not verbose: 67 | # Suppress most logging messages. 68 | os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" 69 | tf.get_logger().setLevel(logging.ERROR) 70 | 71 | outfile = Path(outfile).resolve() 72 | 73 | if outfile.exists(): 74 | raise FileExistsError( 75 | "Output file already exists. Will not overwrite {}".format(outfile) 76 | ) 77 | 78 | if verbose: 79 | click.echo("Generating ...") 80 | try: 81 | latents = tf.random.normal((1, latent_size)) 82 | #output_resolution = int(output_shape[0]) 83 | #model = os.path.join(model, "generator_res_{}".format(output_resolution)) 84 | generator = tf.saved_model.load(model) 85 | generate = generator.signatures["serving_default"] 86 | img = generate(latents)["generated"] 87 | img = np.squeeze(img) 88 | except Exception: 89 | click.echo(click.style("ERROR: generation failed. 
See error trace.", fg="red")) 90 | raise 91 | 92 | if verbose: 93 | click.echo("Saving ...") 94 | 95 | img = nib.Nifti1Image(img.astype(np.uint8), np.eye(4)) 96 | nib.save(img, str(outfile)) 97 | 98 | if verbose: 99 | click.echo("Output saved to {}".format(outfile)) 100 | 101 | 102 | if __name__ == '__main__': 103 | generate() 104 | -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_32/generate.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import os 3 | import click 4 | import logging 5 | import tensorflow as tf 6 | import numpy as np 7 | from pathlib import Path 8 | import nibabel as nib 9 | 10 | 11 | @click.command() 12 | @click.argument("outfile") 13 | @click.option( 14 | "-m", 15 | "--model", 16 | type=click.Path(exists=True), 17 | required=True, 18 | help="Path to saved models directory containing the HDF5 files.", 19 | ) 20 | @click.option( 21 | "-l", 22 | "--latent-size", 23 | type=int, 24 | default=1024, 25 | help=("Input latent size for the generator."), 26 | ) 27 | @click.option( 28 | "--drange-in", 29 | default=(-1, 1), 30 | type=int, 31 | nargs=2, 32 | help="Range of values of image generated by model.", 33 | ) 34 | @click.option( 35 | "--drange-out", 36 | default=(0, 255), 37 | type=int, 38 | nargs=2, 39 | help="Desired output range of values of image.", 40 | ) 41 | @click.option( 42 | "-o", 43 | "--output-shape", 44 | default=(128, 128, 128), 45 | type=int, 46 | nargs=3, 47 | help="Shape of sub-volumes to generate.", 48 | ) 49 | @click.option( 50 | "-v", "--verbose", is_flag=True, help="Print progress bar.", 51 | ) 52 | def generate( 53 | *, 54 | outfile, 55 | model, 56 | latent_size, 57 | drange_in, 58 | drange_out, 59 | output_shape, 60 | verbose, 61 | ): 62 | """Generate images from latents using a trained GAN model. 63 | The generated image is saved to OUTFILE. 64 | """ 65 | 66 | if not verbose: 67 | # Suppress most logging messages. 68 | os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" 69 | tf.get_logger().setLevel(logging.ERROR) 70 | 71 | outfile = Path(outfile).resolve() 72 | 73 | if outfile.exists(): 74 | raise FileExistsError( 75 | "Output file already exists. Will not overwrite {}".format(outfile) 76 | ) 77 | 78 | if verbose: 79 | click.echo("Generating ...") 80 | try: 81 | latents = tf.random.normal((1, latent_size)) 82 | #output_resolution = int(output_shape[0]) 83 | #model = os.path.join(model, "generator_res_{}".format(output_resolution)) 84 | generator = tf.saved_model.load(model) 85 | generate = generator.signatures["serving_default"] 86 | img = generate(latents)["generated"] 87 | img = np.squeeze(img) 88 | except Exception: 89 | click.echo(click.style("ERROR: generation failed. 
See error trace.", fg="red")) 90 | raise 91 | 92 | if verbose: 93 | click.echo("Saving ...") 94 | 95 | img = nib.Nifti1Image(img.astype(np.uint8), np.eye(4)) 96 | nib.save(img, str(outfile)) 97 | 98 | if verbose: 99 | click.echo("Output saved to {}".format(outfile)) 100 | 101 | 102 | if __name__ == '__main__': 103 | generate() 104 | -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_64/generate.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import os 3 | import click 4 | import logging 5 | import tensorflow as tf 6 | import numpy as np 7 | from pathlib import Path 8 | import nibabel as nib 9 | 10 | 11 | @click.command() 12 | @click.argument("outfile") 13 | @click.option( 14 | "-m", 15 | "--model", 16 | type=click.Path(exists=True), 17 | required=True, 18 | help="Path to saved models directory containing the HDF5 files.", 19 | ) 20 | @click.option( 21 | "-l", 22 | "--latent-size", 23 | type=int, 24 | default=1024, 25 | help=("Input latent size for the generator."), 26 | ) 27 | @click.option( 28 | "--drange-in", 29 | default=(-1, 1), 30 | type=int, 31 | nargs=2, 32 | help="Range of values of image generated by model.", 33 | ) 34 | @click.option( 35 | "--drange-out", 36 | default=(0, 255), 37 | type=int, 38 | nargs=2, 39 | help="Desired output range of values of image.", 40 | ) 41 | @click.option( 42 | "-o", 43 | "--output-shape", 44 | default=(128, 128, 128), 45 | type=int, 46 | nargs=3, 47 | help="Shape of sub-volumes to generate.", 48 | ) 49 | @click.option( 50 | "-v", "--verbose", is_flag=True, help="Print progress bar.", 51 | ) 52 | def generate( 53 | *, 54 | outfile, 55 | model, 56 | latent_size, 57 | drange_in, 58 | drange_out, 59 | output_shape, 60 | verbose, 61 | ): 62 | """Generate images from latents using a trained GAN model. 63 | The generated image is saved to OUTFILE. 64 | """ 65 | 66 | if not verbose: 67 | # Suppress most logging messages. 68 | os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" 69 | tf.get_logger().setLevel(logging.ERROR) 70 | 71 | outfile = Path(outfile).resolve() 72 | 73 | if outfile.exists(): 74 | raise FileExistsError( 75 | "Output file already exists. Will not overwrite {}".format(outfile) 76 | ) 77 | 78 | if verbose: 79 | click.echo("Generating ...") 80 | try: 81 | latents = tf.random.normal((1, latent_size)) 82 | #output_resolution = int(output_shape[0]) 83 | #model = os.path.join(model, "generator_res_{}".format(output_resolution)) 84 | generator = tf.saved_model.load(model) 85 | generate = generator.signatures["serving_default"] 86 | img = generate(latents)["generated"] 87 | img = np.squeeze(img) 88 | except Exception: 89 | click.echo(click.style("ERROR: generation failed. 
See error trace.", fg="red")) 90 | raise 91 | 92 | if verbose: 93 | click.echo("Saving ...") 94 | 95 | img = nib.Nifti1Image(img.astype(np.uint8), np.eye(4)) 96 | nib.save(img, str(outfile)) 97 | 98 | if verbose: 99 | click.echo("Output saved to {}".format(outfile)) 100 | 101 | 102 | if __name__ == '__main__': 103 | generate() 104 | -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_8/generate.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import os 3 | import click 4 | import logging 5 | import tensorflow as tf 6 | import numpy as np 7 | from pathlib import Path 8 | import nibabel as nib 9 | 10 | 11 | @click.command() 12 | @click.argument("outfile") 13 | @click.option( 14 | "-m", 15 | "--model", 16 | type=click.Path(exists=True), 17 | required=True, 18 | help="Path to saved models directory containing the HDF5 files.", 19 | ) 20 | @click.option( 21 | "-l", 22 | "--latent-size", 23 | type=int, 24 | default=1024, 25 | help=("Input latent size for the generator."), 26 | ) 27 | @click.option( 28 | "--drange-in", 29 | default=(-1, 1), 30 | type=int, 31 | nargs=2, 32 | help="Range of values of image generated by model.", 33 | ) 34 | @click.option( 35 | "--drange-out", 36 | default=(0, 255), 37 | type=int, 38 | nargs=2, 39 | help="Desired output range of values of image.", 40 | ) 41 | @click.option( 42 | "-o", 43 | "--output-shape", 44 | default=(128, 128, 128), 45 | type=int, 46 | nargs=3, 47 | help="Shape of sub-volumes to generate.", 48 | ) 49 | @click.option( 50 | "-v", "--verbose", is_flag=True, help="Print progress bar.", 51 | ) 52 | def generate( 53 | *, 54 | outfile, 55 | model, 56 | latent_size, 57 | drange_in, 58 | drange_out, 59 | output_shape, 60 | verbose, 61 | ): 62 | """Generate images from latents using a trained GAN model. 63 | The generated image is saved to OUTFILE. 64 | """ 65 | 66 | if not verbose: 67 | # Suppress most logging messages. 68 | os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" 69 | tf.get_logger().setLevel(logging.ERROR) 70 | 71 | outfile = Path(outfile).resolve() 72 | 73 | if outfile.exists(): 74 | raise FileExistsError( 75 | "Output file already exists. Will not overwrite {}".format(outfile) 76 | ) 77 | 78 | if verbose: 79 | click.echo("Generating ...") 80 | try: 81 | latents = tf.random.normal((1, latent_size)) 82 | #output_resolution = int(output_shape[0]) 83 | #model = os.path.join(model, "generator_res_{}".format(output_resolution)) 84 | generator = tf.saved_model.load(model) 85 | generate = generator.signatures["serving_default"] 86 | img = generate(latents)["generated"] 87 | img = np.squeeze(img) 88 | except Exception: 89 | click.echo(click.style("ERROR: generation failed. 
See error trace.", fg="red")) 90 | raise 91 | 92 | if verbose: 93 | click.echo("Saving ...") 94 | 95 | img = nib.Nifti1Image(img.astype(np.uint8), np.eye(4)) 96 | nib.save(img, str(outfile)) 97 | 98 | if verbose: 99 | click.echo("Output saved to {}".format(outfile)) 100 | 101 | 102 | if __name__ == '__main__': 103 | generate() 104 | -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_128/generate.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import os 3 | import click 4 | import logging 5 | import tensorflow as tf 6 | import numpy as np 7 | from pathlib import Path 8 | import nibabel as nib 9 | 10 | 11 | @click.command() 12 | @click.argument("outfile") 13 | @click.option( 14 | "-m", 15 | "--model", 16 | type=click.Path(exists=True), 17 | required=True, 18 | help="Path to saved models directory containing the HDF5 files.", 19 | ) 20 | @click.option( 21 | "-l", 22 | "--latent-size", 23 | type=int, 24 | default=1024, 25 | help=("Input latent size for the generator."), 26 | ) 27 | @click.option( 28 | "--drange-in", 29 | default=(-1, 1), 30 | type=int, 31 | nargs=2, 32 | help="Range of values of image generated by model.", 33 | ) 34 | @click.option( 35 | "--drange-out", 36 | default=(0, 255), 37 | type=int, 38 | nargs=2, 39 | help="Desired output range of values of image.", 40 | ) 41 | @click.option( 42 | "-o", 43 | "--output-shape", 44 | default=(128, 128, 128), 45 | type=int, 46 | nargs=3, 47 | help="Shape of sub-volumes to generate.", 48 | ) 49 | @click.option( 50 | "-v", "--verbose", is_flag=True, help="Print progress bar.", 51 | ) 52 | def generate( 53 | *, 54 | outfile, 55 | model, 56 | latent_size, 57 | drange_in, 58 | drange_out, 59 | output_shape, 60 | verbose, 61 | ): 62 | """Generate images from latents using a trained GAN model. 63 | The generated image is saved to OUTFILE. 64 | """ 65 | 66 | if not verbose: 67 | # Suppress most logging messages. 68 | os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" 69 | tf.get_logger().setLevel(logging.ERROR) 70 | 71 | outfile = Path(outfile).resolve() 72 | 73 | if outfile.exists(): 74 | raise FileExistsError( 75 | "Output file already exists. Will not overwrite {}".format(outfile) 76 | ) 77 | 78 | if verbose: 79 | click.echo("Generating ...") 80 | try: 81 | latents = tf.random.normal((1, latent_size)) 82 | #output_resolution = int(output_shape[0]) 83 | #model = os.path.join(model, "generator_res_{}".format(output_resolution)) 84 | generator = tf.saved_model.load(model) 85 | generate = generator.signatures["serving_default"] 86 | img = generate(latents)["generated"] 87 | img = np.squeeze(img) 88 | except Exception: 89 | click.echo(click.style("ERROR: generation failed. 
See error trace.", fg="red")) 90 | raise 91 | 92 | if verbose: 93 | click.echo("Saving ...") 94 | 95 | img = nib.Nifti1Image(img.astype(np.uint8), np.eye(4)) 96 | nib.save(img, str(outfile)) 97 | 98 | if verbose: 99 | click.echo("Output saved to {}".format(outfile)) 100 | 101 | 102 | if __name__ == '__main__': 103 | generate() 104 | -------------------------------------------------------------------------------- /neuronets/braingen/0.1.0/generator_res_256/generate.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import os 3 | import click 4 | import logging 5 | import tensorflow as tf 6 | import numpy as np 7 | from pathlib import Path 8 | import nibabel as nib 9 | 10 | 11 | @click.command() 12 | @click.argument("outfile") 13 | @click.option( 14 | "-m", 15 | "--model", 16 | type=click.Path(exists=True), 17 | required=True, 18 | help="Path to saved models directory containing the HDF5 files.", 19 | ) 20 | @click.option( 21 | "-l", 22 | "--latent-size", 23 | type=int, 24 | default=1024, 25 | help=("Input latent size for the generator."), 26 | ) 27 | @click.option( 28 | "--drange-in", 29 | default=(-1, 1), 30 | type=int, 31 | nargs=2, 32 | help="Range of values of image generated by model.", 33 | ) 34 | @click.option( 35 | "--drange-out", 36 | default=(0, 255), 37 | type=int, 38 | nargs=2, 39 | help="Desired output range of values of image.", 40 | ) 41 | @click.option( 42 | "-o", 43 | "--output-shape", 44 | default=(128, 128, 128), 45 | type=int, 46 | nargs=3, 47 | help="Shape of sub-volumes to generate.", 48 | ) 49 | @click.option( 50 | "-v", "--verbose", is_flag=True, help="Print progress bar.", 51 | ) 52 | def generate( 53 | *, 54 | outfile, 55 | model, 56 | latent_size, 57 | drange_in, 58 | drange_out, 59 | output_shape, 60 | verbose, 61 | ): 62 | """Generate images from latents using a trained GAN model. 63 | The generated image is saved to OUTFILE. 64 | """ 65 | 66 | if not verbose: 67 | # Suppress most logging messages. 68 | os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" 69 | tf.get_logger().setLevel(logging.ERROR) 70 | 71 | outfile = Path(outfile).resolve() 72 | 73 | if outfile.exists(): 74 | raise FileExistsError( 75 | "Output file already exists. Will not overwrite {}".format(outfile) 76 | ) 77 | 78 | if verbose: 79 | click.echo("Generating ...") 80 | try: 81 | latents = tf.random.normal((1, latent_size)) 82 | #output_resolution = int(output_shape[0]) 83 | #model = os.path.join(model, "generator_res_{}".format(output_resolution)) 84 | generator = tf.saved_model.load(model) 85 | generate = generator.signatures["serving_default"] 86 | img = generate(latents)["generated"] 87 | img = np.squeeze(img) 88 | except Exception: 89 | click.echo(click.style("ERROR: generation failed. 
See error trace.", fg="red")) 90 | raise 91 | 92 | if verbose: 93 | click.echo("Saving ...") 94 | 95 | img = nib.Nifti1Image(img.astype(np.uint8), np.eye(4)) 96 | nib.save(img, str(outfile)) 97 | 98 | if verbose: 99 | click.echo("Output saved to {}".format(outfile)) 100 | 101 | 102 | if __name__ == '__main__': 103 | generate() 104 | -------------------------------------------------------------------------------- /UCL/SynthSeg/1.0.0/predict.py: -------------------------------------------------------------------------------- 1 | # This code is adapted from SynthSeg_predict.py to be compatible for Nobrainer-zoo 2 | """This script enables to launch predictions with SynthSeg from the terminal.""" 3 | 4 | # print information 5 | print('\n') 6 | print('SynthSeg prediction') 7 | print('\n') 8 | 9 | # python imports 10 | import os 11 | import sys 12 | from argparse import ArgumentParser 13 | 14 | # parse arguments 15 | parser = ArgumentParser() 16 | 17 | # repository location and model path 18 | parser.add_argument("--repo_path", type=str, dest="repo_path", help="repository download location.") 19 | parser.add_argument("--model_path", type=str, dest="path_model", help="saved model path") 20 | 21 | # input/outputs 22 | parser.add_argument("--i", type=str, dest='path_images', 23 | help="Image(s) to segment. Can be a path to an image or to a folder.") 24 | parser.add_argument("--o", type=str, dest="path_segmentations", 25 | help="Segmentation output(s). Must be a folder if --i designates a folder.") 26 | parser.add_argument("--post", type=str, default=None, dest="path_posteriors", 27 | help="(optional) Posteriors output(s). Must be a folder if --i designates a folder.") 28 | parser.add_argument("--resample", type=str, default=None, dest="path_resampled", 29 | help="(optional) Resampled image(s). Must be a folder if --i designates a folder.") 30 | parser.add_argument("--vol", type=str, default=None, dest="path_volumes", 31 | help="(optional) Output CSV file with volumes for all structures and subjects.") 32 | 33 | # parameters 34 | parser.add_argument("--crop", nargs='+', type=int, default=192, dest="cropping", 35 | help="(optional) Size of 3D patches to analyse. Default is 192.") 36 | parser.add_argument("--threads", type=int, default=1, dest="threads", 37 | help="(optional) Number of cores to be used. 
Default is 1.") 38 | parser.add_argument("--cpu", action="store_true", help="(optional) Enforce running with CPU rather than GPU.") 39 | 40 | # parse commandline 41 | args = vars(parser.parse_args()) 42 | 43 | # add the repository main folder to python path and import ./SynthSeg/predict.py 44 | repo_path = args["repo_path"] 45 | sys.path.append(repo_path) 46 | args.pop("repo_path") 47 | from SynthSeg.predict import predict 48 | 49 | # enforce CPU processing if necessary 50 | if args['cpu']: 51 | print('using CPU, hiding all CUDA_VISIBLE_DEVICES') 52 | os.environ['CUDA_VISIBLE_DEVICES'] = '-1' 53 | del args['cpu'] 54 | 55 | # limit the number of threads to be used if running on CPU 56 | import tensorflow as tf 57 | tf.config.threading.set_intra_op_parallelism_threads(args['threads']) 58 | del args['threads'] 59 | 60 | 61 | # default parameters 62 | args['segmentation_labels'] = os.path.join(repo_path, 'data/labels_classes_priors/segmentation_labels.npy') 63 | args['n_neutral_labels'] = 18 64 | args['segmentation_label_names'] = os.path.join(repo_path, 'data/labels_classes_priors/segmentation_names.npy') 65 | args['topology_classes'] = os.path.join(repo_path, 'data/labels_classes_priors/topological_classes.npy') 66 | #args['path_model'] = os.path.join(repo_path, 'models/SynthSeg.h5') # using model added to the zoo repository 67 | args['padding'] = args['cropping'] 68 | 69 | # call predict 70 | predict(**args) 71 | -------------------------------------------------------------------------------- /DDIG/VoxelMorph/1.0.0/register.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ 4 | Example script to register two volumes with VoxelMorph models. 5 | 6 | Please make sure to use trained models appropriately. Let's say we have a model trained to register 7 | a scan (moving) to an atlas (fixed). To register a scan to the atlas and save the warp field, run: 8 | 9 | register.py --moving moving.nii.gz --fixed fixed.nii.gz --model model.h5 10 | --moved moved.nii.gz --warp warp.nii.gz 11 | 12 | The source and target input images are expected to be affinely registered. 13 | 14 | If you use this code, please cite the following, and read function docs for further info/citations 15 | VoxelMorph: A Learning Framework for Deformable Medical Image Registration 16 | G. Balakrishnan, A. Zhao, M. R. Sabuncu, J. Guttag, A.V. Dalca. 17 | IEEE TMI: Transactions on Medical Imaging. 38(8). pp 1788-1800. 2019. 18 | 19 | Copyright 2020 Adrian V. Dalca 20 | 21 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in 22 | compliance with the License. You may obtain a copy of the License at 23 | 24 | http://www.apache.org/licenses/LICENSE-2.0 25 | 26 | Unless required by applicable law or agreed to in writing, software distributed under the License is 27 | distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 28 | implied. See the License for the specific language governing permissions and limitations under the 29 | License. 
30 | """ 31 | 32 | import os 33 | import argparse 34 | import numpy as np 35 | import voxelmorph as vxm 36 | import tensorflow as tf 37 | 38 | 39 | # parse commandline args 40 | parser = argparse.ArgumentParser() 41 | parser.add_argument('--moving', required=True, help='moving image (source) filename') 42 | parser.add_argument('--fixed', required=True, help='fixed image (target) filename') 43 | parser.add_argument('--moved', required=True, help='warped image output filename') 44 | parser.add_argument('--model', required=True, help='keras model for nonlinear registration') 45 | parser.add_argument('--warp', help='output warp deformation filename') 46 | parser.add_argument('-g', '--gpu', help='GPU number(s) - if not supplied, CPU is used') 47 | parser.add_argument('--multichannel', action='store_true', 48 | help='specify that data has multiple channels') 49 | args = parser.parse_args() 50 | 51 | # tensorflow device handling 52 | device, nb_devices = vxm.tf.utils.setup_device(args.gpu) 53 | 54 | # load moving and fixed images 55 | add_feat_axis = not args.multichannel 56 | moving = vxm.py.utils.load_volfile(args.moving, add_batch_axis=True, add_feat_axis=add_feat_axis) 57 | fixed, fixed_affine = vxm.py.utils.load_volfile( 58 | args.fixed, add_batch_axis=True, add_feat_axis=add_feat_axis, ret_affine=True) 59 | 60 | inshape = moving.shape[1:-1] 61 | nb_feats = moving.shape[-1] 62 | 63 | with tf.device(device): 64 | # load model and predict 65 | config = dict(inshape=inshape, input_model=None) 66 | warp = vxm.networks.VxmDense.load(args.model, **config).register(moving, fixed) 67 | moved = vxm.networks.Transform(inshape, nb_feats=nb_feats).predict([moving, warp]) 68 | 69 | # save warp 70 | if args.warp: 71 | vxm.py.utils.save_volfile(warp.squeeze(), args.warp, fixed_affine) 72 | 73 | # save moved image 74 | vxm.py.utils.save_volfile(moved.squeeze(), args.moved, fixed_affine) 75 | -------------------------------------------------------------------------------- /DDIG/SynthMorph/1.0.0/brains/register.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ 4 | Example script to register two volumes with VoxelMorph models. 5 | 6 | Please make sure to use trained models appropriately. Let's say we have a model trained to register 7 | a scan (moving) to an atlas (fixed). To register a scan to the atlas and save the warp field, run: 8 | 9 | register.py --moving moving.nii.gz --fixed fixed.nii.gz --model model.h5 10 | --moved moved.nii.gz --warp warp.nii.gz 11 | 12 | The source and target input images are expected to be affinely registered. 13 | 14 | If you use this code, please cite the following, and read function docs for further info/citations 15 | VoxelMorph: A Learning Framework for Deformable Medical Image Registration 16 | G. Balakrishnan, A. Zhao, M. R. Sabuncu, J. Guttag, A.V. Dalca. 17 | IEEE TMI: Transactions on Medical Imaging. 38(8). pp 1788-1800. 2019. 18 | 19 | Copyright 2020 Adrian V. Dalca 20 | 21 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in 22 | compliance with the License. You may obtain a copy of the License at 23 | 24 | http://www.apache.org/licenses/LICENSE-2.0 25 | 26 | Unless required by applicable law or agreed to in writing, software distributed under the License is 27 | distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 28 | implied. 
See the License for the specific language governing permissions and limitations under the 29 | License. 30 | """ 31 | 32 | import os 33 | import argparse 34 | import numpy as np 35 | import voxelmorph as vxm 36 | import tensorflow as tf 37 | 38 | 39 | # parse commandline args 40 | parser = argparse.ArgumentParser() 41 | parser.add_argument('--moving', required=True, help='moving image (source) filename') 42 | parser.add_argument('--fixed', required=True, help='fixed image (target) filename') 43 | parser.add_argument('--moved', required=True, help='warped image output filename') 44 | parser.add_argument('--model', required=True, help='keras model for nonlinear registration') 45 | parser.add_argument('--warp', help='output warp deformation filename') 46 | parser.add_argument('-g', '--gpu', help='GPU number(s) - if not supplied, CPU is used') 47 | parser.add_argument('--multichannel', action='store_true', 48 | help='specify that data has multiple channels') 49 | args = parser.parse_args() 50 | 51 | # tensorflow device handling 52 | device, nb_devices = vxm.tf.utils.setup_device(args.gpu) 53 | 54 | # load moving and fixed images 55 | add_feat_axis = not args.multichannel 56 | moving = vxm.py.utils.load_volfile(args.moving, add_batch_axis=True, add_feat_axis=add_feat_axis) 57 | fixed, fixed_affine = vxm.py.utils.load_volfile( 58 | args.fixed, add_batch_axis=True, add_feat_axis=add_feat_axis, ret_affine=True) 59 | 60 | inshape = moving.shape[1:-1] 61 | nb_feats = moving.shape[-1] 62 | 63 | with tf.device(device): 64 | # load model and predict 65 | config = dict(inshape=inshape, input_model=None) 66 | warp = vxm.networks.VxmDense.load(args.model, **config).register(moving, fixed) 67 | moved = vxm.networks.Transform(inshape, nb_feats=nb_feats).predict([moving, warp]) 68 | 69 | # save warp 70 | if args.warp: 71 | vxm.py.utils.save_volfile(warp.squeeze(), args.warp, fixed_affine) 72 | 73 | # save moved image 74 | vxm.py.utils.save_volfile(moved.squeeze(), args.moved, fixed_affine) 75 | -------------------------------------------------------------------------------- /DDIG/SynthMorph/1.0.0/shapes/register.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ 4 | Example script to register two volumes with VoxelMorph models. 5 | 6 | Please make sure to use trained models appropriately. Let's say we have a model trained to register 7 | a scan (moving) to an atlas (fixed). To register a scan to the atlas and save the warp field, run: 8 | 9 | register.py --moving moving.nii.gz --fixed fixed.nii.gz --model model.h5 10 | --moved moved.nii.gz --warp warp.nii.gz 11 | 12 | The source and target input images are expected to be affinely registered. 13 | 14 | If you use this code, please cite the following, and read function docs for further info/citations 15 | VoxelMorph: A Learning Framework for Deformable Medical Image Registration 16 | G. Balakrishnan, A. Zhao, M. R. Sabuncu, J. Guttag, A.V. Dalca. 17 | IEEE TMI: Transactions on Medical Imaging. 38(8). pp 1788-1800. 2019. 18 | 19 | Copyright 2020 Adrian V. Dalca 20 | 21 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in 22 | compliance with the License. 
You may obtain a copy of the License at 23 | 24 | http://www.apache.org/licenses/LICENSE-2.0 25 | 26 | Unless required by applicable law or agreed to in writing, software distributed under the License is 27 | distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 28 | implied. See the License for the specific language governing permissions and limitations under the 29 | License. 30 | """ 31 | 32 | import os 33 | import argparse 34 | import numpy as np 35 | import voxelmorph as vxm 36 | import tensorflow as tf 37 | 38 | 39 | # parse commandline args 40 | parser = argparse.ArgumentParser() 41 | parser.add_argument('--moving', required=True, help='moving image (source) filename') 42 | parser.add_argument('--fixed', required=True, help='fixed image (target) filename') 43 | parser.add_argument('--moved', required=True, help='warped image output filename') 44 | parser.add_argument('--model', required=True, help='keras model for nonlinear registration') 45 | parser.add_argument('--warp', help='output warp deformation filename') 46 | parser.add_argument('-g', '--gpu', help='GPU number(s) - if not supplied, CPU is used') 47 | parser.add_argument('--multichannel', action='store_true', 48 | help='specify that data has multiple channels') 49 | args = parser.parse_args() 50 | 51 | # tensorflow device handling 52 | device, nb_devices = vxm.tf.utils.setup_device(args.gpu) 53 | 54 | # load moving and fixed images 55 | add_feat_axis = not args.multichannel 56 | moving = vxm.py.utils.load_volfile(args.moving, add_batch_axis=True, add_feat_axis=add_feat_axis) 57 | fixed, fixed_affine = vxm.py.utils.load_volfile( 58 | args.fixed, add_batch_axis=True, add_feat_axis=add_feat_axis, ret_affine=True) 59 | 60 | inshape = moving.shape[1:-1] 61 | nb_feats = moving.shape[-1] 62 | 63 | with tf.device(device): 64 | # load model and predict 65 | config = dict(inshape=inshape, input_model=None) 66 | warp = vxm.networks.VxmDense.load(args.model, **config).register(moving, fixed) 67 | moved = vxm.networks.Transform(inshape, nb_feats=nb_feats).predict([moving, warp]) 68 | 69 | # save warp 70 | if args.warp: 71 | vxm.py.utils.save_volfile(warp.squeeze(), args.warp, fixed_affine) 72 | 73 | # save moved image 74 | vxm.py.utils.save_volfile(moved.squeeze(), args.moved, fixed_affine) 75 | -------------------------------------------------------------------------------- /neuronets/ams/0.1.0/spec.yaml: -------------------------------------------------------------------------------- 1 | #### container info 2 | image: 3 | singularity: nobrainer-zoo_nobrainer.sif 4 | docker: neuronets/nobrainer-zoo:nobrainer 5 | 6 | #### repository info 7 | repository: 8 | repo_url: "git@github.com:neuronets/nobrainer.git" 9 | committish: "72aa211b" 10 | repo_download: False 11 | repo_download_location: "None" 12 | 13 | #### required fields for prediction 14 | inference: 15 | prediction_script: "nobrainer/nobrainer/cli/main.py" 16 | command: f"nobrainer predict -m {model_path} {infile[0]} {outfile}" 17 | # TODO: we should add help for model options 18 | options: 19 | block_shape: {mandatory: False, default: [128, 128, 128], argstr: "-b", type: "list"} 20 | resize_features_to: {mandatory: False, default: [256, 256, 256], argstr: "-r", type: "list"} 21 | threshold: {mandatory: False, default: 0.3, argstr: "-t", type: "float"} 22 | rotate_and_predict: {argstr: "--rotate-and-predict", is_flag: true} 23 | largest_label: {argstr: "-l", is_flag: true} 24 | verbose: {argstr: "-v", is_flag: true} 25 | #### input data 
characteristics 26 | data_spec: 27 | infile: {n_files: 1} 28 | outfile: {n_files: 1} 29 | 30 | #### required fields for model training 31 | train: 32 | #### Train script 33 | train_script: train.py 34 | 35 | # the sample data used if data_patterns is not provided by the user 36 | sample_data: sample_MGH 37 | 38 | #### general settings 39 | name: unet_AMS 40 | is_train: true 41 | #use_visdom: false # for visualization 42 | #visdom_port: 8067 43 | model: cnn 44 | device: cuda:0 45 | 46 | #### datasets 47 | n_classes: 1 48 | dataset_train: 49 | data_location: data/ 50 | shuffle_buffer_size: 10 51 | block_shape: 32 52 | volume_shape: 256 53 | batch_size: 2 # per GPU 54 | augment: False 55 | n_train: 9 56 | num_parallel_calls: 2 # keeping same as batch size 57 | dataset_test: # test params may differ from train params 58 | data_location: data/ 59 | shuffle_buffer_size: 0 60 | block_shape: 32 61 | volume_shape: 256 62 | batch_size: 1 63 | n_test: 1 64 | num_parallel_calls: 1 65 | augment: False 66 | 67 | #### network structures 68 | network: 69 | model: unet 70 | batchnorm: True 71 | #### training settings: learning rate scheme, loss 72 | training: 73 | epoch: 5 74 | lr: .00001 # adam 75 | loss: nobrainer.losses.dice 76 | metrics: [nobrainer.metrics.dice, nobrainer.metrics.jaccard] 77 | 78 | #### logger 79 | logger: 80 | ckpt_path: ckpts/ 81 | 82 | path: 83 | save_model: model/ 84 | pretrained_model: none 85 | 86 | #### training data characteristics 87 | training_data_info: 88 | data_number: 89 | total: None 90 | train: None 91 | evaluate: None 92 | test: None 93 | biological_sex: 94 | male: None 95 | female: None 96 | age_histogram: None 97 | race: None 98 | imaging_contrast_info: None 99 | dataset_sources: None 100 | data_sites: 101 | number_of_sites: None 102 | sites: None 103 | scanner_models: None 104 | hardware: None 105 | training_parameters: 106 | input_shape: "256x256x256" 107 | block_shape: "128x128x128" 108 | n_classes: 1 109 | lr: None 110 | n_epochs: None 111 | total_batch_size: None 112 | number_of_gpus: None 113 | loss_function: None 114 | metrics: None 115 | data_preprocessing: None 116 | data_augmentation: None -------------------------------------------------------------------------------- /neuronets/brainy/0.1.0/spec.yaml: -------------------------------------------------------------------------------- 1 | #### container info 2 | image: 3 | singularity: nobrainer-zoo_nobrainer.sif 4 | docker: neuronets/nobrainer-zoo:nobrainer 5 | 6 | #### repository info 7 | repository: 8 | repo_url: "git@github.com:neuronets/nobrainer.git" 9 | committish: "72aa211b" 10 | repo_download: False 11 | repo_download_location: "None" 12 | 13 | #### required fields for prediction 14 | inference: 15 | prediction_script: "nobrainer/nobrainer/cli/main.py" 16 | command: f"nobrainer predict -m {model_path} {infile[0]} {outfile}" 17 | # TODO: we should add help for model options 18 | options: 19 | block_shape: {mandatory: False, default: [128, 128, 128], argstr: "-b", type: "list"} 20 | resize_features_to: {mandatory: False, default: [256, 256, 256], argstr: "-r", type: "list"} 21 | threshold: {mandatory: False, default: 0.3, argstr: "-t", type: "float"} 22 | rotate_and_predict: {argstr: "--rotate-and-predict", is_flag: true} 23 | largest_label: {argstr: "-l", is_flag: true} 24 | verbose: {argstr: "-v", is_flag: true} 25 | #### input data characteristics 26 | data_spec: 27 | infile: {n_files: 1} 28 | outfile: {n_files: 1} 29 | 30 | #### required fields for model training 31 | train: 32 | #### Train script
33 | train_script: Brainy_Train_Unet.py 34 | 35 | # the sample data used if data_patterns is not provided by the user 36 | sample_data: sample_MGH 37 | 38 | #### general settings 39 | name: unet_brainy 40 | is_train: true 41 | #use_visdom: false # for visualization 42 | #visdom_port: 8067 43 | model: cnn 44 | device: cuda:0 45 | 46 | #### datasets 47 | n_classes: 1 48 | dataset_train: 49 | data_location: data/ 50 | shuffle_buffer_size: 10 51 | block_shape: 32 52 | volume_shape: 256 53 | batch_size: 2 # per GPU 54 | augment: False 55 | n_train: 9 56 | num_parallel_calls: 2 # keeping same as batch size 57 | dataset_test: # test params may differ from train params 58 | data_location: data/ 59 | shuffle_buffer_size: 0 60 | block_shape: 32 61 | volume_shape: 256 62 | batch_size: 1 63 | n_test: 1 64 | num_parallel_calls: 1 65 | augment: False 66 | 67 | #### network structures 68 | network: 69 | model: unet 70 | batchnorm: True 71 | #### training settings: learning rate scheme, loss 72 | training: 73 | epoch: 5 74 | lr: .00001 # adam 75 | loss: nobrainer.losses.dice 76 | metrics: [nobrainer.metrics.dice, nobrainer.metrics.jaccard] 77 | 78 | #### logger 79 | logger: 80 | ckpt_path: ckpts/ 81 | 82 | path: 83 | save_model: model/ 84 | pretrained_model: none 85 | 86 | #### training data characteristics 87 | training_data_info: 88 | data_number: 89 | total: 10000 90 | train: None 91 | evaluate: None 92 | test: 99 93 | biological_sex: 94 | male: None 95 | female: None 96 | age_histogram: None 97 | race: None 98 | imaging_contrast_info: "T1-weighted" 99 | dataset_sources: None 100 | data_sites: 101 | number_of_sites: None 102 | sites: None 103 | scanner_models: None 104 | hardware: None 105 | training_parameters: 106 | input_shape: "256x256x256" 107 | block_shape: "128x128x128" 108 | n_classes: 1 109 | lr: None 110 | n_epochs: 5 111 | total_batch_size: 2 112 | number_of_gpus: None 113 | loss_function: "Jaccard" 114 | metrics: "Dice" 115 | data_preprocessing: "standard score" 116 | data_augmentation: "random rigid transformations applied to 50% of data" 117 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
160 | .idea/ 161 | 162 | .DS_Store 163 | ._* 164 | .h5 165 | node_modules 166 | -------------------------------------------------------------------------------- /.github/workflows/get_model_data.js: -------------------------------------------------------------------------------- 1 | const fs = require('fs'); 2 | const path = require("path"); 3 | const yaml = require('js-yaml'); 4 | 5 | const ignoreDirs = [ 6 | '.datalad', '.github', 'docs', 'images', 'weights', 'trained-models-template' 7 | ]; 8 | 9 | // find all spec.yml files 10 | const getAllPaths = function(dirPath, arrayOfPaths) { 11 | files = fs.readdirSync(dirPath); 12 | arrayOfPaths = arrayOfPaths || []; 13 | 14 | files.forEach(function(file) { 15 | if (fs.statSync(dirPath + "/" + file).isDirectory() && !ignoreDirs.includes(file)) { 16 | arrayOfPaths = getAllPaths(dirPath + "/" + file, arrayOfPaths); 17 | } else if (file === 'spec.yaml') { 18 | arrayOfPaths.push(path.join(dirPath, "/", file)); 19 | } 20 | }); 21 | 22 | return arrayOfPaths; 23 | } 24 | 25 | function findObj(name, arr) { 26 | for (const obj of arr) { 27 | if (obj.name === name) return obj; 28 | } 29 | return null; 30 | } 31 | 32 | const paths = getAllPaths('.'); 33 | const names = []; 34 | models = {}; 35 | 36 | paths.forEach(function(path) { 37 | const doc = yaml.load(fs.readFileSync(path, 'utf8')); 38 | 39 | //create names.yml 40 | const example = doc.model.example.split(' '); 41 | let org; 42 | let modelName; 43 | let version; 44 | let isLink; 45 | let modelType; 46 | for (let i=0; i < example.length; i++) { 47 | const str = example[i]; 48 | if (str.includes(doc.model.model_name)) { 49 | const initCombinedName = str.split('/'); 50 | org = initCombinedName[0]; 51 | modelName = initCombinedName[1]; 52 | version = initCombinedName[2]; 53 | isLink = !(example[i+1] === '--model_type'); 54 | modelType = !isLink ? 
example[i+2] : 'model'; 55 | break; 56 | } 57 | } 58 | let orgStruct = findObj(org, names); 59 | if (orgStruct === null) { 60 | orgStruct = { 61 | name: org, 62 | modelNames: [] 63 | }; 64 | names.push(orgStruct); 65 | } 66 | let modelNameStruct = findObj(modelName, orgStruct.modelNames); 67 | if (modelNameStruct === null) { 68 | modelNameStruct = { 69 | name: modelName, 70 | versions: [] 71 | }; 72 | orgStruct.modelNames.push(modelNameStruct); 73 | } 74 | let versionStruct = findObj(version, modelNameStruct.versions); 75 | if (versionStruct === null) { 76 | versionStruct = { 77 | name: version, 78 | modelTypes: [] 79 | }; 80 | versionStruct.isLink = isLink; 81 | modelNameStruct.versions.push(versionStruct); 82 | } 83 | if (!isLink) { 84 | versionStruct.modelTypes.push({ 85 | name: modelType 86 | }); 87 | } 88 | 89 | // create models.yml 90 | const combined_name = org + '_' + modelName + '_' + version + '_' + modelType; 91 | models[combined_name] = doc.model; 92 | modelCardFields = [ 93 | 'model_details', 'intended_use', 'factors', 'metrics', 'eval_data', 'training_data', 'quant_analyses', 'ethical_considerations', 'caveats_recs' 94 | ]; 95 | for (const field of modelCardFields) { 96 | if (models[combined_name][field] === '') { 97 | models[combined_name][field] = 'Information not provided.'; 98 | } 99 | } 100 | 101 | // create model pages 102 | const permalink = `/${org}/${modelName}/${version}/${modelType}/`; 103 | const page = `--- 104 | layout: model_card 105 | permalink: ${permalink} 106 | combined_name: ${combined_name} 107 | org: ${org} 108 | modelName: ${modelName} 109 | version: ${version} 110 | modelType: ${modelType} 111 | --- 112 | `; 113 | const filename = `./trained-models-template/docs/_pages/${combined_name}.markdown`; 114 | fs.writeFile(filename, page, "utf8", err => { 115 | if (err) console.log(err); 116 | }); 117 | }); 118 | 119 | // write to files 120 | yamlNames = yaml.dump(names); 121 | fs.writeFile("./trained-models-template/docs/_data/names.yml", yamlNames, "utf8", err => { 122 | if (err) console.log(err); 123 | }); 124 | 125 | yamlModels = yaml.dump(models); 126 | fs.writeFile("./trained-models-template/docs/_data/models.yml", yamlModels, "utf8", err => { 127 | if (err) console.log(err); 128 | }); -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Trained models 2 | 3 | This repository contains pre-trained models for 3D neuroimaging data processing. These models can be used for their original purpose or for transfer learning on a new task. For example, a pre-trained brain extraction network can be trained on a tumor-labeling task. Models are stored following an `<org>/<model>/<version>/weights` structure. Some models (such as kwyk and braingen) have several types, meaning different structural characteristics during training led to different trained models. Therefore, the path for these models changes to `<org>/<model>/<version>/<model_type>/weights`. 4 | Instructions to add a model can be found [here](https://github.com/neuronets/trained-models/blob/master/add_model_instructions.md). 5 | 6 | ## Neuronets organization 7 | 8 | These models were trained using the [_Nobrainer_](https://github.com/neuronets/nobrainer) framework, which wraps TensorFlow/Keras.
9 | 10 | - [brainy](https://github.com/neuronets/brainy): 3D U-Net brain extraction model 11 | - [ams](https://github.com/neuronets/ams): automated meningioma segmentation model 12 | - [kwyk](https://github.com/neuronets/kwyk): bayesian neural network for brain parcellation and uncertainty estimation (Tensorflow/estimator) 13 | - braingen: progressive generation of T1-weighted brain MR scans 14 | 15 | ## UCL organization 16 | 17 | - [SynthSeg](https://github.com/BBillot/SynthSeg): 3D brain MRI segmentation model (Tensorflow/keras) 18 | - [SynthSR](https://github.com/BBillot/SynthSR): 3D brain MRI (& CT) super resolution model (Tensorflow/keras) 19 | 20 | ## DDIG Organization 21 | 22 | - [SynthMorph](https://github.com/voxelmorph/voxelmorph): contrast agnostic registration model (Tensorflow/keras) 23 | - [VoxelMorph](https://github.com/voxelmorph/voxelmorph): learning based registration model (Tensorflow/keras) 24 | 25 | ## Laboratory for Computational Neuroscience (lcn) Organization 26 | 27 | - ParcNet: cortical parcellation model (pytorch) 28 | 29 | ## Downloading models 30 | 31 | This repository is a datalad dataset. To get the models, you need to install [`datalad`](https://www.datalad.org/get_datalad.html) and [`datalad-osf`](https://pypi.org/project/datalad-osf/) into your environment. 32 | 33 | ``` 34 | datalad clone https://github.com/neuronets/trained-models 35 | cd trained-models 36 | git-annex enableremote osf-storage 37 | ``` 38 | 39 | To download all the models: 40 | 41 | ``` 42 | datalad get -s osf-storage . 43 | ``` 44 | 45 | To get a specific model, you can pass the path of the model to `datalad get`: 46 | 47 | ``` 48 | datalad get -s osf-storage neuronets/ams/0.1.0/weights/meningioma_T1wc_128iso_v1.h5 49 | ``` 50 | 51 | ``` 52 | datalad get -s osf-storage neuronets/braingen/0.1.0 53 | ``` 54 | 55 | ## Using models for inference or training 56 | 57 | You can use the [Nobrainer-zoo](https://github.com/neuronets/zoo) toolbox for inference and re-training of the models without installing any additional model dependencies. 58 | 59 | ## Loading models for training with python and tensorflow/keras 60 | 61 | You can use the `tensorflow.keras` module to load a tensorflow model. 62 | 63 | ``` 64 | import tensorflow as tf 65 | 66 | model = tf.keras.models.load_model("neuronets/brainy/0.1.0/brain-extraction-unet-128iso-model.h5") 67 | model.fit(...) 68 | ``` 69 | You can see a transfer learning example [here](https://github.com/neuronets/nobrainer/blob/master/guide/transfer_learning.ipynb), and an example of brain MRI generation using **braingen** models can be found [here](https://github.com/neuronets/nobrainer/blob/master/guide/train_generation_progressive.ipynb). 70 | 71 | All models are available for re-training or transfer learning purposes except the **kwyk** model. The kwyk model weights are not available in a tf2 keras format (we are working to make them available in the near future). The kwyk models can be loaded with `tf.saved_model.load`. 72 | 73 | ``` 74 | model = tf.saved_model.load(model_path) 75 | predictor = model.signatures["serving_default"] 76 | ``` 77 | 78 | or you can use the nobrainer [predict_by_estimator](https://github.com/neuronets/nobrainer/blob/00325e0ed03664ae2f560547df6641dafcf8a672/nobrainer/prediction.py#L492) function. Check an example [here](https://github.com/neuronets/nobrainer/blob/master/guide/inference_with_kwyk_model.ipynb).
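For orientation, below is a minimal sketch of calling the loaded kwyk predictor on a conformed volume. The input keyword name (`input_1` here) and the expected 256x256x256 input shape are assumptions, not something this repository documents — inspect `predictor.structured_input_signature` on your copy of the model to confirm both before use.

```
import nibabel as nib
import numpy as np
import tensorflow as tf

model = tf.saved_model.load("neuronets/kwyk/0.4.1/bwn_multi/weights")
predictor = model.signatures["serving_default"]

# Print the signature to find the real input name and expected shape.
print(predictor.structured_input_signature)

# Assumed preprocessing: a scan already conformed to 256x256x256.
volume = nib.load("T1w_conformed.nii.gz").get_fdata().astype(np.float32)
inputs = tf.convert_to_tensor(volume[None, ..., None])  # add batch and channel axes

outputs = predictor(input_1=inputs)  # replace input_1 with the name printed above
print(list(outputs.keys()))  # the available output tensors
```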
79 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/addModel.yml: -------------------------------------------------------------------------------- 1 | name: ⬆️ Add a new model 2 | description: Add a new model to the nobrainer-zoo 3 | title: "New Model: {org}/{model}/{version}" 4 | 5 | body: 6 | - type: markdown 7 | attributes: 8 | value: | 9 | ## Recommendations to the user: 10 | 11 | :one:. Include a docker folder inside your project that contains the Dockerfile and everything associated with it (absolutely recommended). 12 | 13 | :two:. For your prediction script (if applicable), specify the path to the best_model (or weights) and the 'sample' dataset at the command line using flags. 14 | 15 | :three:. Please avoid absolute paths in any of your scripts. Relative paths with respect to the root (project) folder are mandatory in the test command. 16 | 17 | :four:. This issue's title should start with "New Model:" plus your model's org/model/version. 18 | 19 | :five:. All GitHub URLs must point to files/folders in the master/main branch of your repository. 20 | 21 | :warning: Please fill in all the details of this form to add your model successfully to the zoo. Thank you! 22 | 23 | :red_circle: If the automatic checks fail, this issue will be tagged as 'failed'. Fix the issues and comment "Ready-to-test" to test again. 24 | 25 | - type: input 26 | id: path 27 | attributes: 28 | label: Model's repository path 29 | description: Please enter the folder structure for your model {org}/{model}/{version}. 30 | placeholder: ex. DeepCSR/deepcsr/1.0 31 | validations: 32 | required: true 33 | - type: input 34 | id: weights 35 | attributes: 36 | label: Best model weights 37 | description: Please enter the link to the best weights for your model (.pth, .h5, etc). 38 | placeholder: Ensure it is available publicly (Google Drive, Onedrive, etc.) and follow the recommendations of the docs. 39 | validations: 40 | required: true 41 | - type: input 42 | id: docker 43 | attributes: 44 | label: Docker information 45 | description: Please enter the link to the docker folder, following recommendation 1. 46 | placeholder: Enter the GitHub link to the docker folder here. 47 | validations: 48 | required: true 49 | - type: textarea 50 | id: python-scripts 51 | attributes: 52 | label: Model's python scripts 53 | description: Please provide URLs to all your python scripts. It can be a standalone script but could also be a mix of folders and single scripts. 54 | placeholder: If multiple URLs, please enter one per line. 55 | validations: 56 | required: true 57 | - type: textarea 58 | id: model_info 59 | attributes: 60 | label: Model's card and spec 61 | description: Please provide a URL for your model_card.yaml and spec.yaml files. For info on creating these files, please see [Docs](https://github.com/neuronets/trained-models/tree/master/docs) 62 | placeholder: One link per line please. 63 | validations: 64 | required: true 65 | - type: input 66 | id: sample-data 67 | attributes: 68 | label: Sample data 69 | description: Please provide a link to a sample dataset that can be used to test your model. Must be a direct download URL. Follow the recommendations of the docs. 70 | placeholder: Enter the sample-dataset here. 71 | validations: 72 | required: true 73 | - type: input 74 | id: modelConfig 75 | attributes: 76 | label: Model config 77 | description: Please provide a GitHub link to your model's config file (if applicable).
78 | placeholder: Enter the config file here. 79 | - type: textarea 80 | id: testCommand 81 | attributes: 82 | label: Test Command 83 | description: Please read our documentation at root/add_model_instructions.md before writing your test command. 84 | placeholder: Enter your test command. 85 | validations: 86 | required: true 87 | - type: checkboxes 88 | id: terms 89 | attributes: 90 | label: Read & Followed recommendations 91 | description: By submitting this issue, you will be sharing your model's data. Therefore, you are assuring us that you have followed our recommendations. 92 | options: 93 | - label: I have read and followed the recommendations in this form and have assigned myself to the issue. 94 | validations: 95 | required: true 96 | -------------------------------------------------------------------------------- /add_model_instructions.md: -------------------------------------------------------------------------------- 1 | # Repository "Trained-Models" - Model Submission Guide 2 | 3 | Thank you for your interest in contributing your trained model to our "Trained-Models" repository. To ensure a smooth submission process, please follow the instructions below. 4 | 5 | ## Step 1: Create an Issue 6 | 1. Navigate to the [Issues tab](https://github.com/neuronets/trained-models/issues) of the "Trained-Models" repository. 7 | 8 | 2. Click on the "New Issue" button. 9 | 10 | 3. Select the appropriate issue template: 11 | - If you want to add a new model, choose "Add New Model." 12 | - If you want to update an existing model, choose "Update a Model." 13 | 14 | 4. In the issue title, start with "New Model: " for new models or "Update Model: " for model updates. 15 | 16 | ## Step 2: Fill Out the Issue Template 17 | Carefully fill out the issue template with the required information: 18 | 19 | ### 1. Model's Repository Path 20 | - Provide the model's repository path in the following format: 21 | `<org>/<model>/<version>` (e.g., `DeepCSR/deepcsr/1.0`). 22 | 23 | ### 2. Best Model Weights 24 | - Paste a direct download link to the model's weights (Google Drive, Github Raw, etc.). If you need to upload multiple weights, zip the weights and paste the download link here. 25 | Example URL structure for Google Drive: ```"https://drive.google.com/uc?id=<FILEID>&confirm=t"``` 26 | 27 | To extract the FILEID from your Google Drive share URL, do the following: 28 | 1. Right click on the file you want to share. 29 | 2. Click "Share". 30 | 3. Click "Get Link". 31 | 4. Change the general access to "Anyone with the link". 32 | 5. Copy the link. 33 | 6. Your link should look something like this: https://drive.google.com/file/d/1KaNTsBrEohhbaUxgI8qrOFpvPbo0FEQb/view?usp=share_link 34 | 7. The FILEID is the code that goes between ```https://drive.google.com/file/d/``` and ```/view?usp=share_link``` 35 | 8. Copy that FILEID and paste it in this template: ```"https://drive.google.com/uc?id=<FILEID>&confirm=t"``` 36 | 9. For example, the final URL should look like this: ```"https://drive.google.com/uc?id=1KaNTsBrEohhbaUxgI8qrOFpvPbo0FEQb&confirm=t"``` 37 | 38 | ### 3. Docker Information 39 | - Input a GitHub URL to a folder containing the Dockerfile and any other necessary files for Docker. 40 | 41 | ### 4. Model's Python Scripts 42 | - Provide GitHub URLs to folders/files containing Python scripts for your model. The main Python file should not be in the same folder as helper/utils scripts. Separate multiple URLs by line. 43 | 44 | ### 5. Model Card and Spec 45 | - Share GitHub URLs for the `model_card.yaml` and `spec.yaml` files. These files must be named as specified.
For more information, refer to [this documentation](https://github.com/neuronets/trained-models/blob/master/docs/spec_file.md) and [card documentation](https://github.com/neuronets/trained-models/blob/master/docs/model_card.yaml). 46 | 47 | ### 6. Sample Data 48 | - Upload a direct download URL pointing to the sample dataset used to test the model (Google Drive, Github Raw, etc.). If you need to upload multiple datasets, zip the files and paste the download link here. Follow this URL structure if using Google Drive: ```"https://drive.google.com/uc?id=<FILEID>&confirm=t"``` 49 | 50 | ### 7. Model Config (Optional) 51 | - Provide a GitHub URL to your model's config file if you use one. This is optional but should be from GitHub. 52 | 53 | ### 8. Test Command 54 | - Include a test command with flags for: 55 | - Model weights 56 | - Dataset 57 | - Config file (optional) 58 | - Output directory (optional) 59 | 60 | The paths for the above flags should be relative to the repository structure and the model's path provided in field #1. 61 | 62 | Example: 63 | 64 | ```python predict.py --model_checkpoint ./<org>/<model>/<version>/weights/<weights_file> --dataset ./<org>/<model>/<version>/example-data/<sample_data> --conf_path ./<org>/<model>/<version>/config/<config_file> --output_dir .``` 65 | 66 | ### 9. Acknowledge Recommendations 67 | - Select the checkbox to confirm that you have followed all the recommendations. 68 | 69 | ### 10. Final Steps 70 | - Before submitting the issue, ensure all your GitHub URLs point to files in the main/master branch. 71 | - Do not add any tags; our automated process will handle them. 72 | 73 | ## Step 3: Submit the Issue 74 | After submitting the issue, our bot will: 75 | - Automatically create a development branch with your issue number. 76 | - Generate a draft PR linked to the issue. 77 | - Test the model; if it fails, the issue will be tagged as "failed." 78 | - If you need to make changes, fix the issues, update the URLs in the issue, and when ready, simply append "Ready XX" to the issue title (where XX is a number incremented by 01 each time a fix is applied). 79 | 80 | ## Step 4: Approval and Inclusion 81 | - If testing is successful, the issue will be tagged as "success." 82 | - Our bot will notify you to change the PR status to "Open." 83 | - A developer will review and approve/reject the PR. 84 | - Approved models will be added to the "Trained-Models" repository. 85 | 86 | Thank you for your contribution! -------------------------------------------------------------------------------- /Schema/model_card-schema.yaml: -------------------------------------------------------------------------------- 1 | id: https://w3id.org/linkml/examples/personinfo 2 | name: personinfo 3 | prefixes: 4 | linkml: https://w3id.org/linkml/ 5 | personinfo: https://w3id.org/linkml/examples/personinfo 6 | imports: 7 | - linkml:types 8 | default_range: string 9 | default_prefix: personinfo 10 | 11 | classes: 12 | ModelCard: 13 | attributes: 14 | # Basic Information About the Model 15 | organization: 16 | range: string 17 | description: "Person or Organization Developing Model." 18 | modelDate: 19 | range: string 20 | description: "Model Date." 21 | modelVersion: 22 | range: float 23 | description: "Model Version." 24 | modelType: 25 | range: string 26 | description: "Model Type." 27 | trainingApproaches: 28 | range: string 29 | description: "Information About Training Algorithms, Parameters, Fairness Constraints, or Other Applied Approaches, and Features." 30 | moreInformation: 31 | range: string 32 | description: "Paper or Other Resource for More Information."
33 | citationDetails: 34 | range: string 35 | description: "Citation Details." 36 | contactInfo: 37 | range: string 38 | description: "Where to Send Questions or Comments About the Model." 39 | 40 | # Intended Use 41 | primaryIntendedUses: 42 | range: string 43 | description: "Primary Intended Uses." 44 | primaryIntendedUsers: 45 | range: string 46 | description: "Primary Intended Users." 47 | outOfScopeUseCases: 48 | range: string 49 | description: "Out-of-Scope Use Cases." 50 | 51 | # Factors 52 | relevantFactors: 53 | range: string 54 | description: "Relevant Factors." 55 | evaluationFactors: 56 | range: string 57 | description: "Evaluation Factors." 58 | 59 | # Metrics 60 | modelPerformanceMeasures: 61 | range: string 62 | description: "Model Performance Measures." 63 | decisionThresholds: 64 | range: string 65 | description: "Decision Thresholds." 66 | variationApproaches: 67 | range: string 68 | description: "Variation Approaches." 69 | 70 | # Evaluation Data 71 | evaluationDatasets: 72 | range: string 73 | description: "Datasets for Evaluation." 74 | evaluationMotivation: 75 | range: string 76 | description: "Motivation for Evaluation." 77 | evaluationPreprocessing: 78 | range: string 79 | description: "Preprocessing for Evaluation." 80 | 81 | # Training Data 82 | trainingDatasets: 83 | range: string 84 | description: "Datasets for Training." 85 | trainingMotivation: 86 | range: string 87 | description: "Motivation for Training." 88 | trainingPreprocessing: 89 | range: string 90 | description: "Preprocessing for Training." 91 | 92 | # Quantitative Analyses 93 | unitaryResults: 94 | range: string 95 | description: "Unitary Results." 96 | intersectionalResults: 97 | range: string 98 | description: "Intersectional Results." 99 | 100 | # Ethical Considerations 101 | ethicalConsiderations: 102 | range: string 103 | description: "Description of the ethical considerations of your model." 104 | 105 | # Caveats and Recommendations 106 | caveatsAndRecommendations: 107 | range: string 108 | description: "Description of the caveats and recommendations of your model." 
109 | 110 | ModelSpec: 111 | attributes: 112 | dockerImage: 113 | range: string 114 | required: true 115 | singularityImage: 116 | range: string 117 | required: true 118 | repoUrl: 119 | range: string 120 | committish: 121 | range: string 122 | repoDownload: 123 | range: string 124 | repoDownloadLocation: 125 | range: string 126 | command: 127 | range: string 128 | n_files: 129 | range: integer 130 | on_files: 131 | range: integer 132 | prediction_script: 133 | range: string 134 | total: 135 | range: integer 136 | train: 137 | range: integer 138 | evaluate: 139 | range: integer 140 | test: 141 | range: integer 142 | male: 143 | range: boolean 144 | female: 145 | range: boolean 146 | age_histogram: 147 | range: string 148 | race: 149 | range: string 150 | imaging_contrast_info: 151 | range: string 152 | dataset_sources: 153 | range: string 154 | number_of_sites: 155 | range: integer 156 | sites: 157 | range: string 158 | scanner_models: 159 | range: string 160 | hardware: 161 | range: string 162 | input_shape: 163 | range: string 164 | block_shape: 165 | range: string 166 | n_classes: 167 | range: integer 168 | lr: 169 | range: string 170 | n_epochs: 171 | range: integer 172 | total_batch_size: 173 | range: integer 174 | number_of_gpus: 175 | range: integer 176 | loss_function: 177 | range: string 178 | metrics: 179 | range: string 180 | data_preprocessing: 181 | range: string 182 | data_augmentation: 183 | range: string 184 | 185 | Container: 186 | tree_root: true 187 | attributes: 188 | modelcards: 189 | multivalued: true 190 | inlined_as_list: true 191 | range: ModelCard 192 | modelSpecs: 193 | multivalued: true 194 | inlined_as_list: true 195 | range: ModelSpec 196 | -------------------------------------------------------------------------------- /lcn/parcnet/1.0.0/parc.py: -------------------------------------------------------------------------------- 1 | import os 2 | import glob 3 | import math 4 | import random 5 | import torch 6 | import torch.nn as nn 7 | import numpy as np 8 | # import freesurfer as fs 9 | import nibabel as nib 10 | from typing import Any, Callable, Dict, List, Optional, Tuple, Union 11 | from torchvision.datasets.vision import VisionDataset 12 | from torchvision.datasets.utils import verify_str_arg 13 | 14 | class PARC(VisionDataset): 15 | def __init__( 16 | self, 17 | root: str = '/autofs/space/pac_001/tiamat_copy/3/users/subjects/aparc_atlas', 18 | split: range = None, 19 | subset: str = 'DKTatlas', 20 | labels = 'labels.DKT31.manual.2.annot', 21 | inputs = ['inflated.H', 'sulc', 'curv'], 22 | hemisphere: str = 'rh', 23 | transforms: Optional[Callable] = None, 24 | in_channels: int = 1, 25 | num_classes: int = 1, 26 | out_shape = [32, 256, 512], 27 | mode = 'image', 28 | multiplier = 1, 29 | stride: int = 1, 30 | labeled: bool = True, 31 | **kwargs 32 | ): 33 | super().__init__(root) 34 | 35 | self.labeled = labeled 36 | self.stride = stride 37 | self.inputs = inputs 38 | self.labels = labels 39 | self.split = split #verify_str_arg(split, 'split', ['train', 'valid']) 40 | self.transforms = transforms 41 | self.in_channels = in_channels 42 | self.num_classes = num_classes 43 | self.mode = verify_str_arg(mode, 'mode', ['image', 'surf']) 44 | self.multiplier = multiplier 45 | 46 | self.hemisphere = verify_str_arg(hemisphere, 'hemisphere', ['rh', 'lh']) 47 | self.out_shape = out_shape 48 | 49 | self.sphere_file = os.path.join('surf', '%s.sphere.reg' % (self.hemisphere)) 50 | self.expand_file = os.path.join('surf', '%s.inflated' % (self.hemisphere)) 51 | 
self.parcel_file = os.path.join('label','%s.%s' % (self.hemisphere, labels)) 52 | self.signal_file = [os.path.join('surf', '%s.%s' % (self.hemisphere, signal)) for signal in inputs] 53 | 54 | fullpath = os.path.join(root, subset) 55 | # with open(os.path.join(fullpath, '%s_%d.txt') % (split, seed)) as subjfile: 56 | # self.subjects = subjfile.read().splitlines() 57 | self.subjects = sorted([p for p in os.listdir(fullpath) if len(glob.glob(os.path.join(fullpath, p, self.signal_file[0] + '*'))) > 0]) 58 | self.subjects = [self.subjects[i] for i in self.split] if self.split != None else self.subjects 59 | 60 | self.spheres = [os.path.join(fullpath, subject, self.sphere_file) for subject in self.subjects] 61 | self.expands = [os.path.join(fullpath, subject, self.expand_file) for subject in self.subjects] 62 | self.parcels = [os.path.join(fullpath, subject, self.parcel_file) for subject in self.subjects] 63 | self.signals = [[os.path.join(fullpath, subject, signal) for signal in self.signal_file] for subject in self.subjects] 64 | 65 | 66 | def __getitem__(self, index: int): 67 | """ 68 | Args: 69 | index (int): Index 70 | Returns: 71 | tuple: (image, label, index) where label is the image segmentation. 72 | """ 73 | 74 | index = index % len(self.signals) 75 | 76 | # if self.mode == 'surf': 77 | # image = torch.tensor([fs.Surface.read(self.spheres[index]).parameterize(fs.Overlay.read(signal).data) for signal in self.signals[index]]) 78 | # label = torch.tensor([fs.Surface.read(self.spheres[index]).parameterize(fs.Overlay.read(self.parcels[index]).data, interp='nearest')]) if self.labeled \ 79 | # else torch.zeros([1] + list(image.shape[1:])) 80 | 81 | if self.mode == 'image': 82 | image = torch.tensor([nib.load(signal + '.mgz').get_fdata()[:,:,0] for signal in self.signals[index]], dtype=torch.float32).permute(0,2,1) 83 | label = torch.tensor([nib.load(self.parcels[index] + '.mgz').get_fdata()], dtype=torch.float32).permute(0,2,1) if self.labeled else torch.zeros([1] + list(image.shape[1:])) 84 | 85 | if self.transforms != None: 86 | image, label = self.transforms(image, label) 87 | 88 | return image, label, index 89 | 90 | def save_output(self, root, outputs, indices): 91 | for i in range(0, len(outputs)): 92 | # filename = self.subjects[i] '%05d' % (indices[i]) 93 | # if self.mode == 'surf': 94 | # parcel = fs.Overlay.read(self.parcels[indices[i]]) 95 | # parcel.write(os.path.join(root,filename + '.label.annot')) 96 | # sphere = fs.Surface.read(self.spheres[indices[i]]) 97 | # parcel = fs.Overlay(sphere.sample_parameterization(outputs[i].cpu().numpy(), interp='nearest'), lut=parcel.lut) 98 | # parcel.write(os.path.join(root,filename + '.image.annot')) 99 | 100 | if self.mode == 'image': 101 | # parcel = fs.Image.read(self.parcels[indices[i]] + '.mgz') 102 | # parcel.write(os.path.join(root,filename + '.label.mgz')) 103 | parcel = nib.MGHImage(outputs[i].permute(0,2,1).short().cpu().numpy(), np.eye(4))# fs.Image(outputs[i].permute(0,2,1).cpu()) 104 | os.makedirs(os.path.join(root, self.subjects[indices[i]]), exist_ok=True) 105 | nib.save(parcel, os.path.join(root, self.subjects[indices[i]], 'parc.mgz')) #parcel.write(os.path.join(root,filename + '.image.mgz')) 106 | 107 | def __len__(self) -> int: 108 | return int(len(self.signals) * self.multiplier) 109 | 110 | def __outshape__(self) -> list: 111 | return self.out_shape 112 | 113 | def __numinput__(self) -> int: 114 | return self.in_channels 115 | 116 | def __numclass__(self) -> int: 117 | return self.num_classes 118 | 119 | def 
__weights__(self) -> torch.Tensor: 120 | # weight_s = (torch.sin(math.pi * (1/(256-1)) * torch.arange(0, 256))**2).reshape(-1, 1).repeat(1, 512).reshape(-1,256,512) 121 | 122 | return 1 #weight_c #1 #weight_s #*weight_c 123 | -------------------------------------------------------------------------------- /UCL/SynthSR/1.0.0/general/predict.py: -------------------------------------------------------------------------------- 1 | """This code is adapted from SynthSR predict_command_line.py to be compatible with Nobrainer-zoo. 2 | 3 | 4 | If you use this code, please cite the SynthSR paper in: 5 | https://github.com/BBillot/SynthSR/blob/master/bibtex.bib 6 | 7 | Copyright 2020 Benjamin Billot 8 | 9 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in 10 | compliance with the License. You may obtain a copy of the License at 11 | http://www.apache.org/licenses/LICENSE-2.0 12 | Unless required by applicable law or agreed to in writing, software distributed under the License is 13 | distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 14 | implied. See the License for the specific language governing permissions and limitations under the 15 | License. 16 | """ 17 | 18 | 19 | # print information 20 | print('\n') 21 | print('SynthSR prediction') 22 | print('\n') 23 | 24 | # python imports 25 | import os 26 | import sys 27 | import numpy as np 28 | from argparse import ArgumentParser 29 | 30 | 31 | # parse arguments 32 | parser = ArgumentParser() 33 | 34 | # repository location and model path 35 | parser.add_argument("--repo_path", type=str, dest="repo_path", help="repository download location.") 36 | parser.add_argument("--model_path", type=str, dest="model_path", help="saved model path") 37 | 38 | parser.add_argument("path_images", type=str, help="images to super-resolve / synthesize. Can be the path to a single image or to a folder") 39 | parser.add_argument("path_predictions", type=str, 40 | help="path where to save the synthetic 1mm MP-RAGEs.
Must be the same type " 41 | "as path_images (path to a single image or to a folder)") 42 | parser.add_argument("--cpu", action="store_true", help="enforce running with CPU rather than GPU.") 43 | parser.add_argument("--threads", type=int, default=1, dest="threads", 44 | help="number of threads to be used by tensorflow when running on CPU.") 45 | parser.add_argument("--ct", action="store_true", help="use this flag for ct scans.") 46 | 47 | args = vars(parser.parse_args()) 48 | 49 | # add the repository main folder to python path and import ./SynthSeg/predict.py 50 | sys.path.append(args["repo_path"]) 51 | from ext.neuron import models as nrn_models 52 | from ext.lab2im import utils 53 | from ext.lab2im import edit_volumes 54 | 55 | # enforce CPU processing if necessary 56 | if args['cpu']: 57 | print('using CPU, hiding all CUDA_VISIBLE_DEVICES') 58 | os.environ['CUDA_VISIBLE_DEVICES'] = '-1' 59 | 60 | # limit the number of threads to be used if running on CPU 61 | import tensorflow as tf 62 | tf.config.threading.set_intra_op_parallelism_threads(args['threads']) 63 | 64 | # Build Unet and load weights 65 | unet_model = nrn_models.unet(nb_features=24, 66 | input_shape=[None,None,None,1], 67 | nb_levels=5, 68 | conv_size=3, 69 | nb_labels=1, 70 | feat_mult=2, 71 | nb_conv_per_level=2, 72 | conv_dropout=0, 73 | final_pred_activation='linear', 74 | batch_norm=-1, 75 | activation='elu', 76 | input_model=None) 77 | 78 | unet_model.load_weights(args["model_path"], by_name=True) 79 | 80 | # Prepare list of images to process 81 | path_images = os.path.abspath(args['path_images']) 82 | basename = os.path.basename(path_images) 83 | path_predictions = os.path.abspath(args['path_predictions']) 84 | 85 | # prepare input/output volumes 86 | # First case: you're providing directories 87 | if ('.nii.gz' not in basename) & ('.nii' not in basename) & ('.mgz' not in basename) & ('.npz' not in basename): 88 | if os.path.isfile(path_images): 89 | raise Exception('extension not supported for %s, only use: nii.gz, .nii, .mgz, or .npz' % path_images) 90 | images_to_segment = utils.list_images_in_folder(path_images) 91 | utils.mkdir(path_predictions) 92 | path_predictions = [os.path.join(path_predictions, os.path.basename(image)).replace('.nii', '_SynthSR.nii') for image in 93 | images_to_segment] 94 | path_predictions = [seg_path.replace('.mgz', '_SynthSR.mgz') for seg_path in path_predictions] 95 | path_predictions = [seg_path.replace('.npz', '_SynthSR.npz') for seg_path in path_predictions] 96 | 97 | else: 98 | assert os.path.isfile(path_images), "file does not exist: %s " \ 99 | "\nplease make sure the path and the extension are correct" % path_images 100 | images_to_segment = [path_images] 101 | path_predictions = [path_predictions] 102 | 103 | 104 | # Do the actual work 105 | print('Found %d images' % len(images_to_segment)) 106 | for idx, (path_image, path_prediction) in enumerate(zip(images_to_segment, path_predictions)): 107 | print(' Working on image %d ' % (idx+1)) 108 | print(' ' + path_image) 109 | 110 | im, aff, hdr = utils.load_volume(path_image,im_only=False,dtype='float') 111 | if args['ct']: 112 | im[im < 0] = 0 113 | im[im > 80] = 80 114 | im, aff = edit_volumes.resample_volume(im, aff, [1.0, 1.0, 1.0]) 115 | aff_ref = np.eye(4) 116 | im, aff2 = edit_volumes.align_volume_to_ref(im, aff, aff_ref=aff_ref, return_aff=True, n_dims=3) 117 | im = im - np.min(im) 118 | im = im / np.max(im) 119 | I = im[np.newaxis,..., np.newaxis] 120 | W = (np.ceil(np.array(I.shape[1:-1]) / 32.0) * 32).astype('int') 121
| idx = np.floor((W-I.shape[1:-1])/2).astype('int') 122 | S = np.zeros([1, *W, 1]) 123 | S[0, idx[0]:idx[0]+I.shape[1], idx[1]:idx[1]+I.shape[2], idx[2]:idx[2]+I.shape[3], :] = I 124 | output = unet_model.predict(S) 125 | pred = np.squeeze(output) 126 | pred = 255 * pred 127 | pred[pred<0] = 0 128 | pred[pred>128] = 128 129 | pred = pred[idx[0]:idx[0]+I.shape[1], idx[1]:idx[1]+I.shape[2], idx[2]:idx[2]+I.shape[3]] 130 | utils.save_volume(pred,aff2,None,path_prediction) 131 | 132 | print(' ') 133 | print('All done!') 134 | print(' ') 135 | 136 | --------------------------------------------------------------------------------
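Note: based on the argument parser defined in UCL/SynthSR/1.0.0/general/predict.py above, a typical invocation would look like the following. The input and output filenames are illustrative placeholders; only the flags and the weights path (which is listed in this repository) come from the script itself.

    python UCL/SynthSR/1.0.0/general/predict.py \
        --repo_path /path/to/SynthSR \
        --model_path UCL/SynthSR/1.0.0/general/weights/SynthSR_v10_210712.h5 \
        input_T1w.nii.gz output_SynthSR.nii.gz --cpu --threads 4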