├── .gitignore
├── README.md
├── config
├── MS-SiT
│ ├── segmentation
│ │ └── hparams.yml
│ └── training
│ │ └── hparams.yml
├── SiT
│ ├── attention_maps
│ │ └── hparams.yml
│ ├── pretraining
│ │ └── mpp.yml
│ ├── testing
│ │ └── hparams.yml
│ └── training
│ │ └── hparams.yml
└── preprocessing
│ └── hparams.yml
├── docs
├── docker.md
├── g-node.png
├── install.md
├── left_right_example.png
├── mindboggle.png
├── sit.jpeg
└── sit_gif.gif
├── labels
├── MindBoggle
│ ├── half
│ │ ├── template
│ │ │ ├── means.npy
│ │ │ └── stds.npy
│ │ ├── train.csv
│ │ └── val.csv
│ └── ids.csv
└── dHCP
│ ├── birth_age
│ │ ├── half
│ │ │ ├── native
│ │ │ │ ├── means.npy
│ │ │ │ └── stds.npy
│ │ │ ├── template
│ │ │ │ ├── means.npy
│ │ │ │ └── stds.npy
│ │ │ ├── train.csv
│ │ │ └── val.csv
│ │ ├── native
│ │ │ ├── means.npy
│ │ │ └── stds.npy
│ │ ├── template
│ │ │ ├── means.npy
│ │ │ └── stds.npy
│ │ ├── train.csv
│ │ └── validation.csv
│ └── scan_age
│ │ ├── half
│ │ │ ├── native
│ │ │ │ ├── means.npy
│ │ │ │ └── stds.npy
│ │ │ ├── template
│ │ │ │ ├── means.npy
│ │ │ │ └── stds.npy
│ │ │ ├── train.csv
│ │ │ └── val.csv
│ │ ├── native
│ │ │ ├── means.npy
│ │ │ └── stds.npy
│ │ ├── template
│ │ │ ├── means.npy
│ │ │ └── stds.npy
│ │ ├── train.csv
│ │ └── val.csv
├── models
├── mpp.py
├── ms_sit.py
├── ms_sit_shifted.py
├── ms_sit_unet.py
├── ms_sit_unet_shifted.py
└── sit.py
├── patch_extraction
├── reorder_patches
│ ├── order_ico1.npy
│ ├── order_ico2.npy
│ ├── order_ico3.npy
│ ├── order_ico4.npy
│ └── order_ico5.npy
├── triangle_indices_ico_6_sub_ico_1.csv
├── triangle_indices_ico_6_sub_ico_2.csv
├── triangle_indices_ico_6_sub_ico_3.csv
├── triangle_indices_ico_6_sub_ico_4.csv
└── triangle_indices_ico_6_sub_ico_5.csv
├── requirements.txt
├── surfaces
├── img_indices_40962.npy
├── img_weights_40962.npy
├── metric_resample.py
└── metric_resample_labels.py
├── tools
├── dataloader.py
├── datasets.py
├── extract_attention_maps_sit.py
├── metrics.py
├── pretrain.py
├── train.py
├── train_segmentation.py
└── utils.py
└── utils
├── coordinates_ico_6_L.shape.gii
├── ico-6-L.surf.gii
├── sphere.1280.L.surf.gii
├── sphere.320.L.surf.gii
├── warps
└── resample_ico6_ico_2
│ ├── ico_2_0.surf.gii
│ ├── ico_2_1.surf.gii
│ ├── ico_2_10.surf.gii
│ ├── ico_2_11.surf.gii
│ ├── ico_2_12.surf.gii
│ ├── ico_2_13.surf.gii
│ ├── ico_2_14.surf.gii
│ ├── ico_2_15.surf.gii
│ ├── ico_2_16.surf.gii
│ ├── ico_2_17.surf.gii
│ ├── ico_2_18.surf.gii
│ ├── ico_2_19.surf.gii
│ ├── ico_2_2.surf.gii
│ ├── ico_2_20.surf.gii
│ ├── ico_2_21.surf.gii
│ ├── ico_2_22.surf.gii
│ ├── ico_2_23.surf.gii
│ ├── ico_2_24.surf.gii
│ ├── ico_2_25.surf.gii
│ ├── ico_2_26.surf.gii
│ ├── ico_2_27.surf.gii
│ ├── ico_2_28.surf.gii
│ ├── ico_2_29.surf.gii
│ ├── ico_2_3.surf.gii
│ ├── ico_2_30.surf.gii
│ ├── ico_2_31.surf.gii
│ ├── ico_2_32.surf.gii
│ ├── ico_2_33.surf.gii
│ ├── ico_2_34.surf.gii
│ ├── ico_2_35.surf.gii
│ ├── ico_2_36.surf.gii
│ ├── ico_2_37.surf.gii
│ ├── ico_2_38.surf.gii
│ ├── ico_2_39.surf.gii
│ ├── ico_2_4.surf.gii
│ ├── ico_2_40.surf.gii
│ ├── ico_2_41.surf.gii
│ ├── ico_2_42.surf.gii
│ ├── ico_2_43.surf.gii
│ ├── ico_2_44.surf.gii
│ ├── ico_2_45.surf.gii
│ ├── ico_2_46.surf.gii
│ ├── ico_2_47.surf.gii
│ ├── ico_2_48.surf.gii
│ ├── ico_2_49.surf.gii
│ ├── ico_2_5.surf.gii
│ ├── ico_2_50.surf.gii
│ ├── ico_2_51.surf.gii
│ ├── ico_2_52.surf.gii
│ ├── ico_2_53.surf.gii
│ ├── ico_2_54.surf.gii
│ ├── ico_2_55.surf.gii
│ ├── ico_2_56.surf.gii
│ ├── ico_2_57.surf.gii
│ ├── ico_2_58.surf.gii
│ ├── ico_2_59.surf.gii
│ ├── ico_2_6.surf.gii
│ ├── ico_2_60.surf.gii
│ ├── ico_2_61.surf.gii
│ ├── ico_2_62.surf.gii
│ ├── ico_2_63.surf.gii
│ ├── ico_2_64.surf.gii
│ ├── ico_2_65.surf.gii
│ ├── ico_2_66.surf.gii
│ ├── ico_2_67.surf.gii
│ ├── ico_2_68.surf.gii
│ ├── ico_2_69.surf.gii
│ ├── ico_2_7.surf.gii
│ ├── ico_2_70.surf.gii
│ ├── ico_2_71.surf.gii
│ ├── ico_2_72.surf.gii
│ ├── ico_2_73.surf.gii
│ ├── ico_2_74.surf.gii
│ ├── ico_2_75.surf.gii
│ ├── ico_2_76.surf.gii
│ ├── ico_2_77.surf.gii
│ ├── ico_2_78.surf.gii
│ ├── ico_2_79.surf.gii
│ ├── ico_2_8.surf.gii
│ ├── ico_2_80.surf.gii
│ ├── ico_2_81.surf.gii
│ ├── ico_2_82.surf.gii
│ ├── ico_2_83.surf.gii
│ ├── ico_2_84.surf.gii
│ ├── ico_2_85.surf.gii
│ ├── ico_2_86.surf.gii
│ ├── ico_2_87.surf.gii
│ ├── ico_2_88.surf.gii
│ ├── ico_2_89.surf.gii
│ ├── ico_2_9.surf.gii
│ ├── ico_2_90.surf.gii
│ ├── ico_2_91.surf.gii
│ ├── ico_2_92.surf.gii
│ ├── ico_2_93.surf.gii
│ ├── ico_2_94.surf.gii
│ ├── ico_2_95.surf.gii
│ ├── ico_2_96.surf.gii
│ ├── ico_2_97.surf.gii
│ ├── ico_2_98.surf.gii
│ └── ico_2_99.surf.gii
└── week-40_hemi-left_space-dhcpSym_dens-40k_desc-medialwallsymm_mask.shape.gii
/.gitignore:
--------------------------------------------------------------------------------
1 | utils/__pycache__/*
2 | outputs/*
3 | models/MSG3D/__pycache__/*
4 | logs/*
5 | models/__pycache__/*
6 | data/*
7 | *test*
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # The Surface Vision Transformer family
2 |
3 | This repository contains the codebase for applying surface vision transformer models to surface data (e.g. cortical data). It provides the **official** PyTorch implementation of:
4 |
5 | - SiT - [*Surface Vision Transformers: Attention-Based Modelling applied to Cortical Analysis*](https://arxiv.org/abs/2203.16414) [MIDL2022]
6 |
7 | - MS-SiT - [*The Multiscale Surface Vision Transformer*](https://arxiv.org/abs/2303.11909) [MIDL2024]
8 |
9 |
12 |
13 | # Outline
14 |
15 | This repository lists the instructions to access pre-processed cortical data for regression, classification and segmentation tasks, and to train *SiT* and *MS-SiT* models.
16 |
17 | 1. Updates
18 | 2. Installation & Set-up
19 | 3. Accessing data
20 | 4. Training & Inference
21 |
22 | Here, the Surface Vision Transformer (**SiT**) is applied to cortical data for phenotype prediction.
23 |
24 |
25 |
26 | # 1. Updates
27 |
28 |
29 |
30 | V.3.1 - 24.09.24
31 | Minor codebase update - 24.09.24
32 |
33 | Updating pre-training script for Masked Patch Pre-training (MPP)
34 |
35 |
36 |
37 |
38 |
39 | 🔥 V.3.0 - 19.09.24
40 | Major codebase update - 18.09.24
41 |
42 | Adding MS-SiT segmentation codebase
43 | Adding metrics file dataloader for SiT and MS-SiT models (numpy loader still available)
44 | Update GIN repository for dHCP access
45 | Adding new GIN repository with MindBoggle dataset
46 |
47 |
48 |
49 |
50 | V.2.0 - 22.07.24
51 | Major codebase update - 22.07.24
52 |
53 | Adding MS-SiT model into the codebase
54 |
55 |
56 |
57 |
58 | V.1.1 - 12.02.24
59 | Major codebase update - 12.02.24
60 |
61 | Adding masked patch pretraining code to codebase
62 | can be run simply with: python pretrain.py ../config/SiT/pretraining/mpp.yml
63 |
64 |
65 |
66 |
67 |
68 | V.1.0 - 18.07.22
69 | Major codebase update - 18.07.22
70 |
71 | birth age and scan age prediction tasks
72 | simplifying training script
73 | adding birth age prediction script
74 | simplifying preprocessing script
75 | single config file for tasks (scan age / birth age) and data configurations (template / native)
76 | adding mesh indices to extract non-overlapping triangular patches from a cortical mesh ico 6 sphere representation
77 |
78 |
79 |
80 |
81 | V.0.2
82 | Update - 25.05.22
83 |
84 | testing file and config
85 | installation guidelines
86 | data access
87 |
88 |
89 |
90 |
91 | V.0.1
92 | Initial commits - 12.10.21
93 |
94 | training script
95 | README
96 | config file for training
97 |
98 |
99 |
100 | # 2. Installation & Set-up
101 |
102 |
103 | ## a. Connectome Workbench
104 |
105 | Connectome Workbench is free software for visualising neuroimaging data and can be used to visualise cortical metrics on surfaces. Downloads and instructions [here](https://www.humanconnectome.org/software/connectome-workbench).
106 |
107 | ## b. Conda usage
108 |
109 | For PyTorch and dependencies installation with conda, please follow instructions in [install.md](docs/install.md).
110 |
111 | ## c. Docker usage
112 |
113 | **Coming soon**
114 |
115 | For docker support, please follow the instructions in [docker.md](docs/docker.md).
116 |
117 |
118 | # 3. Accessing data
119 |
120 |
121 | ## a. dHCP pre-processed dataset - regression
122 |
123 |
124 | The data used in these projects for regression tasks are cortical metrics (cortical thickness, curvature, myelin maps and sulcal depth maps) from the [dHCP dataset](http://www.developingconnectome.org/). Instructions for processing the MRI scans and extracting cortical metrics can be found in [S. Dahan et al 2021](https://arxiv.org/abs/2203.16414) and references cited therein.
125 |
126 | To simplify reproducibility of the work, the data has already been pre-processed (compiled into **numpy arrays** or kept as raw **gifti** files) and is made available following the guidelines below.
127 |
128 | ### Accessing pre-processed data
129 |
130 | Cortical surface metrics (cortical thickness, curvature, myelin maps and sulcal depth maps) already processed as in [S. Dahan et al 2021](https://arxiv.org/abs/2203.16414) and [A. Fawaz et al 2021](https://www.biorxiv.org/content/10.1101/2021.12.01.470730v1) are available upon request.
131 |
132 |
133 | Sign dHCP access agreement
134 |
135 | To access the data please:
136 |
137 |
138 | Sign in here
139 | Sign the dHCP open access agreement
140 | Forward the confirmation email to simon.dahan@kcl.ac.uk
141 |
142 |
143 |
144 |
145 |
146 | Create a G-Node GIN account
147 |
148 | Please create an account on the GIN platform here
149 |
150 |
151 |
152 |
153 | Get access to the G-Node GIN repository
154 |
155 |
156 | Please also share your G-Node username with simon.dahan@kcl.ac.uk
157 | Then, you will be added to the SLCN 2023 repository
158 |
159 |
160 |
161 |
162 |
163 |
164 | **Training**, **validation** and **testing** sets are available, as used in [S. Dahan et al 2021](https://arxiv.org/abs/2203.16414) and [A. Fawaz et al 2021](https://www.biorxiv.org/content/10.1101/2021.12.01.470730v1), for the tasks of **birth-age** (gestational age - GA) and **scan-age** (postmenstrual age at scan - PMA) prediction, in **template** and **native** configurations.
165 |
166 | The dHCP data has been resampled to ico6 resolution (40k vertices). Left and right hemispheres are symmetrised, see image below.
167 |
168 |
171 |
172 | Importantly, the dHCP data is accessible in two formats: numpy and gifti.
173 |
174 | ### Numpy format
175 |
176 | In numpy format, the surface data is already patched (as explained in [S. Dahan et al 2021](https://arxiv.org/abs/2203.16414)) with an ico2 grid, and compiled into train, validation and test arrays. Each array has a shape of `(B,N,C,V)`, with B the number of subjects, N the number of patches (320), C the number of input channels (4) and V the number of vertices per patch (153).
177 |
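For a quick sanity check of this layout, the snippet below is a minimal sketch (the filename `train_data.npy` is a placeholder, not necessarily the name used in the released arrays):

```
# Minimal sketch: inspect a pre-patched dHCP array (filename is a placeholder).
import numpy as np

data = np.load("train_data.npy")           # expected shape: (B, N, C, V)
B, N, C, V = data.shape
print(f"{B} subjects, {N} patches, {C} channels, {V} vertices per patch")
assert (N, C, V) == (320, 4, 153)          # ico2 patching of an ico6 sphere

# A single training example is one (320, 4, 153) tensor of patched metrics.
example = data[0]
```
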
178 | ### Gifti format
179 |
180 | We also make available gifti files with the different cortical metrics merged per subject and per hemisphere. For instance, `sub-CC00051XX02_ses-7702_L.shape.gii` contains the 4 cortical metrics merged into a single file at the ico6 (40k vertices) resolution.
181 |
182 | This data format is more flexible for further post-processing (if needed) and for building more complex dataloading strategies (with data augmentations for instance, see below xx).
183 |
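As an illustration, such a file can be read with `nibabel` into a `(channels, vertices)` array; this is a minimal sketch, not code shipped with the repository:

```
# Minimal sketch (assuming nibabel is installed): load a merged metrics file.
import numpy as np
import nibabel as nib

img = nib.load("sub-CC00051XX02_ses-7702_L.shape.gii")
# One data array per cortical metric, each with 40962 values (ico6 vertices).
metrics = np.stack([d.data for d in img.darrays])   # shape: (4, 40962)
print(metrics.shape)
```
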
184 |
185 | ## b. MindBoggle dataset - segmentation
186 |
187 | The MindBoggle dataset with cortical metrics (sulcal depth and curvature) has been further pre-processed with MSMSulc alignment and resampling to ico6 resolution (40k vertices).
188 |
189 | ### Accessing pre-processed data
190 |
191 | Pre-processed MindBoggle data is available in the following G-Node GIN repository: MindBoggle processed dataset.
192 |
193 |
194 | Please create an account and forward your username at **simon.dahan@kcl.ac.uk** to be added to the repository and access the data.
195 |
196 |
199 |
200 |
201 |
202 | # 4. Training & Inference
203 |
204 |
205 | This repository is designed as a modular framework. Most of the model and training hyperparameters can be set within config files, which are used as input to the training scripts. Training scripts are located in the `tools/` folder.
206 |
207 | ## Training SiT/MS-SiT for regression tasks
208 |
209 | Once in the `tools` folder, one can start training an SiT or MS-SiT model with the following command:
210 | ```
211 | python train.py ../config/SiT/training/hparams.yml
212 | ```
213 |
214 | or
215 |
216 | ```
217 | python train.py ../config/MS-SiT/training/hparams.yml
218 | ```
219 |
220 | Where all hyperparameters for training and model design are set in the YAML files `config/SiT/training/hparams.yml` and `config/MS-SiT/training/hparams.yml`, such as:
221 |
222 | - Transformer architecture
223 | - Training strategy: from scratch, ImageNet or SSL weights
224 | - Optimisation strategy
225 | - Patching configuration
226 | - Logging
227 |
228 | One important point: as explained in the dHCP section above, data is available either in *numpy* or *gifti* format. The parameter `data/loader` in the config files should be set accordingly.
229 |
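For instance, a quick way to check which loader (and dataset) a config selects before launching a run is to parse the YAML directly; this is a minimal sketch using PyYAML, independent of the training scripts:

```
# Minimal sketch: inspect the data/loader setting of a config file.
import yaml

with open("../config/SiT/training/hparams.yml") as f:
    config = yaml.safe_load(f)

loader = config["data"]["loader"]          # "metrics" or "numpy"
assert loader in ("metrics", "numpy"), f"unexpected loader: {loader}"
print(f"dataset={config['data']['dataset']}, loader={loader}")
```
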
230 | ## Training MS-SiT for segmentation tasks
231 |
232 | The MS-SiT model can be used to train a segmentation model as follows:
233 |
234 | ```
235 | python train_segmentation.py ../config/MS-SiT/segmentation/hparams.yml
236 | ```
237 | Here, only the `metrics` dataloader is available.
238 |
239 | # Tensorboard support
240 |
241 | **Coming soon**
242 |
243 | # References
244 |
245 | This codebase uses the vision transformer implementation from
246 | [lucidrains/vit-pytorch](https://github.com/lucidrains/vit-pytorch) and the pre-trained ViT models from the [*timm*](https://github.com/rwightman/pytorch-image-models) library.
247 |
248 | # Citation
249 |
250 | Please cite these works if you find them useful:
251 |
252 | [Surface Vision Transformers: Attention-Based Modelling applied to Cortical Analysis](https://arxiv.org/abs/2203.16414)
253 |
254 | ```
255 | @InProceedings{pmlr-v172-dahan22a,
256 | title = {Surface Vision Transformers: Attention-Based Modelling applied to Cortical Analysis},
257 | author = {Dahan, Simon and Fawaz, Abdulah and Williams, Logan Z. J. and Yang, Chunhui and Coalson, Timothy S. and Glasser, Matthew F. and Edwards, A. David and Rueckert, Daniel and Robinson, Emma C.},
258 | booktitle = {Proceedings of The 5th International Conference on Medical Imaging with Deep Learning},
259 | pages = {282--303},
260 | year = {2022},
261 | editor = {Konukoglu, Ender and Menze, Bjoern and Venkataraman, Archana and Baumgartner, Christian and Dou, Qi and Albarqouni, Shadi},
262 | volume = {172},
263 | series = {Proceedings of Machine Learning Research},
264 | month = {06--08 Jul},
265 | publisher = {PMLR},
266 | pdf = {https://proceedings.mlr.press/v172/dahan22a/dahan22a.pdf},
267 | url = {https://proceedings.mlr.press/v172/dahan22a.html},
268 | }
269 |
270 | ```
271 |
272 | [The Multiscale Surface Vision Transformer](https://arxiv.org/abs/2303.11909)
273 |
274 | ```
275 | @misc{dahan2024multiscalesurfacevisiontransformer,
276 | title={The Multiscale Surface Vision Transformer},
277 | author={Simon Dahan and Logan Z. J. Williams and Daniel Rueckert and Emma C. Robinson},
278 | year={2024},
279 | eprint={2303.11909},
280 | archivePrefix={arXiv},
281 | primaryClass={eess.IV},
282 | url={https://arxiv.org/abs/2303.11909},
283 | }
284 | ```
285 |
--------------------------------------------------------------------------------
/config/MS-SiT/segmentation/hparams.yml:
--------------------------------------------------------------------------------
1 | # @Author: Simon Dahan @SD3004
2 | # @Date: 31-08-2022 01:00:00
3 |
4 | MODEL: ms-sit
5 |
6 | ################################## DATA & TASK ##################################
7 |
8 | mesh_resolution:
9 | ico_mesh: 6 #resolution of the input mesh
10 | ico_grid: 5 #resolution of the grid used to extract patches
11 | reorder: True #reorder the sequence of patches
12 |
13 | sub_ico_0:
14 | num_patches: 20
15 | num_vertices: 2145
16 |
17 | sub_ico_1:
18 | num_patches: 80
19 | num_vertices: 561
20 |
21 | sub_ico_2:
22 | num_patches: 320
23 | num_vertices: 153
24 |
25 | sub_ico_3:
26 | num_patches: 1280
27 | num_vertices: 45
28 |
29 | sub_ico_4:
30 | num_patches: 5120
31 | num_vertices: 15
32 |
33 | sub_ico_5:
34 | num_patches: 20480
35 | num_vertices: 6
36 |
37 | data:
38 | path_to_data: /drive/data #/home/sd20/data/, /drive/data
39 | path_to_template: ../utils/
40 | path_to_workdir: .. #
41 | dataset: MindBoggle #dHCP, HCP, UKB, MindBoggle
42 | loader: metrics #metrics
43 | task: segmentation #scan_age, birth_age, sex
44 | configuration: MSMSulc #MSMSulc, native, freesurfer; MSMSulc for MindBoggle
45 | normalise: sub-standardise #normalise,standardise, False
46 | modality: cortical_metrics #cortical_metrics, fMRI
47 | clipping: True #True, False
48 |
49 | logging:
50 | folder_to_save_model: "{}/logs/MS-SiT/{}" #{dataset},{modality},{task},{grid resolution},{configuration}
51 |
52 | ################################### MODEL ####################################
53 |
54 | transformer:
55 | dim: 48 #96, 48, 192
56 | depth: [2,2,6,2] # [1,1,3,1] [2,2,6,2]number of blocks per layer
57 | heads: [3,6,12,24] # number of heads per layer [3,6,12,24]
58 | channels: [0,1] #[3,1] for UKB and [0,1] for mindboggle
59 | window_size: [64,64,64,320] #320,80
60 | window_size_factor: 2
61 | mlp_ratio: 4
62 | num_classes: 35 #32 for MindBoggle and 35 for UKB
63 | dropout: 0.0
64 | attention_dropout: 0.0
65 | dropout_path: 0.0
66 | use_pos_emb: True
67 | shifted_attention: False
68 |
69 | ################################## TRAINING ###################################
70 |
71 | training:
72 | LR: 0.0003
73 | bs: 1
74 | bs_val: 1
75 | epochs: 100
76 | val_epoch: 1
77 | gpu: 1
78 | loss: diceCE #ce, dice, diceCE, dice+CE, gdl, gdl+CE
79 | lambda_dice: 1.0 #default 1.0
80 | lambda_ce: 1.0 #default 1.0
81 | testing: True
82 | init_weights: False #ssl, imagenet or False, transfer-learning, restart
83 | init_optim: False #restart, False
84 | finetuning: True
85 | finetune_layers: last-block #lastblock, encoder, decoder...
86 | save_ckpt: True
87 | log_training_epoch: 1 #default 5, fMRI 1
88 | log_iteration: 5
89 | early_stopping: 0
90 | sampler: False ###TO DO
91 | use_cross_validation: False #True
92 |
93 | weights:
94 | ssl_mpp: ../logs/MS-SiT/pretraining/ico_grid_UKB/scan_age/2/no_augmentation/2022-07-27-16:06:05-tiny-finetune/pretrained-net-best.pt
95 | imagenet: 'vit_tiny_patch16_224' #ViT(dim=192, depth=12, heads=3,mlp_dim=768,dim_head=64)
96 | #imagenet: 'vit_small_patch16_224' #ViT(dim=384, depth=12, heads=6,mlp_dim=1536,dim_head=64)
97 | #imagenet: 'vit_base_patch16_224' #ViT(dim=768, depth=12, heads=12,mlp_dim=3072,dim_head=64)
98 | transfer_learning: ../logs/UKB/cortical_metrics/segmentation/MS-SiT/False_mask/ico_grid_5/template/augmentation/2023-03-07-17:55:42-tiny-finetune/checkpoint_best.pth
99 | restart: ../logs/MindBoggle/cortical_metrics/segmentation/MS-SiT/False_mask/ico_grid_5/template/augmentation/2023-03-07-21:27:30-tiny-transfer-learning-finetune/checkpoint_best.pth
100 |
101 | augmentation: # prob of augmentation techniques need to sum to 1
102 | prob_augmentation: 0.0 #probability of using any of the augmentation technique; 0.0 to not use any augmentation
103 | prob_rotation: 0.2 #use rotation
104 | max_abs_deg_rotation: 15
105 | apply_symmetry: False
106 | symmetry_angle: 0
107 | prob_warping: 0.8 #use non-linear warping
108 | prob_shuffle: 0.0 #use shuffling of patches
109 | warp_ico: 2
110 |
111 | ################################## OPTIMISATION ##################################
112 |
113 | optimisation:
114 | optimiser: AdamW
115 | use_scheduler: False
116 | scheduler: CosineDecay # CosineDecay, StepLR, ReduceLROnPlateau
117 | warmup: True
118 | nbr_step_warmup: 50
119 |
120 | SGD:
121 | weight_decay: 0. #default 0.0
122 | momentum: 0.9 #default 0.0
123 | nesterov: False
124 |
125 | Adam:
126 | weight_decay: 0.00 #default 0.0
127 |
128 | AdamW:
129 | weight_decay: 0.01 #default 0.01
130 |
131 | #################################### SCHEDULER ####################################
132 |
133 | StepLR:
134 | stepsize: 20 # number of epochs
135 | decay: 0.5
136 |
137 | CosineDecay:
138 | T_max: 250 # number of iteration to go from high to low # number of iterations not number of epochs
139 | eta_min: 0.00001 #minimum learning rate
140 |
141 | ####################################################################################
142 |
143 |
144 |
--------------------------------------------------------------------------------
/config/MS-SiT/training/hparams.yml:
--------------------------------------------------------------------------------
1 | # @Author: Simon Dahan
2 | # @Last Modified time: 2022-01-12 14:11:23
3 |
4 | MODEL: ms-sit
5 |
6 | mesh_resolution:
7 | ico_mesh: 6 ## full mesh resolution
8 | ico_grid: 4 ## patching grid resolution
9 | reorder: True ## reorder the patches; only for ms-sit
10 |
11 | data:
12 | path_to_workdir: ..
13 | path_to_numpy: ../data/ico_res_{}/{}/{}
14 | task: scan_age #scan_age # birth_age
15 | configuration: template #template # native
16 | dataset: dHCP
17 | loader: metrics #metrics, numpy
18 |
19 | #### following parameters are only for "loader:metrics"
20 | path_to_metrics: /home/sd20/data/{}/metrics/ico_6_msm/base/regression_{}_space_features #TODO: replace with your datapath
21 | path_to_template: ../utils/
22 | normalise: sub-standardise
23 | clipping: True
24 | channels: [0,1,2,3]
25 | masking: True # True to mask the cut (need a mask template)
26 |
27 | logging:
28 | folder_to_save_model: ../logs/SiT/
29 |
30 | training:
31 | LR: 0.00001
32 | bs: 16
33 | bs_val: 1
34 | epochs: 30
35 | gpu: 1
36 | loss: mse #mse, l1
37 | testing: True
38 | val_epoch: 10
39 | load_weights_ssl: False
40 | load_weights_imagenet: False
41 | save_ckpt: True
42 | finetuning: True
43 |
44 | weights:
45 | ssl_mpp: '..' # path to .pt checkpoint
46 | imagenet: 'vit_tiny_patch16_224' #ViT(dim=192, depth=12, heads=3,mlp_dim=768,dim_head=64)
47 | #imagenet: 'vit_small_patch16_224' #ViT(dim=384, depth=12, heads=6,mlp_dim=1536,dim_head=64)
48 | #imagenet: 'vit_base_patch16_224' #ViT(dim=768, depth=12, heads=12,mlp_dim=3072,dim_head=64)
49 |
50 | transformer:
51 | dim: 96 #96, 48
52 | depth: [2,2,6,2] # [1,1,3,1] [2,2,6,2] number of blocks per layer
53 | heads: [3,6,12,24] # number of heads per layer
54 | channels: [0,1,2,3] #[0,1] for working memory and [0,1,2,3] for cortical metrics, rsns [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40], [UKB;rsns] [3,5,8,9,10]
55 | window_size: [64,64,64,80] #320,80, [64,64,64,320], [320,320,320,80]
56 | window_size_factor: 4
57 | mlp_ratio: 4
58 | num_classes: 1
59 | dropout: 0.0
60 | attention_dropout: 0.0
61 | drop_path_rate: 0.1 #0.1 default to try
62 | use_pos_emb: False
63 | shifted_attention: False
64 | model: ms-sit #ms-sit
65 |
66 | optimisation:
67 | optimiser: SGD
68 |
69 | Adam:
70 | weight_decay: 0.
71 |
72 | AdamW:
73 | weight_decay: 0.
74 | SGD:
75 | weight_decay: 0.
76 | momentum: 0.9
77 | nesterov: False
78 |
79 | StepLR:
80 | stepsize: 1000
81 | decay: 0.5
82 |
83 | CosineDecay:
84 | T_max: 5000
85 | eta_min: 0.0001
86 |
87 | sub_ico_0:
88 | num_patches: 20
89 | num_vertices: 2145
90 |
91 | sub_ico_1:
92 | num_patches: 80
93 | num_vertices: 561
94 |
95 | sub_ico_2:
96 | num_patches: 320
97 | num_vertices: 153
98 |
99 | sub_ico_3:
100 | num_patches: 1280
101 | num_vertices: 45
102 |
103 | sub_ico_4:
104 | num_patches: 5120
105 | num_vertices: 15
106 |
107 | sub_ico_5:
108 | num_patches: 20480
109 | num_vertices: 6
110 |
111 | augmentation:
112 | prob_augmentation: 0.0
113 | warp_ico: 2
--------------------------------------------------------------------------------
/config/SiT/attention_maps/hparams.yml:
--------------------------------------------------------------------------------
1 | # @Author: Simon Dahan @SD3004
2 | # @Date: 31-08-2022 01:00:00
3 |
4 | path_to_ckpt: '/home/sd20/workspace/github/metrics-lab/surface-vision-transformers/logs/SiT/2024-09-23-12:42:43-tiny-finetune'
5 | gpu: 0
6 | split_to_test: train #val, train
7 | subject_id: [2]
8 |
--------------------------------------------------------------------------------
/config/SiT/pretraining/mpp.yml:
--------------------------------------------------------------------------------
1 | # @Author: Simon Dahan
2 | # @Last Modified time: 2022-01-12 14:11:23
3 |
4 | MODEL: sit
5 | SSL: mpp
6 |
7 | mesh_resolution:
8 | ico_mesh: 6 ## full mesh resolution
9 | ico_grid: 2 ## patching grid resolution
10 |
11 | data:
12 | path_to_workdir: ..
13 | data_path: ../../data/{}/{}
14 | path_to_numpy: ../data/ico_res_{}/{}/{}
15 | task: scan_age #scan_age # birth_age
16 | configuration: template #template # native
17 | dataset: dHCP
18 | loader: numpy #metrics, numpy
19 |
20 | logging:
21 | folder_to_save_model: ../logs/SiT/
22 |
23 | training:
24 | LR: 0.0003
25 | bs: 32
26 | bs_val: 1
27 | epochs: 50
28 | gpu: 0
29 | loss: mse #mse, l1
30 | testing: False
31 | val_epoch: 5
32 | load_weights_ssl: False
33 | load_weights_imagenet: False
34 | save_ckpt: True
35 | finetuning: True
36 | dataset_ssl: 'dhcp'
37 |
38 | weights:
39 | ssl_mpp: '..' # path to .pt checkpoint
40 | imagenet: 'vit_tiny_patch16_224' #ViT(dim=192, depth=12, heads=3,mlp_dim=768,dim_head=64)
41 | #imagenet: 'vit_small_patch16_224' #ViT(dim=384, depth=12, heads=6,mlp_dim=1536,dim_head=64)
42 | #imagenet: 'vit_base_patch16_224' #ViT(dim=768, depth=12, heads=12,mlp_dim=3072,dim_head=64)
43 |
44 | transformer:
45 | dim: 192 #192, 384, 768
46 | depth: 12 #12, 12, 12
47 | heads: 3 #3, 6, 12
48 | mlp_dim: 768 #768, 1536, 3072 ## 4*dim according to DeiT
49 | pool: 'cls' # or 'mean'
50 | num_classes: 1
51 | num_channels: 4
52 | dim_head: 64 #64
53 | dropout: 0.0
54 | emb_dropout: 0.0
55 | model: SiT
56 |
57 | pretraining_mpp:
58 | mask_prob: 0.75 #0.5
59 | replace_prob: 0.8 #0.8
60 | swap_prob: 0.02 #0.02
61 |
62 | optimisation:
63 | optimiser: SGD
64 |
65 | Adam:
66 | weight_decay: 0.
67 |
68 | AdamW:
69 | weight_decay: 0.
70 | SGD:
71 | weight_decay: 0.
72 | momentum: 0.9
73 | nesterov: False
74 |
75 | StepLR:
76 | stepsize: 1000
77 | decay: 0.5
78 |
79 | CosineDecay:
80 | T_max: 5000
81 | eta_min: 0.0001
82 |
83 | sub_ico_0:
84 | num_patches: 20
85 | num_vertices: 2145
86 |
87 | sub_ico_1:
88 | num_patches: 80
89 | num_vertices: 561
90 |
91 | sub_ico_2:
92 | num_patches: 320
93 | num_vertices: 153
--------------------------------------------------------------------------------
/config/SiT/testing/hparams.yml:
--------------------------------------------------------------------------------
1 | # @Author: Simon Dahan
2 | # @Last Modified time: 2022-01-12 14:13:45
3 | resolution:
4 | ico: 6
5 | sub_ico: 2
6 |
7 | data:
8 | data_path: ../data/{}/{}/
9 | task: scan_age #scan_age #birth_age
10 | configuration: template #template #native
11 | split: validation
12 |
13 | testing:
14 | bs_test: 1
15 | gpu: 0
16 | path_to_ckpt: ../logs/SiT/2022-07-16-16:07:49-tiny-finetune
17 |
18 | transformer:
19 | dim: 192 #192, 384, 768
20 | depth: 12 #12, 12, 12
21 | heads: 3 #3, 6, 12
22 | mlp_dim: 768 #768, 1536, 3072 ## 4*dim according to DeiT
23 | pool: 'cls' # or 'mean'
24 | num_classes: 1
25 | num_channels: 4
26 | dim_head: 64 #64
27 | dropout: 0.0
28 | emb_dropout: 0.0
29 |
30 | sub_ico_1:
31 | num_patches: 80
32 | num_vertices: 561
33 |
34 | sub_ico_2:
35 | num_patches: 320
36 | num_vertices: 153
37 |
38 |
39 |
40 |
--------------------------------------------------------------------------------
/config/SiT/training/hparams.yml:
--------------------------------------------------------------------------------
1 | # @Author: Simon Dahan
2 | # @Last Modified time: 2022-01-12 14:11:23
3 |
4 | MODEL: sit
5 |
6 | mesh_resolution:
7 | ico_mesh: 6 ## full mesh resolution
8 | ico_grid: 2 ## patching grid resolution
9 | reorder: False ## reorder the patches; only for ms-sit
10 |
11 | data:
12 | path_to_workdir: ..
13 | path_to_numpy: ../data/ico_res_{}/{}/{}
14 | task: scan_age #scan_age # birth_age
15 | configuration: template #template # native
16 | dataset: dHCP
17 | loader: metrics #metrics, numpy
18 |
19 | #### following parameters are only for "loader:metrics"
20 | path_to_metrics: /home/sd20/data/{}/metrics/ico_6_msm/base/regression_{}_space_features #TODO: replace with your datapath
21 | path_to_template: ../utils/
22 | normalise: sub-standardise
23 | clipping: True
24 | channels: [0,1,2,3]
25 | masking: True # True to mask the cut (need a mask template)
26 |
27 | logging:
28 | folder_to_save_model: ../logs/SiT/
29 |
30 | training:
31 | LR: 0.0001
32 | bs: 128
33 | bs_val: 1 #always 1
34 | epochs: 200
35 | gpu: 0
36 | loss: mse #mse, l1
37 | testing: True
38 | val_epoch: 10
39 | load_weights_ssl: False
40 | load_weights_imagenet: False
41 | save_ckpt: True
42 | finetuning: True
43 |
44 | weights:
45 | ssl_mpp: '..' # path to .pt checkpoint
46 | imagenet: 'vit_tiny_patch16_224' #ViT(dim=192, depth=12, heads=3,mlp_dim=768,dim_head=64)
47 | #imagenet: 'vit_small_patch16_224' #ViT(dim=384, depth=12, heads=6,mlp_dim=1536,dim_head=64)
48 | #imagenet: 'vit_base_patch16_224' #ViT(dim=768, depth=12, heads=12,mlp_dim=3072,dim_head=64)
49 |
50 | transformer:
51 | dim: 192 #192, 384, 768
52 | depth: 12 #12, 12, 12
53 | heads: 3 #3, 6, 12
54 | mlp_dim: 768 #768, 1536, 3072 ## 4*dim according to DeiT
55 | pool: 'cls' # or 'mean'
56 | num_classes: 1
57 | num_channels: 4
58 | dim_head: 64 #64
59 | dropout: 0.0
60 | emb_dropout: 0.0
61 | model: SiT
62 |
63 | optimisation:
64 | optimiser: SGD #Adam, AdamW, SGD
65 |
66 | Adam:
67 | weight_decay: 0.
68 |
69 | AdamW:
70 | weight_decay: 0.
71 | SGD:
72 | weight_decay: 0.
73 | momentum: 0.9
74 | nesterov: False
75 |
76 | StepLR:
77 | stepsize: 1000
78 | decay: 0.5
79 |
80 | CosineDecay:
81 | T_max: 5000
82 | eta_min: 0.0001
83 |
84 | sub_ico_0:
85 | num_patches: 20
86 | num_vertices: 2145
87 |
88 | sub_ico_1:
89 | num_patches: 80
90 | num_vertices: 561
91 |
92 | sub_ico_2:
93 | num_patches: 320
94 | num_vertices: 153
95 |
96 | sub_ico_3:
97 | num_patches: 1280
98 | num_vertices: 45
99 |
100 | augmentation:
101 | prob_augmentation: 0.0
102 | warp_ico: 2
--------------------------------------------------------------------------------
/config/preprocessing/hparams.yml:
--------------------------------------------------------------------------------
1 | # @Author: Simon Dahan
2 | # @Last Modified time: 2022-01-12 15:36:28
3 | resolution:
4 | ico: 6
5 | sub_ico: 4
6 |
7 | data:
8 | data_path: /data/dHCP/
9 | label_path: ../../labels/dHCP/
10 | task: scan_age #scan_age #birth_age
11 | configuration: template #template #native
12 | split: test #train #validation
13 | channels: 4
14 |
15 | output:
16 | folder: ../../data/{}/{}/
17 |
18 | sub_ico_1:
19 | num_patches: 80
20 | num_vertices: 561
21 |
22 | sub_ico_2:
23 | num_patches: 320
24 | num_vertices: 153
25 |
26 | sub_ico_3:
27 | num_patches: 1280
28 | num_vertices: 45
29 |
30 | sub_ico_4:
31 | num_patches: 5120
32 | num_vertices: 15
33 |
34 | sub_ico_5:
35 | num_patches: 20480
36 | num_vertices: 6
37 |
--------------------------------------------------------------------------------
/docs/docker.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metrics-lab/surface-vision-transformers/377e5dd9fddb59344d673281e28d7bea13c305e3/docs/docker.md
--------------------------------------------------------------------------------
/docs/g-node.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metrics-lab/surface-vision-transformers/377e5dd9fddb59344d673281e28d7bea13c305e3/docs/g-node.png
--------------------------------------------------------------------------------
/docs/install.md:
--------------------------------------------------------------------------------
1 | ## Installation
2 |
3 | We provide some advice for installing all dependencies with conda.
4 |
5 | ### Prepare environment
6 |
7 | 1. create a conda environment
8 |
9 | ```
10 | conda create -n SiT python=3.7
11 | ```
12 |
13 | 2. activate the environment
14 |
15 | ```
16 | conda activate SiT
17 | ```
18 |
19 | 3. install pytorch dependencies
20 |
21 |
22 | Assuming GPU support, please check your CUDA version and select the appropriate installation command from [PyTorch](https://pytorch.org/). This codebase also works on CPU only.
23 |
24 | For CUDA 11.3 PyTorch version:
25 |
26 | ```
27 | conda install pytorch torchvision torchaudio cudatoolkit=11.3 -c pytorch -c nvidia
28 | ```
29 |
30 | 4. install requirements
31 |
32 | ```
33 | conda install -c conda-forge --file requirements.txt
34 | ```
35 |
36 | 5. (Optional) Extra libraries
37 |
38 | If you wish to use a warm-up scheduler, you will need to pip install the following:
39 |
40 | ```
41 | pip install git+https://github.com/ildoonet/pytorch-gradual-warmup-lr.git
42 | ```
43 |
--------------------------------------------------------------------------------
/docs/left_right_example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metrics-lab/surface-vision-transformers/377e5dd9fddb59344d673281e28d7bea13c305e3/docs/left_right_example.png
--------------------------------------------------------------------------------
/docs/mindboggle.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metrics-lab/surface-vision-transformers/377e5dd9fddb59344d673281e28d7bea13c305e3/docs/mindboggle.png
--------------------------------------------------------------------------------
/docs/sit.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metrics-lab/surface-vision-transformers/377e5dd9fddb59344d673281e28d7bea13c305e3/docs/sit.jpeg
--------------------------------------------------------------------------------
/docs/sit_gif.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metrics-lab/surface-vision-transformers/377e5dd9fddb59344d673281e28d7bea13c305e3/docs/sit_gif.gif
--------------------------------------------------------------------------------
/labels/MindBoggle/half/template/means.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metrics-lab/surface-vision-transformers/377e5dd9fddb59344d673281e28d7bea13c305e3/labels/MindBoggle/half/template/means.npy
--------------------------------------------------------------------------------
/labels/MindBoggle/half/template/stds.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metrics-lab/surface-vision-transformers/377e5dd9fddb59344d673281e28d7bea13c305e3/labels/MindBoggle/half/template/stds.npy
--------------------------------------------------------------------------------
/labels/MindBoggle/half/train.csv:
--------------------------------------------------------------------------------
1 | ids,labels
2 | OASIS-TRT-20-5.L.ico6_fs_LR.shape.gii,lh.labels.OASIS-TRT-20-5.ico6.DKT31.manual.label.gii
3 | OASIS-TRT-20-5.R.ico6_fs_LR.shape.gii,rh.labels.OASIS-TRT-20-5.ico6.DKT31.manual.label.gii
4 | OASIS-TRT-20-20.L.ico6_fs_LR.shape.gii,lh.labels.OASIS-TRT-20-20.ico6.DKT31.manual.label.gii
5 | OASIS-TRT-20-20.R.ico6_fs_LR.shape.gii,rh.labels.OASIS-TRT-20-20.ico6.DKT31.manual.label.gii
6 | NKI-TRT-20-16.L.ico6_fs_LR.shape.gii,lh.labels.NKI-TRT-20-16.ico6.DKT31.manual.label.gii
7 | NKI-TRT-20-16.R.ico6_fs_LR.shape.gii,rh.labels.NKI-TRT-20-16.ico6.DKT31.manual.label.gii
8 | MMRR-21-12.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-12.ico6.DKT31.manual.label.gii
9 | MMRR-21-12.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-12.ico6.DKT31.manual.label.gii
10 | NKI-RS-22-12.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-12.ico6.DKT31.manual.label.gii
11 | NKI-RS-22-12.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-12.ico6.DKT31.manual.label.gii
12 | OASIS-TRT-20-9.L.ico6_fs_LR.shape.gii,lh.labels.OASIS-TRT-20-9.ico6.DKT31.manual.label.gii
13 | OASIS-TRT-20-9.R.ico6_fs_LR.shape.gii,rh.labels.OASIS-TRT-20-9.ico6.DKT31.manual.label.gii
14 | NKI-TRT-20-7.L.ico6_fs_LR.shape.gii,lh.labels.NKI-TRT-20-7.ico6.DKT31.manual.label.gii
15 | NKI-TRT-20-7.R.ico6_fs_LR.shape.gii,rh.labels.NKI-TRT-20-7.ico6.DKT31.manual.label.gii
16 | NKI-RS-22-2.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-2.ico6.DKT31.manual.label.gii
17 | NKI-RS-22-2.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-2.ico6.DKT31.manual.label.gii
18 | NKI-RS-22-21.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-21.ico6.DKT31.manual.label.gii
19 | NKI-RS-22-21.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-21.ico6.DKT31.manual.label.gii
20 | NKI-TRT-20-9.L.ico6_fs_LR.shape.gii,lh.labels.NKI-TRT-20-9.ico6.DKT31.manual.label.gii
21 | NKI-TRT-20-9.R.ico6_fs_LR.shape.gii,rh.labels.NKI-TRT-20-9.ico6.DKT31.manual.label.gii
22 | NKI-TRT-20-2.L.ico6_fs_LR.shape.gii,lh.labels.NKI-TRT-20-2.ico6.DKT31.manual.label.gii
23 | NKI-TRT-20-2.R.ico6_fs_LR.shape.gii,rh.labels.NKI-TRT-20-2.ico6.DKT31.manual.label.gii
24 | HLN-12-9.L.ico6_fs_LR.shape.gii,lh.labels.HLN-12-9.ico6.DKT31.manual.label.gii
25 | HLN-12-9.R.ico6_fs_LR.shape.gii,rh.labels.HLN-12-9.ico6.DKT31.manual.label.gii
26 | NKI-RS-22-4.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-4.ico6.DKT31.manual.label.gii
27 | NKI-RS-22-4.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-4.ico6.DKT31.manual.label.gii
28 | MMRR-21-5.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-5.ico6.DKT31.manual.label.gii
29 | MMRR-21-5.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-5.ico6.DKT31.manual.label.gii
30 | NKI-TRT-20-6.L.ico6_fs_LR.shape.gii,lh.labels.NKI-TRT-20-6.ico6.DKT31.manual.label.gii
31 | NKI-TRT-20-6.R.ico6_fs_LR.shape.gii,rh.labels.NKI-TRT-20-6.ico6.DKT31.manual.label.gii
32 | NKI-RS-22-10.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-10.ico6.DKT31.manual.label.gii
33 | NKI-RS-22-10.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-10.ico6.DKT31.manual.label.gii
34 | NKI-RS-22-18.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-18.ico6.DKT31.manual.label.gii
35 | NKI-RS-22-18.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-18.ico6.DKT31.manual.label.gii
36 | HLN-12-10.L.ico6_fs_LR.shape.gii,lh.labels.HLN-12-10.ico6.DKT31.manual.label.gii
37 | HLN-12-10.R.ico6_fs_LR.shape.gii,rh.labels.HLN-12-10.ico6.DKT31.manual.label.gii
38 | OASIS-TRT-20-8.L.ico6_fs_LR.shape.gii,lh.labels.OASIS-TRT-20-8.ico6.DKT31.manual.label.gii
39 | OASIS-TRT-20-8.R.ico6_fs_LR.shape.gii,rh.labels.OASIS-TRT-20-8.ico6.DKT31.manual.label.gii
40 | NKI-TRT-20-13.L.ico6_fs_LR.shape.gii,lh.labels.NKI-TRT-20-13.ico6.DKT31.manual.label.gii
41 | NKI-TRT-20-13.R.ico6_fs_LR.shape.gii,rh.labels.NKI-TRT-20-13.ico6.DKT31.manual.label.gii
42 | NKI-TRT-20-18.L.ico6_fs_LR.shape.gii,lh.labels.NKI-TRT-20-18.ico6.DKT31.manual.label.gii
43 | NKI-TRT-20-18.R.ico6_fs_LR.shape.gii,rh.labels.NKI-TRT-20-18.ico6.DKT31.manual.label.gii
44 | MMRR-21-15.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-15.ico6.DKT31.manual.label.gii
45 | MMRR-21-15.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-15.ico6.DKT31.manual.label.gii
46 | OASIS-TRT-20-10.L.ico6_fs_LR.shape.gii,lh.labels.OASIS-TRT-20-10.ico6.DKT31.manual.label.gii
47 | OASIS-TRT-20-10.R.ico6_fs_LR.shape.gii,rh.labels.OASIS-TRT-20-10.ico6.DKT31.manual.label.gii
48 | HLN-12-7.L.ico6_fs_LR.shape.gii,lh.labels.HLN-12-7.ico6.DKT31.manual.label.gii
49 | HLN-12-7.R.ico6_fs_LR.shape.gii,rh.labels.HLN-12-7.ico6.DKT31.manual.label.gii
50 | MMRR-21-13.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-13.ico6.DKT31.manual.label.gii
51 | MMRR-21-13.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-13.ico6.DKT31.manual.label.gii
52 | HLN-12-1.L.ico6_fs_LR.shape.gii,lh.labels.HLN-12-1.ico6.DKT31.manual.label.gii
53 | HLN-12-1.R.ico6_fs_LR.shape.gii,rh.labels.HLN-12-1.ico6.DKT31.manual.label.gii
54 | NKI-TRT-20-15.L.ico6_fs_LR.shape.gii,lh.labels.NKI-TRT-20-15.ico6.DKT31.manual.label.gii
55 | NKI-TRT-20-15.R.ico6_fs_LR.shape.gii,rh.labels.NKI-TRT-20-15.ico6.DKT31.manual.label.gii
56 | NKI-TRT-20-12.L.ico6_fs_LR.shape.gii,lh.labels.NKI-TRT-20-12.ico6.DKT31.manual.label.gii
57 | NKI-TRT-20-12.R.ico6_fs_LR.shape.gii,rh.labels.NKI-TRT-20-12.ico6.DKT31.manual.label.gii
58 | NKI-TRT-20-17.L.ico6_fs_LR.shape.gii,lh.labels.NKI-TRT-20-17.ico6.DKT31.manual.label.gii
59 | NKI-TRT-20-17.R.ico6_fs_LR.shape.gii,rh.labels.NKI-TRT-20-17.ico6.DKT31.manual.label.gii
60 | NKI-TRT-20-5.L.ico6_fs_LR.shape.gii,lh.labels.NKI-TRT-20-5.ico6.DKT31.manual.label.gii
61 | NKI-TRT-20-5.R.ico6_fs_LR.shape.gii,rh.labels.NKI-TRT-20-5.ico6.DKT31.manual.label.gii
62 | MMRR-21-10.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-10.ico6.DKT31.manual.label.gii
63 | MMRR-21-10.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-10.ico6.DKT31.manual.label.gii
64 | OASIS-TRT-20-19.L.ico6_fs_LR.shape.gii,lh.labels.OASIS-TRT-20-19.ico6.DKT31.manual.label.gii
65 | OASIS-TRT-20-19.R.ico6_fs_LR.shape.gii,rh.labels.OASIS-TRT-20-19.ico6.DKT31.manual.label.gii
66 | MMRR-21-7.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-7.ico6.DKT31.manual.label.gii
67 | MMRR-21-7.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-7.ico6.DKT31.manual.label.gii
68 | OASIS-TRT-20-4.L.ico6_fs_LR.shape.gii,lh.labels.OASIS-TRT-20-4.ico6.DKT31.manual.label.gii
69 | OASIS-TRT-20-4.R.ico6_fs_LR.shape.gii,rh.labels.OASIS-TRT-20-4.ico6.DKT31.manual.label.gii
70 | NKI-RS-22-6.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-6.ico6.DKT31.manual.label.gii
71 | NKI-RS-22-6.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-6.ico6.DKT31.manual.label.gii
72 | NKI-TRT-20-10.L.ico6_fs_LR.shape.gii,lh.labels.NKI-TRT-20-10.ico6.DKT31.manual.label.gii
73 | NKI-TRT-20-10.R.ico6_fs_LR.shape.gii,rh.labels.NKI-TRT-20-10.ico6.DKT31.manual.label.gii
74 | MMRR-21-11.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-11.ico6.DKT31.manual.label.gii
75 | MMRR-21-11.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-11.ico6.DKT31.manual.label.gii
76 | NKI-RS-22-16.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-16.ico6.DKT31.manual.label.gii
77 | NKI-RS-22-16.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-16.ico6.DKT31.manual.label.gii
78 | NKI-RS-22-8.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-8.ico6.DKT31.manual.label.gii
79 | NKI-RS-22-8.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-8.ico6.DKT31.manual.label.gii
80 | MMRR-21-18.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-18.ico6.DKT31.manual.label.gii
81 | MMRR-21-18.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-18.ico6.DKT31.manual.label.gii
82 | NKI-RS-22-9.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-9.ico6.DKT31.manual.label.gii
83 | NKI-RS-22-9.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-9.ico6.DKT31.manual.label.gii
84 | OASIS-TRT-20-13.L.ico6_fs_LR.shape.gii,lh.labels.OASIS-TRT-20-13.ico6.DKT31.manual.label.gii
85 | OASIS-TRT-20-13.R.ico6_fs_LR.shape.gii,rh.labels.OASIS-TRT-20-13.ico6.DKT31.manual.label.gii
86 | MMRR-21-9.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-9.ico6.DKT31.manual.label.gii
87 | MMRR-21-9.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-9.ico6.DKT31.manual.label.gii
88 | HLN-12-6.L.ico6_fs_LR.shape.gii,lh.labels.HLN-12-6.ico6.DKT31.manual.label.gii
89 | HLN-12-6.R.ico6_fs_LR.shape.gii,rh.labels.HLN-12-6.ico6.DKT31.manual.label.gii
90 | OASIS-TRT-20-7.L.ico6_fs_LR.shape.gii,lh.labels.OASIS-TRT-20-7.ico6.DKT31.manual.label.gii
91 | OASIS-TRT-20-7.R.ico6_fs_LR.shape.gii,rh.labels.OASIS-TRT-20-7.ico6.DKT31.manual.label.gii
92 | MMRR-21-6.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-6.ico6.DKT31.manual.label.gii
93 | MMRR-21-6.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-6.ico6.DKT31.manual.label.gii
94 | MMRR-21-3.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-3.ico6.DKT31.manual.label.gii
95 | MMRR-21-3.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-3.ico6.DKT31.manual.label.gii
96 | NKI-RS-22-19.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-19.ico6.DKT31.manual.label.gii
97 | NKI-RS-22-19.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-19.ico6.DKT31.manual.label.gii
98 | NKI-RS-22-3.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-3.ico6.DKT31.manual.label.gii
99 | NKI-RS-22-3.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-3.ico6.DKT31.manual.label.gii
100 | NKI-RS-22-1.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-1.ico6.DKT31.manual.label.gii
101 | NKI-RS-22-1.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-1.ico6.DKT31.manual.label.gii
102 | HLN-12-12.L.ico6_fs_LR.shape.gii,lh.labels.HLN-12-12.ico6.DKT31.manual.label.gii
103 | HLN-12-12.R.ico6_fs_LR.shape.gii,rh.labels.HLN-12-12.ico6.DKT31.manual.label.gii
104 | MMRR-21-2.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-2.ico6.DKT31.manual.label.gii
105 | MMRR-21-2.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-2.ico6.DKT31.manual.label.gii
106 | MMRR-21-16.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-16.ico6.DKT31.manual.label.gii
107 | MMRR-21-16.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-16.ico6.DKT31.manual.label.gii
108 | MMRR-21-4.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-4.ico6.DKT31.manual.label.gii
109 | MMRR-21-4.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-4.ico6.DKT31.manual.label.gii
110 | OASIS-TRT-20-18.L.ico6_fs_LR.shape.gii,lh.labels.OASIS-TRT-20-18.ico6.DKT31.manual.label.gii
111 | OASIS-TRT-20-18.R.ico6_fs_LR.shape.gii,rh.labels.OASIS-TRT-20-18.ico6.DKT31.manual.label.gii
112 | MMRR-21-8.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-8.ico6.DKT31.manual.label.gii
113 | MMRR-21-8.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-8.ico6.DKT31.manual.label.gii
114 | NKI-RS-22-13.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-13.ico6.DKT31.manual.label.gii
115 | NKI-RS-22-13.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-13.ico6.DKT31.manual.label.gii
116 | NKI-TRT-20-11.L.ico6_fs_LR.shape.gii,lh.labels.NKI-TRT-20-11.ico6.DKT31.manual.label.gii
117 | NKI-TRT-20-11.R.ico6_fs_LR.shape.gii,rh.labels.NKI-TRT-20-11.ico6.DKT31.manual.label.gii
118 | OASIS-TRT-20-2.L.ico6_fs_LR.shape.gii,lh.labels.OASIS-TRT-20-2.ico6.DKT31.manual.label.gii
119 | OASIS-TRT-20-2.R.ico6_fs_LR.shape.gii,rh.labels.OASIS-TRT-20-2.ico6.DKT31.manual.label.gii
120 | NKI-RS-22-14.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-14.ico6.DKT31.manual.label.gii
121 | NKI-RS-22-14.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-14.ico6.DKT31.manual.label.gii
122 | MMRR-21-21.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-21.ico6.DKT31.manual.label.gii
123 | MMRR-21-21.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-21.ico6.DKT31.manual.label.gii
124 | NKI-RS-22-15.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-15.ico6.DKT31.manual.label.gii
125 | NKI-RS-22-15.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-15.ico6.DKT31.manual.label.gii
126 | NKI-TRT-20-14.L.ico6_fs_LR.shape.gii,lh.labels.NKI-TRT-20-14.ico6.DKT31.manual.label.gii
127 | NKI-TRT-20-14.R.ico6_fs_LR.shape.gii,rh.labels.NKI-TRT-20-14.ico6.DKT31.manual.label.gii
128 | MMRR-21-17.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-17.ico6.DKT31.manual.label.gii
129 | MMRR-21-17.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-17.ico6.DKT31.manual.label.gii
130 | NKI-RS-22-11.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-11.ico6.DKT31.manual.label.gii
131 | NKI-RS-22-11.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-11.ico6.DKT31.manual.label.gii
132 | OASIS-TRT-20-12.L.ico6_fs_LR.shape.gii,lh.labels.OASIS-TRT-20-12.ico6.DKT31.manual.label.gii
133 | OASIS-TRT-20-12.R.ico6_fs_LR.shape.gii,rh.labels.OASIS-TRT-20-12.ico6.DKT31.manual.label.gii
134 |
--------------------------------------------------------------------------------
/labels/MindBoggle/half/val.csv:
--------------------------------------------------------------------------------
1 | ids,labels
2 | NKI-RS-22-5.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-5.ico6.DKT31.manual.label.gii
3 | NKI-RS-22-5.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-5.ico6.DKT31.manual.label.gii
4 | NKI-TRT-20-1.L.ico6_fs_LR.shape.gii,lh.labels.NKI-TRT-20-1.ico6.DKT31.manual.label.gii
5 | NKI-TRT-20-1.R.ico6_fs_LR.shape.gii,rh.labels.NKI-TRT-20-1.ico6.DKT31.manual.label.gii
6 | HLN-12-8.L.ico6_fs_LR.shape.gii,lh.labels.HLN-12-8.ico6.DKT31.manual.label.gii
7 | HLN-12-8.R.ico6_fs_LR.shape.gii,rh.labels.HLN-12-8.ico6.DKT31.manual.label.gii
8 | Colin27-1.L.ico6_fs_LR.shape.gii,lh.labels.Colin27-1.ico6.DKT31.manual.label.gii
9 | Colin27-1.R.ico6_fs_LR.shape.gii,rh.labels.Colin27-1.ico6.DKT31.manual.label.gii
10 | NKI-TRT-20-8.L.ico6_fs_LR.shape.gii,lh.labels.NKI-TRT-20-8.ico6.DKT31.manual.label.gii
11 | NKI-TRT-20-8.R.ico6_fs_LR.shape.gii,rh.labels.NKI-TRT-20-8.ico6.DKT31.manual.label.gii
12 | HLN-12-5.L.ico6_fs_LR.shape.gii,lh.labels.HLN-12-5.ico6.DKT31.manual.label.gii
13 | HLN-12-5.R.ico6_fs_LR.shape.gii,rh.labels.HLN-12-5.ico6.DKT31.manual.label.gii
14 | OASIS-TRT-20-3.L.ico6_fs_LR.shape.gii,lh.labels.OASIS-TRT-20-3.ico6.DKT31.manual.label.gii
15 | OASIS-TRT-20-3.R.ico6_fs_LR.shape.gii,rh.labels.OASIS-TRT-20-3.ico6.DKT31.manual.label.gii
16 | OASIS-TRT-20-6.L.ico6_fs_LR.shape.gii,lh.labels.OASIS-TRT-20-6.ico6.DKT31.manual.label.gii
17 | OASIS-TRT-20-6.R.ico6_fs_LR.shape.gii,rh.labels.OASIS-TRT-20-6.ico6.DKT31.manual.label.gii
18 | NKI-RS-22-17.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-17.ico6.DKT31.manual.label.gii
19 | NKI-RS-22-17.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-17.ico6.DKT31.manual.label.gii
20 | NKI-RS-22-7.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-7.ico6.DKT31.manual.label.gii
21 | NKI-RS-22-7.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-7.ico6.DKT31.manual.label.gii
22 | MMRR-21-14.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-14.ico6.DKT31.manual.label.gii
23 | MMRR-21-14.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-14.ico6.DKT31.manual.label.gii
24 |
--------------------------------------------------------------------------------
/labels/MindBoggle/ids.csv:
--------------------------------------------------------------------------------
1 | ids,labels
2 | OASIS-TRT-20-5.L.ico6_fs_LR.shape.gii,lh.labels.OASIS-TRT-20-5.ico6.DKT31.manual.label.gii
3 | OASIS-TRT-20-5.R.ico6_fs_LR.shape.gii,rh.labels.OASIS-TRT-20-5.ico6.DKT31.manual.label.gii
4 | OASIS-TRT-20-20.L.ico6_fs_LR.shape.gii,lh.labels.OASIS-TRT-20-20.ico6.DKT31.manual.label.gii
5 | OASIS-TRT-20-20.R.ico6_fs_LR.shape.gii,rh.labels.OASIS-TRT-20-20.ico6.DKT31.manual.label.gii
6 | NKI-TRT-20-16.L.ico6_fs_LR.shape.gii,lh.labels.NKI-TRT-20-16.ico6.DKT31.manual.label.gii
7 | NKI-TRT-20-16.R.ico6_fs_LR.shape.gii,rh.labels.NKI-TRT-20-16.ico6.DKT31.manual.label.gii
8 | MMRR-21-12.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-12.ico6.DKT31.manual.label.gii
9 | MMRR-21-12.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-12.ico6.DKT31.manual.label.gii
10 | NKI-RS-22-12.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-12.ico6.DKT31.manual.label.gii
11 | NKI-RS-22-12.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-12.ico6.DKT31.manual.label.gii
12 | OASIS-TRT-20-9.L.ico6_fs_LR.shape.gii,lh.labels.OASIS-TRT-20-9.ico6.DKT31.manual.label.gii
13 | OASIS-TRT-20-9.R.ico6_fs_LR.shape.gii,rh.labels.OASIS-TRT-20-9.ico6.DKT31.manual.label.gii
14 | NKI-TRT-20-7.L.ico6_fs_LR.shape.gii,lh.labels.NKI-TRT-20-7.ico6.DKT31.manual.label.gii
15 | NKI-TRT-20-7.R.ico6_fs_LR.shape.gii,rh.labels.NKI-TRT-20-7.ico6.DKT31.manual.label.gii
16 | NKI-RS-22-2.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-2.ico6.DKT31.manual.label.gii
17 | NKI-RS-22-2.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-2.ico6.DKT31.manual.label.gii
18 | NKI-RS-22-21.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-21.ico6.DKT31.manual.label.gii
19 | NKI-RS-22-21.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-21.ico6.DKT31.manual.label.gii
20 | NKI-TRT-20-9.L.ico6_fs_LR.shape.gii,lh.labels.NKI-TRT-20-9.ico6.DKT31.manual.label.gii
21 | NKI-TRT-20-9.R.ico6_fs_LR.shape.gii,rh.labels.NKI-TRT-20-9.ico6.DKT31.manual.label.gii
22 | NKI-TRT-20-2.L.ico6_fs_LR.shape.gii,lh.labels.NKI-TRT-20-2.ico6.DKT31.manual.label.gii
23 | NKI-TRT-20-2.R.ico6_fs_LR.shape.gii,rh.labels.NKI-TRT-20-2.ico6.DKT31.manual.label.gii
24 | HLN-12-9.L.ico6_fs_LR.shape.gii,lh.labels.HLN-12-9.ico6.DKT31.manual.label.gii
25 | HLN-12-9.R.ico6_fs_LR.shape.gii,rh.labels.HLN-12-9.ico6.DKT31.manual.label.gii
26 | NKI-RS-22-4.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-4.ico6.DKT31.manual.label.gii
27 | NKI-RS-22-4.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-4.ico6.DKT31.manual.label.gii
28 | MMRR-21-5.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-5.ico6.DKT31.manual.label.gii
29 | MMRR-21-5.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-5.ico6.DKT31.manual.label.gii
30 | NKI-TRT-20-6.L.ico6_fs_LR.shape.gii,lh.labels.NKI-TRT-20-6.ico6.DKT31.manual.label.gii
31 | NKI-TRT-20-6.R.ico6_fs_LR.shape.gii,rh.labels.NKI-TRT-20-6.ico6.DKT31.manual.label.gii
32 | NKI-RS-22-10.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-10.ico6.DKT31.manual.label.gii
33 | NKI-RS-22-10.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-10.ico6.DKT31.manual.label.gii
34 | NKI-RS-22-18.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-18.ico6.DKT31.manual.label.gii
35 | NKI-RS-22-18.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-18.ico6.DKT31.manual.label.gii
36 | HLN-12-10.L.ico6_fs_LR.shape.gii,lh.labels.HLN-12-10.ico6.DKT31.manual.label.gii
37 | HLN-12-10.R.ico6_fs_LR.shape.gii,rh.labels.HLN-12-10.ico6.DKT31.manual.label.gii
38 | OASIS-TRT-20-8.L.ico6_fs_LR.shape.gii,lh.labels.OASIS-TRT-20-8.ico6.DKT31.manual.label.gii
39 | OASIS-TRT-20-8.R.ico6_fs_LR.shape.gii,rh.labels.OASIS-TRT-20-8.ico6.DKT31.manual.label.gii
40 | NKI-TRT-20-13.L.ico6_fs_LR.shape.gii,lh.labels.NKI-TRT-20-13.ico6.DKT31.manual.label.gii
41 | NKI-TRT-20-13.R.ico6_fs_LR.shape.gii,rh.labels.NKI-TRT-20-13.ico6.DKT31.manual.label.gii
42 | NKI-TRT-20-18.L.ico6_fs_LR.shape.gii,lh.labels.NKI-TRT-20-18.ico6.DKT31.manual.label.gii
43 | NKI-TRT-20-18.R.ico6_fs_LR.shape.gii,rh.labels.NKI-TRT-20-18.ico6.DKT31.manual.label.gii
44 | MMRR-21-15.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-15.ico6.DKT31.manual.label.gii
45 | MMRR-21-15.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-15.ico6.DKT31.manual.label.gii
46 | OASIS-TRT-20-10.L.ico6_fs_LR.shape.gii,lh.labels.OASIS-TRT-20-10.ico6.DKT31.manual.label.gii
47 | OASIS-TRT-20-10.R.ico6_fs_LR.shape.gii,rh.labels.OASIS-TRT-20-10.ico6.DKT31.manual.label.gii
48 | HLN-12-7.L.ico6_fs_LR.shape.gii,lh.labels.HLN-12-7.ico6.DKT31.manual.label.gii
49 | HLN-12-7.R.ico6_fs_LR.shape.gii,rh.labels.HLN-12-7.ico6.DKT31.manual.label.gii
50 | MMRR-21-13.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-13.ico6.DKT31.manual.label.gii
51 | MMRR-21-13.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-13.ico6.DKT31.manual.label.gii
52 | HLN-12-1.L.ico6_fs_LR.shape.gii,lh.labels.HLN-12-1.ico6.DKT31.manual.label.gii
53 | HLN-12-1.R.ico6_fs_LR.shape.gii,rh.labels.HLN-12-1.ico6.DKT31.manual.label.gii
54 | NKI-TRT-20-15.L.ico6_fs_LR.shape.gii,lh.labels.NKI-TRT-20-15.ico6.DKT31.manual.label.gii
55 | NKI-TRT-20-15.R.ico6_fs_LR.shape.gii,rh.labels.NKI-TRT-20-15.ico6.DKT31.manual.label.gii
56 | NKI-TRT-20-12.L.ico6_fs_LR.shape.gii,lh.labels.NKI-TRT-20-12.ico6.DKT31.manual.label.gii
57 | NKI-TRT-20-12.R.ico6_fs_LR.shape.gii,rh.labels.NKI-TRT-20-12.ico6.DKT31.manual.label.gii
58 | NKI-TRT-20-17.L.ico6_fs_LR.shape.gii,lh.labels.NKI-TRT-20-17.ico6.DKT31.manual.label.gii
59 | NKI-TRT-20-17.R.ico6_fs_LR.shape.gii,rh.labels.NKI-TRT-20-17.ico6.DKT31.manual.label.gii
60 | NKI-TRT-20-5.L.ico6_fs_LR.shape.gii,lh.labels.NKI-TRT-20-5.ico6.DKT31.manual.label.gii
61 | NKI-TRT-20-5.R.ico6_fs_LR.shape.gii,rh.labels.NKI-TRT-20-5.ico6.DKT31.manual.label.gii
62 | MMRR-21-10.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-10.ico6.DKT31.manual.label.gii
63 | MMRR-21-10.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-10.ico6.DKT31.manual.label.gii
64 | OASIS-TRT-20-19.L.ico6_fs_LR.shape.gii,lh.labels.OASIS-TRT-20-19.ico6.DKT31.manual.label.gii
65 | OASIS-TRT-20-19.R.ico6_fs_LR.shape.gii,rh.labels.OASIS-TRT-20-19.ico6.DKT31.manual.label.gii
66 | MMRR-21-7.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-7.ico6.DKT31.manual.label.gii
67 | MMRR-21-7.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-7.ico6.DKT31.manual.label.gii
68 | OASIS-TRT-20-4.L.ico6_fs_LR.shape.gii,lh.labels.OASIS-TRT-20-4.ico6.DKT31.manual.label.gii
69 | OASIS-TRT-20-4.R.ico6_fs_LR.shape.gii,rh.labels.OASIS-TRT-20-4.ico6.DKT31.manual.label.gii
70 | NKI-RS-22-6.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-6.ico6.DKT31.manual.label.gii
71 | NKI-RS-22-6.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-6.ico6.DKT31.manual.label.gii
72 | NKI-TRT-20-10.L.ico6_fs_LR.shape.gii,lh.labels.NKI-TRT-20-10.ico6.DKT31.manual.label.gii
73 | NKI-TRT-20-10.R.ico6_fs_LR.shape.gii,rh.labels.NKI-TRT-20-10.ico6.DKT31.manual.label.gii
74 | MMRR-21-11.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-11.ico6.DKT31.manual.label.gii
75 | MMRR-21-11.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-11.ico6.DKT31.manual.label.gii
76 | NKI-RS-22-16.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-16.ico6.DKT31.manual.label.gii
77 | NKI-RS-22-16.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-16.ico6.DKT31.manual.label.gii
78 | NKI-RS-22-8.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-8.ico6.DKT31.manual.label.gii
79 | NKI-RS-22-8.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-8.ico6.DKT31.manual.label.gii
80 | MMRR-21-18.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-18.ico6.DKT31.manual.label.gii
81 | MMRR-21-18.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-18.ico6.DKT31.manual.label.gii
82 | NKI-RS-22-9.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-9.ico6.DKT31.manual.label.gii
83 | NKI-RS-22-9.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-9.ico6.DKT31.manual.label.gii
84 | OASIS-TRT-20-13.L.ico6_fs_LR.shape.gii,lh.labels.OASIS-TRT-20-13.ico6.DKT31.manual.label.gii
85 | OASIS-TRT-20-13.R.ico6_fs_LR.shape.gii,rh.labels.OASIS-TRT-20-13.ico6.DKT31.manual.label.gii
86 | MMRR-21-9.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-9.ico6.DKT31.manual.label.gii
87 | MMRR-21-9.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-9.ico6.DKT31.manual.label.gii
88 | HLN-12-6.L.ico6_fs_LR.shape.gii,lh.labels.HLN-12-6.ico6.DKT31.manual.label.gii
89 | HLN-12-6.R.ico6_fs_LR.shape.gii,rh.labels.HLN-12-6.ico6.DKT31.manual.label.gii
90 | OASIS-TRT-20-7.L.ico6_fs_LR.shape.gii,lh.labels.OASIS-TRT-20-7.ico6.DKT31.manual.label.gii
91 | OASIS-TRT-20-7.R.ico6_fs_LR.shape.gii,rh.labels.OASIS-TRT-20-7.ico6.DKT31.manual.label.gii
92 | MMRR-21-6.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-6.ico6.DKT31.manual.label.gii
93 | MMRR-21-6.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-6.ico6.DKT31.manual.label.gii
94 | MMRR-21-3.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-3.ico6.DKT31.manual.label.gii
95 | MMRR-21-3.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-3.ico6.DKT31.manual.label.gii
96 | NKI-RS-22-19.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-19.ico6.DKT31.manual.label.gii
97 | NKI-RS-22-19.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-19.ico6.DKT31.manual.label.gii
98 | NKI-RS-22-3.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-3.ico6.DKT31.manual.label.gii
99 | NKI-RS-22-3.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-3.ico6.DKT31.manual.label.gii
100 | NKI-RS-22-1.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-1.ico6.DKT31.manual.label.gii
101 | NKI-RS-22-1.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-1.ico6.DKT31.manual.label.gii
102 | HLN-12-12.L.ico6_fs_LR.shape.gii,lh.labels.HLN-12-12.ico6.DKT31.manual.label.gii
103 | HLN-12-12.R.ico6_fs_LR.shape.gii,rh.labels.HLN-12-12.ico6.DKT31.manual.label.gii
104 | MMRR-21-2.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-2.ico6.DKT31.manual.label.gii
105 | MMRR-21-2.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-2.ico6.DKT31.manual.label.gii
106 | MMRR-21-16.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-16.ico6.DKT31.manual.label.gii
107 | MMRR-21-16.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-16.ico6.DKT31.manual.label.gii
108 | MMRR-21-4.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-4.ico6.DKT31.manual.label.gii
109 | MMRR-21-4.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-4.ico6.DKT31.manual.label.gii
110 | OASIS-TRT-20-18.L.ico6_fs_LR.shape.gii,lh.labels.OASIS-TRT-20-18.ico6.DKT31.manual.label.gii
111 | OASIS-TRT-20-18.R.ico6_fs_LR.shape.gii,rh.labels.OASIS-TRT-20-18.ico6.DKT31.manual.label.gii
112 | MMRR-21-8.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-8.ico6.DKT31.manual.label.gii
113 | MMRR-21-8.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-8.ico6.DKT31.manual.label.gii
114 | NKI-RS-22-13.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-13.ico6.DKT31.manual.label.gii
115 | NKI-RS-22-13.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-13.ico6.DKT31.manual.label.gii
116 | NKI-TRT-20-11.L.ico6_fs_LR.shape.gii,lh.labels.NKI-TRT-20-11.ico6.DKT31.manual.label.gii
117 | NKI-TRT-20-11.R.ico6_fs_LR.shape.gii,rh.labels.NKI-TRT-20-11.ico6.DKT31.manual.label.gii
118 | OASIS-TRT-20-2.L.ico6_fs_LR.shape.gii,lh.labels.OASIS-TRT-20-2.ico6.DKT31.manual.label.gii
119 | OASIS-TRT-20-2.R.ico6_fs_LR.shape.gii,rh.labels.OASIS-TRT-20-2.ico6.DKT31.manual.label.gii
120 | NKI-RS-22-14.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-14.ico6.DKT31.manual.label.gii
121 | NKI-RS-22-14.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-14.ico6.DKT31.manual.label.gii
122 | MMRR-21-21.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-21.ico6.DKT31.manual.label.gii
123 | MMRR-21-21.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-21.ico6.DKT31.manual.label.gii
124 | NKI-RS-22-15.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-15.ico6.DKT31.manual.label.gii
125 | NKI-RS-22-15.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-15.ico6.DKT31.manual.label.gii
126 | NKI-TRT-20-14.L.ico6_fs_LR.shape.gii,lh.labels.NKI-TRT-20-14.ico6.DKT31.manual.label.gii
127 | NKI-TRT-20-14.R.ico6_fs_LR.shape.gii,rh.labels.NKI-TRT-20-14.ico6.DKT31.manual.label.gii
128 | MMRR-21-17.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-17.ico6.DKT31.manual.label.gii
129 | MMRR-21-17.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-17.ico6.DKT31.manual.label.gii
130 | NKI-RS-22-11.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-11.ico6.DKT31.manual.label.gii
131 | NKI-RS-22-11.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-11.ico6.DKT31.manual.label.gii
132 | OASIS-TRT-20-12.L.ico6_fs_LR.shape.gii,lh.labels.OASIS-TRT-20-12.ico6.DKT31.manual.label.gii
133 | OASIS-TRT-20-12.R.ico6_fs_LR.shape.gii,rh.labels.OASIS-TRT-20-12.ico6.DKT31.manual.label.gii
134 | NKI-RS-22-5.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-5.ico6.DKT31.manual.label.gii
135 | NKI-RS-22-5.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-5.ico6.DKT31.manual.label.gii
136 | NKI-TRT-20-1.L.ico6_fs_LR.shape.gii,lh.labels.NKI-TRT-20-1.ico6.DKT31.manual.label.gii
137 | NKI-TRT-20-1.R.ico6_fs_LR.shape.gii,rh.labels.NKI-TRT-20-1.ico6.DKT31.manual.label.gii
138 | HLN-12-8.L.ico6_fs_LR.shape.gii,lh.labels.HLN-12-8.ico6.DKT31.manual.label.gii
139 | HLN-12-8.R.ico6_fs_LR.shape.gii,rh.labels.HLN-12-8.ico6.DKT31.manual.label.gii
140 | Colin27-1.L.ico6_fs_LR.shape.gii,lh.labels.Colin27-1.ico6.DKT31.manual.label.gii
141 | Colin27-1.R.ico6_fs_LR.shape.gii,rh.labels.Colin27-1.ico6.DKT31.manual.label.gii
142 | NKI-TRT-20-8.L.ico6_fs_LR.shape.gii,lh.labels.NKI-TRT-20-8.ico6.DKT31.manual.label.gii
143 | NKI-TRT-20-8.R.ico6_fs_LR.shape.gii,rh.labels.NKI-TRT-20-8.ico6.DKT31.manual.label.gii
144 | HLN-12-5.L.ico6_fs_LR.shape.gii,lh.labels.HLN-12-5.ico6.DKT31.manual.label.gii
145 | HLN-12-5.R.ico6_fs_LR.shape.gii,rh.labels.HLN-12-5.ico6.DKT31.manual.label.gii
146 | OASIS-TRT-20-3.L.ico6_fs_LR.shape.gii,lh.labels.OASIS-TRT-20-3.ico6.DKT31.manual.label.gii
147 | OASIS-TRT-20-3.R.ico6_fs_LR.shape.gii,rh.labels.OASIS-TRT-20-3.ico6.DKT31.manual.label.gii
148 | OASIS-TRT-20-6.L.ico6_fs_LR.shape.gii,lh.labels.OASIS-TRT-20-6.ico6.DKT31.manual.label.gii
149 | OASIS-TRT-20-6.R.ico6_fs_LR.shape.gii,rh.labels.OASIS-TRT-20-6.ico6.DKT31.manual.label.gii
150 | NKI-RS-22-17.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-17.ico6.DKT31.manual.label.gii
151 | NKI-RS-22-17.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-17.ico6.DKT31.manual.label.gii
152 | NKI-RS-22-7.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-7.ico6.DKT31.manual.label.gii
153 | NKI-RS-22-7.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-7.ico6.DKT31.manual.label.gii
154 | MMRR-21-14.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-14.ico6.DKT31.manual.label.gii
155 | MMRR-21-14.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-14.ico6.DKT31.manual.label.gii
156 | MMRR-21-20.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-20.ico6.DKT31.manual.label.gii
157 | MMRR-21-20.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-20.ico6.DKT31.manual.label.gii
158 | HLN-12-4.L.ico6_fs_LR.shape.gii,lh.labels.HLN-12-4.ico6.DKT31.manual.label.gii
159 | HLN-12-4.R.ico6_fs_LR.shape.gii,rh.labels.HLN-12-4.ico6.DKT31.manual.label.gii
160 | NKI-TRT-20-19.L.ico6_fs_LR.shape.gii,lh.labels.NKI-TRT-20-19.ico6.DKT31.manual.label.gii
161 | NKI-TRT-20-19.R.ico6_fs_LR.shape.gii,rh.labels.NKI-TRT-20-19.ico6.DKT31.manual.label.gii
162 | HLN-12-11.L.ico6_fs_LR.shape.gii,lh.labels.HLN-12-11.ico6.DKT31.manual.label.gii
163 | HLN-12-11.R.ico6_fs_LR.shape.gii,rh.labels.HLN-12-11.ico6.DKT31.manual.label.gii
164 | HLN-12-2.L.ico6_fs_LR.shape.gii,lh.labels.HLN-12-2.ico6.DKT31.manual.label.gii
165 | HLN-12-2.R.ico6_fs_LR.shape.gii,rh.labels.HLN-12-2.ico6.DKT31.manual.label.gii
166 | MMRR-21-19.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-19.ico6.DKT31.manual.label.gii
167 | MMRR-21-19.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-19.ico6.DKT31.manual.label.gii
168 | NKI-TRT-20-4.L.ico6_fs_LR.shape.gii,lh.labels.NKI-TRT-20-4.ico6.DKT31.manual.label.gii
169 | NKI-TRT-20-4.R.ico6_fs_LR.shape.gii,rh.labels.NKI-TRT-20-4.ico6.DKT31.manual.label.gii
170 | MMRR-21-1.L.ico6_fs_LR.shape.gii,lh.labels.MMRR-21-1.ico6.DKT31.manual.label.gii
171 | MMRR-21-1.R.ico6_fs_LR.shape.gii,rh.labels.MMRR-21-1.ico6.DKT31.manual.label.gii
172 | NKI-RS-22-20.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-20.ico6.DKT31.manual.label.gii
173 | NKI-RS-22-20.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-20.ico6.DKT31.manual.label.gii
174 | OASIS-TRT-20-17.L.ico6_fs_LR.shape.gii,lh.labels.OASIS-TRT-20-17.ico6.DKT31.manual.label.gii
175 | OASIS-TRT-20-17.R.ico6_fs_LR.shape.gii,rh.labels.OASIS-TRT-20-17.ico6.DKT31.manual.label.gii
176 | NKI-RS-22-22.L.ico6_fs_LR.shape.gii,lh.labels.NKI-RS-22-22.ico6.DKT31.manual.label.gii
177 | NKI-RS-22-22.R.ico6_fs_LR.shape.gii,rh.labels.NKI-RS-22-22.ico6.DKT31.manual.label.gii
178 |
--------------------------------------------------------------------------------
/labels/dHCP/birth_age/half/native/means.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metrics-lab/surface-vision-transformers/377e5dd9fddb59344d673281e28d7bea13c305e3/labels/dHCP/birth_age/half/native/means.npy
--------------------------------------------------------------------------------
/labels/dHCP/birth_age/half/native/stds.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metrics-lab/surface-vision-transformers/377e5dd9fddb59344d673281e28d7bea13c305e3/labels/dHCP/birth_age/half/native/stds.npy
--------------------------------------------------------------------------------
/labels/dHCP/birth_age/half/template/means.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metrics-lab/surface-vision-transformers/377e5dd9fddb59344d673281e28d7bea13c305e3/labels/dHCP/birth_age/half/template/means.npy
--------------------------------------------------------------------------------
/labels/dHCP/birth_age/half/template/stds.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metrics-lab/surface-vision-transformers/377e5dd9fddb59344d673281e28d7bea13c305e3/labels/dHCP/birth_age/half/template/stds.npy
--------------------------------------------------------------------------------
/labels/dHCP/birth_age/half/val.csv:
--------------------------------------------------------------------------------
1 | ,ids,labels,confounds
2 | 0,sub-CC00511XX08_ses-149000_L.shape.gii,40.71428571,41.0
3 | 1,sub-CC00511XX08_ses-149000_R.shape.gii,40.71428571,41.0
4 | 2,sub-CC01223XX11_ses-149330_L.shape.gii,38.71428571,40.86
5 | 3,sub-CC01223XX11_ses-149330_R.shape.gii,38.71428571,40.86
6 | 4,sub-CC01194XX16_ses-149530_L.shape.gii,42.0,43.86
7 | 5,sub-CC01194XX16_ses-149530_R.shape.gii,42.0,43.86
8 | 6,sub-CC00520XX09_ses-150201_L.shape.gii,38.0,38.29
9 | 7,sub-CC00520XX09_ses-150201_R.shape.gii,38.0,38.29
10 | 8,sub-CC01207XX11_ses-150330_L.shape.gii,40.28571429,43.0
11 | 9,sub-CC01207XX11_ses-150330_R.shape.gii,40.28571429,43.0
12 | 10,sub-CC00512XX09_ses-150700_L.shape.gii,38.85714286,39.14
13 | 11,sub-CC00512XX09_ses-150700_R.shape.gii,38.85714286,39.14
14 | 12,sub-CC00515XX12_ses-152600_L.shape.gii,37.85714286,38.86
15 | 13,sub-CC00515XX12_ses-152600_R.shape.gii,37.85714286,38.86
16 | 14,sub-CC00516XX13_ses-152902_L.shape.gii,40.14285714,40.57
17 | 15,sub-CC00516XX13_ses-152902_R.shape.gii,40.14285714,40.57
18 | 16,sub-CC00564XX12_ses-154100_L.shape.gii,40.42857143,41.86
19 | 17,sub-CC00564XX12_ses-154100_R.shape.gii,40.42857143,41.86
20 | 18,sub-CC00532XX13_ses-154200_L.shape.gii,41.14285714,41.43
21 | 19,sub-CC00532XX13_ses-154200_R.shape.gii,41.14285714,41.43
22 | 20,sub-CC01200XX04_ses-154330_L.shape.gii,38.85714286,41.43
23 | 21,sub-CC01200XX04_ses-154330_R.shape.gii,38.85714286,41.43
24 | 22,sub-CC00534XX15_ses-155100_L.shape.gii,39.0,39.14
25 | 23,sub-CC00534XX15_ses-155100_R.shape.gii,39.0,39.14
26 | 24,sub-CC01236XX16_ses-155830_L.shape.gii,40.71428571,44.43
27 | 25,sub-CC01236XX16_ses-155830_R.shape.gii,40.71428571,44.43
28 | 26,sub-CC00438XX18_ses-156800_L.shape.gii,41.14285714,43.43
29 | 27,sub-CC00438XX18_ses-156800_R.shape.gii,41.14285714,43.43
30 | 28,sub-CC00536XX17_ses-156900_L.shape.gii,38.85714286,42.71
31 | 29,sub-CC00536XX17_ses-156900_R.shape.gii,38.85714286,42.71
32 | 30,sub-CC00537XX18_ses-157100_L.shape.gii,38.85714286,39.0
33 | 31,sub-CC00537XX18_ses-157100_R.shape.gii,38.85714286,39.0
34 | 32,sub-CC00538XX19_ses-157200_L.shape.gii,40.14285714,40.43
35 | 33,sub-CC00538XX19_ses-157200_R.shape.gii,40.14285714,40.43
36 | 34,sub-CC00547XX20_ses-157300_L.shape.gii,38.71428571,39.0
37 | 35,sub-CC00547XX20_ses-157300_R.shape.gii,38.71428571,39.0
38 | 36,sub-CC00548XX21_ses-157400_L.shape.gii,40.42857143,40.71
39 | 37,sub-CC00548XX21_ses-157400_R.shape.gii,40.42857143,40.71
40 | 38,sub-CC00549XX22_ses-157600_L.shape.gii,42.0,42.14
41 | 39,sub-CC00549XX22_ses-157600_R.shape.gii,42.0,42.14
42 | 40,sub-CC00550XX06_ses-157800_L.shape.gii,39.85714286,40.0
43 | 41,sub-CC00550XX06_ses-157800_R.shape.gii,39.85714286,40.0
44 | 42,sub-CC00551XX07_ses-158001_L.shape.gii,39.71428571,39.71
45 | 43,sub-CC00551XX07_ses-158001_R.shape.gii,39.71428571,39.71
46 | 44,sub-CC00553XX09_ses-159000_L.shape.gii,41.42857143,41.57
47 | 45,sub-CC00553XX09_ses-159000_R.shape.gii,41.42857143,41.57
48 | 46,sub-CC00552XX08_ses-159300_L.shape.gii,39.0,39.14
49 | 47,sub-CC00552XX08_ses-159300_R.shape.gii,39.0,39.14
50 | 48,sub-CC00554XX10_ses-160200_L.shape.gii,40.85714286,41.14
51 | 49,sub-CC00554XX10_ses-160200_R.shape.gii,40.85714286,41.14
52 | 50,sub-CC00556XX12_ses-162902_L.shape.gii,38.42857143,41.57
53 | 51,sub-CC00556XX12_ses-162902_R.shape.gii,38.42857143,41.57
54 | 52,sub-CC00540XX13_ses-164400_L.shape.gii,40.71428571,41.29
55 | 53,sub-CC00540XX13_ses-164400_R.shape.gii,40.71428571,41.29
56 | 54,sub-CC00542XX15_ses-165800_L.shape.gii,40.42857143,42.71
57 | 55,sub-CC00542XX15_ses-165800_R.shape.gii,40.42857143,42.71
58 | 56,sub-CC00544XX17_ses-169300_L.shape.gii,41.42857143,41.86
59 | 57,sub-CC00544XX17_ses-169300_R.shape.gii,41.42857143,41.86
60 | 58,sub-CC00580XX12_ses-173700_L.shape.gii,41.14285714,41.43
61 | 59,sub-CC00580XX12_ses-173700_R.shape.gii,41.14285714,41.43
62 | 60,sub-CC00461XX08_ses-175100_L.shape.gii,41.42857143,43.86
63 | 61,sub-CC00461XX08_ses-175100_R.shape.gii,41.42857143,43.86
64 | 62,sub-CC00581XX13_ses-177101_L.shape.gii,39.71428571,39.86
65 | 63,sub-CC00581XX13_ses-177101_R.shape.gii,39.71428571,39.86
66 | 64,sub-CC00582XX14_ses-178300_L.shape.gii,38.14285714,39.0
67 | 65,sub-CC00582XX14_ses-178300_R.shape.gii,38.14285714,39.0
68 | 66,sub-CC00583XX15_ses-178600_L.shape.gii,40.42857143,42.0
69 | 67,sub-CC00583XX15_ses-178600_R.shape.gii,40.42857143,42.0
70 | 68,sub-CC00584XX16_ses-178800_L.shape.gii,41.85714286,42.0
71 | 69,sub-CC00584XX16_ses-178800_R.shape.gii,41.85714286,42.0
72 | 70,sub-CC00585XX17_ses-178900_L.shape.gii,39.57142857,39.71
73 | 71,sub-CC00585XX17_ses-178900_R.shape.gii,39.57142857,39.71
74 | 72,sub-CC00586XX18_ses-179000_L.shape.gii,40.14285714,40.29
75 | 73,sub-CC00586XX18_ses-179000_R.shape.gii,40.14285714,40.29
76 | 74,sub-CC00607XX13_ses-179300_L.shape.gii,40.57142857,42.43
77 | 75,sub-CC00607XX13_ses-179300_R.shape.gii,40.57142857,42.43
78 | 76,sub-CC00587XX19_ses-180300_L.shape.gii,38.28571429,38.57
79 | 77,sub-CC00587XX19_ses-180300_R.shape.gii,38.28571429,38.57
80 | 78,sub-CC00613XX11_ses-181601_L.shape.gii,40.42857143,40.57
81 | 79,sub-CC00613XX11_ses-181601_R.shape.gii,40.42857143,40.57
82 | 80,sub-CC00528XX17_ses-183200_L.shape.gii,39.85714286,43.71
83 | 81,sub-CC00528XX17_ses-183200_R.shape.gii,39.85714286,43.71
84 | 82,sub-CC00588XX20_ses-183600_L.shape.gii,38.42857143,41.86
85 | 83,sub-CC00588XX20_ses-183600_R.shape.gii,38.42857143,41.86
86 | 84,sub-CC00627XX17_ses-180600_L.shape.gii,36.85714286,37.0
87 | 85,sub-CC00627XX17_ses-180600_R.shape.gii,36.85714286,37.0
88 | 86,sub-CC00530XX11_ses-181301_L.shape.gii,26.14285714,42.43
89 | 87,sub-CC00530XX11_ses-181301_R.shape.gii,26.14285714,42.43
90 | 88,sub-CC00617XX15_ses-188400_L.shape.gii,31.57142857,40.14
91 | 89,sub-CC00617XX15_ses-188400_R.shape.gii,31.57142857,40.14
92 | 90,sub-CC00628XX18_ses-193500_L.shape.gii,29.57142857,42.0
93 | 91,sub-CC00628XX18_ses-193500_R.shape.gii,29.57142857,42.0
94 | 92,sub-CC00621XX11_ses-195900_L.shape.gii,29.0,43.14
95 | 93,sub-CC00621XX11_ses-195900_R.shape.gii,29.0,43.14
96 | 94,sub-CC00632XX14_ses-196000_L.shape.gii,32.71428571,43.14
97 | 95,sub-CC00632XX14_ses-196000_R.shape.gii,32.71428571,43.14
98 | 96,sub-CC00695XX20_ses-202600_L.shape.gii,36.85714286,39.57
99 | 97,sub-CC00695XX20_ses-202600_R.shape.gii,36.85714286,39.57
100 | 98,sub-CC00648XX22_ses-204400_L.shape.gii,34.71428571,44.14
101 | 99,sub-CC00648XX22_ses-204400_R.shape.gii,34.71428571,44.14
102 | 100,sub-CC00702BN09_ses-204500_L.shape.gii,32.57142857,44.14
103 | 101,sub-CC00702BN09_ses-204500_R.shape.gii,32.57142857,44.14
104 |
--------------------------------------------------------------------------------
/labels/dHCP/birth_age/native/means.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metrics-lab/surface-vision-transformers/377e5dd9fddb59344d673281e28d7bea13c305e3/labels/dHCP/birth_age/native/means.npy
--------------------------------------------------------------------------------
/labels/dHCP/birth_age/native/stds.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metrics-lab/surface-vision-transformers/377e5dd9fddb59344d673281e28d7bea13c305e3/labels/dHCP/birth_age/native/stds.npy
--------------------------------------------------------------------------------
/labels/dHCP/birth_age/template/means.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metrics-lab/surface-vision-transformers/377e5dd9fddb59344d673281e28d7bea13c305e3/labels/dHCP/birth_age/template/means.npy
--------------------------------------------------------------------------------
/labels/dHCP/birth_age/template/stds.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metrics-lab/surface-vision-transformers/377e5dd9fddb59344d673281e28d7bea13c305e3/labels/dHCP/birth_age/template/stds.npy
--------------------------------------------------------------------------------
/labels/dHCP/birth_age/validation.csv:
--------------------------------------------------------------------------------
1 | ,ids,labels,scan_age
2 | 0,CC00511XX08_149000,40.71428571,41.0
3 | 1,CC01223XX11_149330,38.71428571,40.86
4 | 2,CC01194XX16_149530,42.0,43.86
5 | 3,CC00520XX09_150201,38.0,38.29
6 | 4,CC01207XX11_150330,40.28571429,43.0
7 | 5,CC00512XX09_150700,38.85714286,39.14
8 | 6,CC00515XX12_152600,37.85714286,38.86
9 | 7,CC00516XX13_152902,40.14285714,40.57
10 | 8,CC00564XX12_154100,40.42857143,41.86
11 | 9,CC00532XX13_154200,41.14285714,41.43
12 | 10,CC01200XX04_154330,38.85714286,41.43
13 | 11,CC00534XX15_155100,39.0,39.14
14 | 12,CC01236XX16_155830,40.71428571,44.43
15 | 13,CC00438XX18_156800,41.14285714,43.43
16 | 14,CC00536XX17_156900,38.85714286,42.71
17 | 15,CC00537XX18_157100,38.85714286,39.0
18 | 16,CC00538XX19_157200,40.14285714,40.43
19 | 17,CC00547XX20_157300,38.71428571,39.0
20 | 18,CC00548XX21_157400,40.42857143,40.71
21 | 19,CC00549XX22_157600,42.0,42.14
22 | 20,CC00550XX06_157800,39.85714286,40.0
23 | 21,CC00551XX07_158001,39.71428571,39.71
24 | 22,CC00553XX09_159000,41.42857143,41.57
25 | 23,CC00552XX08_159300,39.0,39.14
26 | 24,CC00554XX10_160200,40.85714286,41.14
27 | 25,CC00556XX12_162902,38.42857143,41.57
28 | 26,CC00540XX13_164400,40.71428571,41.29
29 | 27,CC00542XX15_165800,40.42857143,42.71
30 | 28,CC00544XX17_169300,41.42857143,41.86
31 | 29,CC00580XX12_173700,41.14285714,41.43
32 | 30,CC00461XX08_175100,41.42857143,43.86
33 | 31,CC00581XX13_177101,39.71428571,39.86
34 | 32,CC00582XX14_178300,38.14285714,39.0
35 | 33,CC00583XX15_178600,40.42857143,42.0
36 | 34,CC00584XX16_178800,41.85714286,42.0
37 | 35,CC00585XX17_178900,39.57142857,39.71
38 | 36,CC00586XX18_179000,40.14285714,40.29
39 | 37,CC00607XX13_179300,40.57142857,42.43
40 | 38,CC00587XX19_180300,38.28571429,38.57
41 | 39,CC00613XX11_181601,40.42857143,40.57
42 | 40,CC00528XX17_183200,39.85714286,43.71
43 | 41,CC00588XX20_183600,38.42857143,41.86
44 | 42,CC00627XX17_180600,36.85714286,37.0
45 | 43,CC00530XX11_181301,26.14285714,42.43
46 | 44,CC00617XX15_188400,31.57142857,40.14
47 | 45,CC00628XX18_193500,29.57142857,42.0
48 | 46,CC00621XX11_195900,29.0,43.14
49 | 47,CC00632XX14_196000,32.71428571,43.14
50 | 48,CC00695XX20_202600,36.85714286,39.57
51 | 49,CC00648XX22_204400,34.71428571,44.14
52 | 50,CC00702BN09_204500,32.57142857,44.14
53 |
--------------------------------------------------------------------------------
/labels/dHCP/scan_age/half/native/means.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metrics-lab/surface-vision-transformers/377e5dd9fddb59344d673281e28d7bea13c305e3/labels/dHCP/scan_age/half/native/means.npy
--------------------------------------------------------------------------------
/labels/dHCP/scan_age/half/native/stds.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metrics-lab/surface-vision-transformers/377e5dd9fddb59344d673281e28d7bea13c305e3/labels/dHCP/scan_age/half/native/stds.npy
--------------------------------------------------------------------------------
/labels/dHCP/scan_age/half/template/means.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metrics-lab/surface-vision-transformers/377e5dd9fddb59344d673281e28d7bea13c305e3/labels/dHCP/scan_age/half/template/means.npy
--------------------------------------------------------------------------------
/labels/dHCP/scan_age/half/template/stds.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metrics-lab/surface-vision-transformers/377e5dd9fddb59344d673281e28d7bea13c305e3/labels/dHCP/scan_age/half/template/stds.npy
--------------------------------------------------------------------------------
/labels/dHCP/scan_age/half/val.csv:
--------------------------------------------------------------------------------
1 | ,ids,labels
2 | 0,sub-CC00511XX08_ses-149000_L.shape.gii,41.0
3 | 1,sub-CC00511XX08_ses-149000_R.shape.gii,41.0
4 | 2,sub-CC01223XX11_ses-149330_L.shape.gii,40.86
5 | 3,sub-CC01223XX11_ses-149330_R.shape.gii,40.86
6 | 4,sub-CC01194XX16_ses-149530_L.shape.gii,43.86
7 | 5,sub-CC01194XX16_ses-149530_R.shape.gii,43.86
8 | 6,sub-CC00520XX09_ses-150201_L.shape.gii,38.29
9 | 7,sub-CC00520XX09_ses-150201_R.shape.gii,38.29
10 | 8,sub-CC01207XX11_ses-150330_L.shape.gii,43.0
11 | 9,sub-CC01207XX11_ses-150330_R.shape.gii,43.0
12 | 10,sub-CC00512XX09_ses-150700_L.shape.gii,39.14
13 | 11,sub-CC00512XX09_ses-150700_R.shape.gii,39.14
14 | 12,sub-CC00515XX12_ses-152600_L.shape.gii,38.86
15 | 13,sub-CC00515XX12_ses-152600_R.shape.gii,38.86
16 | 14,sub-CC00516XX13_ses-152902_L.shape.gii,40.57
17 | 15,sub-CC00516XX13_ses-152902_R.shape.gii,40.57
18 | 16,sub-CC00564XX12_ses-154100_L.shape.gii,41.86
19 | 17,sub-CC00564XX12_ses-154100_R.shape.gii,41.86
20 | 18,sub-CC00532XX13_ses-154200_L.shape.gii,41.43
21 | 19,sub-CC00532XX13_ses-154200_R.shape.gii,41.43
22 | 20,sub-CC01200XX04_ses-154330_L.shape.gii,41.43
23 | 21,sub-CC01200XX04_ses-154330_R.shape.gii,41.43
24 | 22,sub-CC00534XX15_ses-155100_L.shape.gii,39.14
25 | 23,sub-CC00534XX15_ses-155100_R.shape.gii,39.14
26 | 24,sub-CC01236XX16_ses-155830_L.shape.gii,44.43
27 | 25,sub-CC01236XX16_ses-155830_R.shape.gii,44.43
28 | 26,sub-CC00438XX18_ses-156800_L.shape.gii,43.43
29 | 27,sub-CC00438XX18_ses-156800_R.shape.gii,43.43
30 | 28,sub-CC00536XX17_ses-156900_L.shape.gii,42.71
31 | 29,sub-CC00536XX17_ses-156900_R.shape.gii,42.71
32 | 30,sub-CC00537XX18_ses-157100_L.shape.gii,39.0
33 | 31,sub-CC00537XX18_ses-157100_R.shape.gii,39.0
34 | 32,sub-CC00538XX19_ses-157200_L.shape.gii,40.43
35 | 33,sub-CC00538XX19_ses-157200_R.shape.gii,40.43
36 | 34,sub-CC00547XX20_ses-157300_L.shape.gii,39.0
37 | 35,sub-CC00547XX20_ses-157300_R.shape.gii,39.0
38 | 36,sub-CC00548XX21_ses-157400_L.shape.gii,40.71
39 | 37,sub-CC00548XX21_ses-157400_R.shape.gii,40.71
40 | 38,sub-CC00549XX22_ses-157600_L.shape.gii,42.14
41 | 39,sub-CC00549XX22_ses-157600_R.shape.gii,42.14
42 | 40,sub-CC00550XX06_ses-157800_L.shape.gii,40.0
43 | 41,sub-CC00550XX06_ses-157800_R.shape.gii,40.0
44 | 42,sub-CC00551XX07_ses-158001_L.shape.gii,39.71
45 | 43,sub-CC00551XX07_ses-158001_R.shape.gii,39.71
46 | 44,sub-CC00553XX09_ses-159000_L.shape.gii,41.57
47 | 45,sub-CC00553XX09_ses-159000_R.shape.gii,41.57
48 | 46,sub-CC00552XX08_ses-159300_L.shape.gii,39.14
49 | 47,sub-CC00552XX08_ses-159300_R.shape.gii,39.14
50 | 48,sub-CC00554XX10_ses-160200_L.shape.gii,41.14
51 | 49,sub-CC00554XX10_ses-160200_R.shape.gii,41.14
52 | 50,sub-CC00556XX12_ses-162902_L.shape.gii,41.57
53 | 51,sub-CC00556XX12_ses-162902_R.shape.gii,41.57
54 | 52,sub-CC00540XX13_ses-164400_L.shape.gii,41.29
55 | 53,sub-CC00540XX13_ses-164400_R.shape.gii,41.29
56 | 54,sub-CC00542XX15_ses-165800_L.shape.gii,42.71
57 | 55,sub-CC00542XX15_ses-165800_R.shape.gii,42.71
58 | 56,sub-CC00544XX17_ses-169300_L.shape.gii,41.86
59 | 57,sub-CC00544XX17_ses-169300_R.shape.gii,41.86
60 | 58,sub-CC00580XX12_ses-173700_L.shape.gii,41.43
61 | 59,sub-CC00580XX12_ses-173700_R.shape.gii,41.43
62 | 60,sub-CC00461XX08_ses-175100_L.shape.gii,43.86
63 | 61,sub-CC00461XX08_ses-175100_R.shape.gii,43.86
64 | 62,sub-CC00581XX13_ses-177101_L.shape.gii,39.86
65 | 63,sub-CC00581XX13_ses-177101_R.shape.gii,39.86
66 | 64,sub-CC00582XX14_ses-178300_L.shape.gii,39.0
67 | 65,sub-CC00582XX14_ses-178300_R.shape.gii,39.0
68 | 66,sub-CC00583XX15_ses-178600_L.shape.gii,42.0
69 | 67,sub-CC00583XX15_ses-178600_R.shape.gii,42.0
70 | 68,sub-CC00584XX16_ses-178800_L.shape.gii,42.0
71 | 69,sub-CC00584XX16_ses-178800_R.shape.gii,42.0
72 | 70,sub-CC00585XX17_ses-178900_L.shape.gii,39.71
73 | 71,sub-CC00585XX17_ses-178900_R.shape.gii,39.71
74 | 72,sub-CC00586XX18_ses-179000_L.shape.gii,40.29
75 | 73,sub-CC00586XX18_ses-179000_R.shape.gii,40.29
76 | 74,sub-CC00607XX13_ses-179300_L.shape.gii,42.43
77 | 75,sub-CC00607XX13_ses-179300_R.shape.gii,42.43
78 | 76,sub-CC00587XX19_ses-180300_L.shape.gii,38.57
79 | 77,sub-CC00587XX19_ses-180300_R.shape.gii,38.57
80 | 78,sub-CC00613XX11_ses-181601_L.shape.gii,40.57
81 | 79,sub-CC00613XX11_ses-181601_R.shape.gii,40.57
82 | 80,sub-CC00528XX17_ses-183200_L.shape.gii,43.71
83 | 81,sub-CC00528XX17_ses-183200_R.shape.gii,43.71
84 | 82,sub-CC00588XX20_ses-183600_L.shape.gii,41.86
85 | 83,sub-CC00588XX20_ses-183600_R.shape.gii,41.86
86 | 84,sub-CC00617XX15_ses-176500_L.shape.gii,34.14
87 | 85,sub-CC00617XX15_ses-176500_R.shape.gii,34.14
88 | 86,sub-CC00618XX16_ses-177201_L.shape.gii,29.86
89 | 87,sub-CC00618XX16_ses-177201_R.shape.gii,29.86
90 | 88,sub-CC00621XX11_ses-177900_L.shape.gii,32.14
91 | 89,sub-CC00621XX11_ses-177900_R.shape.gii,32.14
92 | 90,sub-CC00628XX18_ses-181800_L.shape.gii,34.14
93 | 91,sub-CC00628XX18_ses-181800_R.shape.gii,34.14
94 | 92,sub-CC00629XX19_ses-182000_L.shape.gii,31.57
95 | 93,sub-CC00629XX19_ses-182000_R.shape.gii,31.57
96 | 94,sub-CC00632XX14_ses-183300_L.shape.gii,35.29
97 | 95,sub-CC00632XX14_ses-183300_R.shape.gii,35.29
98 | 96,sub-CC00634AN16_ses-184100_L.shape.gii,29.14
99 | 97,sub-CC00634AN16_ses-184100_R.shape.gii,29.14
100 | 98,sub-CC00648XX22_ses-191100_L.shape.gii,36.14
101 | 99,sub-CC00648XX22_ses-191100_R.shape.gii,36.14
102 | 100,sub-CC00657XX14_ses-193700_L.shape.gii,29.86
103 | 101,sub-CC00657XX14_ses-193700_R.shape.gii,29.86
104 | 102,sub-CC00670XX11_ses-197200_L.shape.gii,36.57
105 | 103,sub-CC00670XX11_ses-197200_R.shape.gii,36.57
106 | 104,sub-CC00672AN13_ses-197601_L.shape.gii,30.0
107 | 105,sub-CC00672AN13_ses-197601_R.shape.gii,30.0
108 |
--------------------------------------------------------------------------------
/labels/dHCP/scan_age/native/means.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metrics-lab/surface-vision-transformers/377e5dd9fddb59344d673281e28d7bea13c305e3/labels/dHCP/scan_age/native/means.npy
--------------------------------------------------------------------------------
/labels/dHCP/scan_age/native/stds.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metrics-lab/surface-vision-transformers/377e5dd9fddb59344d673281e28d7bea13c305e3/labels/dHCP/scan_age/native/stds.npy
--------------------------------------------------------------------------------
/labels/dHCP/scan_age/template/means.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metrics-lab/surface-vision-transformers/377e5dd9fddb59344d673281e28d7bea13c305e3/labels/dHCP/scan_age/template/means.npy
--------------------------------------------------------------------------------
/labels/dHCP/scan_age/template/stds.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metrics-lab/surface-vision-transformers/377e5dd9fddb59344d673281e28d7bea13c305e3/labels/dHCP/scan_age/template/stds.npy
--------------------------------------------------------------------------------
/labels/dHCP/scan_age/train.csv:
--------------------------------------------------------------------------------
1 | ids,labels
2 | CC00749XX24_2600,44.0
3 | CC00818XX18_4020,42.57
4 | CC00815XX15_4120,42.57
5 | CC00766XX16_4200,42.0
6 | CC00843XX19_4330,40.71
7 | CC00850XX09_4930,40.86
8 | CC00860XX11_7030,40.71
9 | CC00879XX22_7430,43.71
10 | CC00051XX02_7702,40.0
11 | CC00765XX15_8210,42.14
12 | CC00052XX03_8300,38.71
13 | CC00053XX04_8607,40.43
14 | CC00054XX05_8800,42.14
15 | CC00055XX06_9300,40.43
16 | CC00056XX07_10700,41.14
17 | CC00057XX08_11002,41.29
18 | CC00757XX15_12010,43.29
19 | CC00865XX16_12330,41.71
20 | CC00060XX03_12501,40.29
21 | CC00882XX17_13030,42.14
22 | CC00061XX04_13300,41.43
23 | CC00890XX17_13330,43.71
24 | CC00062XX05_13801,41.57
25 | CC00856XX15_15330,44.29
26 | CC00822XX14_15710,40.57
27 | CC00868XX19_16530,42.43
28 | CC00875XX18_16630,43.0
29 | CC00884XX19_17230,43.43
30 | CC00876XX19_17931,41.14
31 | CC00886XX21_18030,40.71
32 | CC00064XX07_18303,38.86
33 | CC00915XX16_18530,40.14
34 | CC00065XX08_18600,40.71
35 | CC00911XX12_18830,40.29
36 | CC00066XX09_19200,38.43
37 | CC00067XX10_20200,40.14
38 | CC00881XX16_20230,42.57
39 | CC00068XX11_20701,40.71
40 | CC00861XX12_20830,44.0
41 | CC00840XX16_24910,40.86
42 | CC00069XX12_26300,39.57
43 | CC00846XX22_26710,43.29
44 | CC00071XX06_27000,40.43
45 | CC00791XX17_27611,43.0
46 | CC00073XX08_27800,41.0
47 | CC00074XX09_28000,39.86
48 | CC00852XX11_28210,43.43
49 | CC00075XX10_28400,38.71
50 | CC00810XX10_29010,40.71
51 | CC00801XX09_29510,44.29
52 | CC00078XX13_29900,40.71
53 | CC00079XX14_30000,41.57
54 | CC00948XX25_30030,43.29
55 | CC00914XX15_30130,43.29
56 | CC00080XX07_30300,39.43
57 | CC00082XX09_30700,39.71
58 | CC00083XX10_30900,42.29
59 | CC00084XX11_31201,40.86
60 | CC00085XX12_31400,41.86
61 | CC00086XX13_31500,40.86
62 | CC00088XX15_31801,40.86
63 | CC00089XX16_32101,41.29
64 | CC00858XX17_32210,43.71
65 | CC00091XX10_32300,41.57
66 | CC00094BN13_33301,38.14
67 | CC00094AN13_33500,38.14
68 | CC00095XX14_33501,38.71
69 | CC00096XX15_33700,39.86
70 | CC00099BN18_34201,37.71
71 | CC00863XX14_34810,43.57
72 | CC00100XX01_35000,40.0
73 | CC00101XX02_35001,40.86
74 | CC00102XX03_35200,39.14
75 | CC00104XX05_35800,41.71
76 | CC00105XX06_35801,38.57
77 | CC00106XX07_36100,40.43
78 | CC00939XX24_36230,39.57
79 | CC00107XX08_36300,41.29
80 | CC00924XX17_36330,40.57
81 | CC00108XX09_36800,42.57
82 | CC00111XX04_37002,40.86
83 | CC00908XX17_37030,42.43
84 | CC00114XX07_37100,39.43
85 | CC00113XX06_37200,41.71
86 | CC00928XX21_37430,39.71
87 | CC00956XX16_37730,41.29
88 | CC00115XX08_37801,40.29
89 | CC00116XX09_38001,39.43
90 | CC00117XX10_38200,42.14
91 | CC00824XX16_38310,41.57
92 | CC00871XX14_38810,41.71
93 | CC00917XX18_39130,43.43
94 | CC00119XX12_39400,43.71
95 | CC00816XX16_40010,42.43
96 | CC00958XX18_40630,43.43
97 | CC00120XX05_41600,40.14
98 | CC00122XX07_42000,38.29
99 | CC00126XX11_43100,38.29
100 | CC00957XX17_43130,43.43
101 | CC00127XX12_43200,41.86
102 | CC00947XX24_43430,43.29
103 | CC00909XX18_43730,42.29
104 | CC00930XX15_43930,42.0
105 | CC00130XX07_44001,43.43
106 | CC00131XX08_44200,39.14
107 | CC00961XX13_44630,43.71
108 | CC00971XX15_45431,40.71
109 | CC00138XX15_46200,41.57
110 | CC00925XX18_46230,43.43
111 | CC00143AN12_47501,38.29
112 | CC00143BN12_47600,38.29
113 | CC00938XX23_47931,40.71
114 | CC00145XX14_48100,38.71
115 | CC00146XX15_48400,40.71
116 | CC00923XX16_48430,43.43
117 | CC00149XX18_49000,41.29
118 | CC00150BN02_49100,37.71
119 | CC00153XX05_50300,41.57
120 | CC00155XX07_51300,39.57
121 | CC01004XX06_51630,41.14
122 | CC00157XX09_51900,39.43
123 | CC00158XX10_52200,40.29
124 | CC00159XX11_52600,41.43
125 | CC00160XX04_52700,39.43
126 | CC00163XX07_53701,40.86
127 | CC00164XX08_54000,38.86
128 | CC00165XX09_54600,41.14
129 | CC00150AN02_54800,41.0
130 | CC00168XX12_55700,43.86
131 | CC00170XX06_56100,38.43
132 | CC00171XX07_56300,40.43
133 | CC00172AN08_56901,37.86
134 | CC00174XX10_57200,41.43
135 | CC00172BN08_57300,38.0
136 | CC00176XX12_57600,43.57
137 | CC00967XX19_58330,41.57
138 | CC00178XX14_58600,41.0
139 | CC00179XX15_58800,40.57
140 | CC00180XX08_59500,42.29
141 | CC01014XX08_59530,41.86
142 | CC00181XX09_60000,40.86
143 | CC00182XX10_60200,41.86
144 | CC00183XX11_60300,41.14
145 | CC00184XX12_60501,44.71
146 | CC00185XX13_60800,40.71
147 | CC00992XX20_62530,41.57
148 | CC01074XX11_63930,44.43
149 | CC00976XX20_64331,43.29
150 | CC00193XX13_64400,42.0
151 | CC00987XX23_66031,43.57
152 | CC00197XX17_66500,43.14
153 | CC00198XX18_66600,40.86
154 | CC00199XX19_66700,41.0
155 | CC00200XX02_67204,44.43
156 | CC00201XX03_67600,44.14
157 | CC01007XX09_67630,40.86
158 | CC00202XX04_67800,39.29
159 | CC01082XX11_68830,41.43
160 | CC01053XX06_69230,43.29
161 | CC00203XX05_69500,40.86
162 | CC00204XX06_69900,38.86
163 | CC00205XX07_70000,39.71
164 | CC00206XX08_70100,40.86
165 | CC00990XX18_70230,43.14
166 | CC00209XX11_70400,42.86
167 | CC01044XX14_72531,43.0
168 | CC01020XX06_72930,38.57
169 | CC00217XX11_73700,41.29
170 | CC01023XX09_74030,43.86
171 | CC01042XX12_74431,43.14
172 | CC00221XX07_75000,39.43
173 | CC01093AN14_75130,43.86
174 | CC01093BN14_75230,43.86
175 | CC00223XX09_75900,40.43
176 | CC01032XX10_77330,43.0
177 | CC01037XX15_77430,41.0
178 | CC01055XX08_77830,43.29
179 | CC00236XX14_78700,38.14
180 | CC01019XX13_79330,42.0
181 | CC01022XX08_79430,42.0
182 | CC01041XX11_81131,44.0
183 | CC01050XX03_82330,43.43
184 | CC01027XX13_82630,43.71
185 | CC00247XX17_82801,40.14
186 | CC00250XX03_83700,40.71
187 | CC00251XX04_83800,43.57
188 | CC00252XX05_84000,43.14
189 | CC00254XX07_84300,39.57
190 | CC01117XX12_84730,42.71
191 | CC00258XX11_84900,42.86
192 | CC00260XX05_85300,39.71
193 | CC01070XX07_86430,41.43
194 | CC00219XX13_86600,42.29
195 | CC00265XX10_86901,40.43
196 | CC00267XX12_87700,44.14
197 | CC00268XX13_87800,39.29
198 | CC00269XX14_88300,40.86
199 | CC00270XX07_88600,41.86
200 | CC00207XX09_88700,39.14
201 | CC00286XX15_91700,44.43
202 | CC00291XX12_93100,40.29
203 | CC00292XX13_93800,43.71
204 | CC00298XX19_94700,42.0
205 | CC00300XX03_96000,41.0
206 | CC00303XX06_96900,43.43
207 | CC00306XX09_98700,40.29
208 | CC00307XX10_98800,43.57
209 | CC00308XX11_98900,37.43
210 | CC01105XX08_99330,44.43
211 | CC01087XX16_99430,42.43
212 | CC00313XX08_100000,39.14
213 | CC00314XX09_100101,39.43
214 | CC01086XX15_100430,41.29
215 | CC00316XX11_101300,44.43
216 | CC00320XX07_102300,40.29
217 | CC00325XX12_103900,41.29
218 | CC00328XX15_104800,42.29
219 | CC00329XX16_105400,39.43
220 | CC00332XX11_105700,41.57
221 | CC00334XX13_106100,38.71
222 | CC00337XX16_107000,39.29
223 | CC00339XX18_107200,41.14
224 | CC00338BN17_107600,37.71
225 | CC00338AN17_107700,37.71
226 | CC00341XX12_108000,39.29
227 | CC00343XX14_108500,42.0
228 | CC00344XX15_108600,39.43
229 | CC00347XX18_109600,41.43
230 | CC00348XX19_110200,41.71
231 | CC00349XX20_110300,42.43
232 | CC00352XX06_110700,43.86
233 | CC00353XX07_111000,38.43
234 | CC00324XX11_111200,39.43
235 | CC00304XX07_111600,41.14
236 | CC00356XX10_112901,40.43
237 | CC00302XX05_113500,40.86
238 | CC00357XX11_113900,41.71
239 | CC00362XX08_114500,40.29
240 | CC00363XX09_114900,39.14
241 | CC00364XX10_115200,41.29
242 | CC00367XX13_116000,39.86
243 | CC00366XX12_116300,41.43
244 | CC00368XX14_116600,41.14
245 | CC00319XX14_117300,41.71
246 | CC00376XX14_118400,40.57
247 | CC00289XX18_119700,39.43
248 | CC00377XX15_119800,40.71
249 | CC00378XX16_120200,39.57
250 | CC00379XX17_120400,40.71
251 | CC00380XX10_121200,41.43
252 | CC00382XX12_121700,43.0
253 | CC00383XX13_121800,41.71
254 | CC00397XX19_122700,41.86
255 | CC00398XX20_123100,39.0
256 | CC00400XX04_123700,40.29
257 | CC00401XX05_123900,39.29
258 | CC00402XX06_124300,40.43
259 | CC00403XX07_124400,38.14
260 | CC00404XX08_124500,37.57
261 | CC00405XX09_124900,40.43
262 | CC00408XX12_125500,41.0
263 | CC00409XX13_125600,41.0
264 | CC00410XX06_125800,41.14
265 | CC00421AN09_126000,38.14
266 | CC00421BN09_126100,38.14
267 | CC00411XX07_126200,40.57
268 | CC00412XX08_126301,38.0
269 | CC00413XX09_127100,39.29
270 | CC00415XX11_127400,39.0
271 | CC00434AN14_127700,38.86
272 | CC00417XX13_129000,41.29
273 | CC00424XX12_129400,41.29
274 | CC00425XX13_129800,42.71
275 | CC00426XX14_129900,39.71
276 | CC01191XX13_130330,41.29
277 | CC00428XX16_130400,40.86
278 | CC00431XX11_131500,43.86
279 | CC00432XX12_131800,38.86
280 | CC00433XX13_132000,42.43
281 | CC00439XX19_132100,42.0
282 | CC00440XX12_132200,41.43
283 | CC01176XX14_132530,40.14
284 | CC00442XX14_133300,41.57
285 | CC00443XX15_133900,40.43
286 | CC00462XX09_134100,40.57
287 | CC00371XX09_134700,42.57
288 | CC00445XX17_134800,38.57
289 | CC00447XX19_135600,39.71
290 | CC00448XX20_135800,42.14
291 | CC00450XX05_136100,38.43
292 | CC00453XX08_136600,39.71
293 | CC00455XX10_137700,42.0
294 | CC00466BN13_138300,37.86
295 | CC00466AN13_138400,37.86
296 | CC00457XX12_138601,41.43
297 | CC00458XX13_138900,41.29
298 | CC00468XX15_139100,41.86
299 | CC00469XX16_139200,41.43
300 | CC00470XX09_139600,42.0
301 | CC00472XX11_140000,41.43
302 | CC00473XX12_140100,39.14
303 | CC00474XX13_140500,40.29
304 | CC01198XX20_140930,42.0
305 | CC01199XX21_141130,40.14
306 | CC00475XX14_141400,41.71
307 | CC00476XX15_141500,44.29
308 | CC00477XX16_141600,41.14
309 | CC00478XX17_141601,39.86
310 | CC00479XX18_141608,39.43
311 | CC00480XX11_141609,44.0
312 | CC01201XX05_141630,41.29
313 | CC00481XX12_141800,41.57
314 | CC00482XX13_142000,41.14
315 | CC00370XX08_142400,40.86
316 | CC00484XX15_142700,41.29
317 | CC01190XX12_143030,42.14
318 | CC00485XX16_143100,42.29
319 | CC01206XX10_143530,43.86
320 | CC00483XX14_144200,41.0
321 | CC00486XX17_144300,40.71
322 | CC00497XX20_144500,41.29
323 | CC00498XX21_144900,42.29
324 | CC00499XX22_145800,40.43
325 | CC00500XX05_145900,43.0
326 | CC01211XX07_145930,38.29
327 | CC01215XX11_146331,42.0
328 | CC00501XX06_146500,40.57
329 | CC00502XX07_146700,40.0
330 | CC00504XX09_146800,40.57
331 | CC00505XX10_146900,40.86
332 | CC01192XX14_147330,43.57
333 | CC00507XX12_148202,40.14
334 | CC00508XX13_148700,42.0
335 | CC01220XX08_148731,39.71
336 | CC00509XX14_148800,40.29
337 | CC00802XX10_1000,32.29
338 | CC00889AN24_2220,34.86
339 | CC00889BN24_2320,34.86
340 | CC00907XX16_4230,34.71
341 | CC00919XX20_7830,31.86
342 | CC00823XX15_15810,33.29
343 | CC00829XX21_17610,33.14
344 | CC00830XX14_18910,31.14
345 | CC00955XX15_21030,31.0
346 | CC00834XX18_21210,26.86
347 | CC00964XX16_21430,30.43
348 | CC00979XX23_25330,33.0
349 | CC00845AN21_26510,35.57
350 | CC00986AN22_26730,35.43
351 | CC00986BN22_26830,35.43
352 | CC00855XX14_30210,33.57
353 | CC00087AN14_31800,36.14
354 | CC00087BN14_32100,36.71
355 | CC00997BN25_33630,31.71
356 | CC00998BN26_33930,34.29
357 | CC00998AN26_34030,34.29
358 | CC01005XX07_36930,35.0
359 | CC00867XX18_37111,28.29
360 | CC01006XX08_38531,30.57
361 | CC01011XX05_42230,35.14
362 | CC00124XX09_42302,34.43
363 | CC00129AN14_43500,36.86
364 | CC00135BN12_44704,35.0
365 | CC00135AN12_44900,35.14
366 | CC00136BN13_45000,33.29
367 | CC00136AN13_45100,33.29
368 | CC01018XX12_46130,33.57
369 | CC00147XX16_48500,35.14
370 | CC00154XX06_50700,34.0
371 | CC01038XX16_52230,32.43
372 | CC00161XX05_53100,36.71
373 | CC01045XX15_54630,34.0
374 | CC00177XX13_58500,34.86
375 | CC00191XX11_65200,34.71
376 | CC01077XX14_65230,31.14
377 | CC01080XX09_68030,31.29
378 | CC00216AN10_73100,36.86
379 | CC00216BN10_73200,36.86
380 | CC00218BN12_74800,34.86
381 | CC00238AN16_80300,36.71
382 | CC00238BN16_80400,36.71
383 | CC00245BN15_82600,35.14
384 | CC01116AN11_84230,35.43
385 | CC01116BN11_84330,35.43
386 | CC00259XX12_85100,36.86
387 | CC00281AN10_90400,36.86
388 | CC00284BN13_90801,32.29
389 | CC00301XX04_96400,32.71
390 | CC00293AN14_97401,34.0
391 | CC00293BN14_97500,34.0
392 | CC00305XX08_98101,32.71
393 | CC01129XX16_101430,35.14
394 | CC01153AN07_102030,33.71
395 | CC00326XX13_104100,35.57
396 | CC00361XX07_111700,35.57
397 | CC00385XX15_118500,35.71
398 | CC00389XX19_119100,29.86
399 | CC00406XX10_122900,36.43
400 | CC00407AN11_124000,35.57
401 | CC00407BN11_124100,35.57
402 | CC00418AN14_125200,36.86
403 | CC00489XX20_138800,35.71
404 | CC00492BN15_140800,36.57
405 | CC00492AN15_140900,36.57
406 | CC01208XX12_144731,34.29
407 | CC00517XX14_145000,36.29
408 | CC01212XX08_146030,32.43
409 | CC01218XX14_147430,35.43
410 | CC01222XX10_149231,36.71
411 | CC00526XX15_150500,32.43
412 | CC00525XX14_150600,33.57
413 | CC00529BN18_151200,33.29
414 | CC00529AN18_151300,33.29
415 | CC01232BN12_152130,35.43
416 | CC01232AN12_152530,35.57
417 | CC00563XX11_153900,35.29
418 | CC01234AN14_155030,33.29
419 | CC01234BN14_155230,33.43
420 | CC01229XX17_155630,35.86
421 | CC00569XX17_158300,35.43
422 | CC00570XX10_158900,35.86
423 | CC00578BN18_164800,32.57
424 | CC00578AN18_164900,32.57
425 |
--------------------------------------------------------------------------------
/labels/dHCP/scan_age/val.csv:
--------------------------------------------------------------------------------
1 | ids,labels
2 | CC00511XX08_149000,41.0
3 | CC01223XX11_149330,40.86
4 | CC01194XX16_149530,43.86
5 | CC00520XX09_150201,38.29
6 | CC01207XX11_150330,43.0
7 | CC00512XX09_150700,39.14
8 | CC00515XX12_152600,38.86
9 | CC00516XX13_152902,40.57
10 | CC00564XX12_154100,41.86
11 | CC00532XX13_154200,41.43
12 | CC01200XX04_154330,41.43
13 | CC00534XX15_155100,39.14
14 | CC01236XX16_155830,44.43
15 | CC00438XX18_156800,43.43
16 | CC00536XX17_156900,42.71
17 | CC00537XX18_157100,39.0
18 | CC00538XX19_157200,40.43
19 | CC00547XX20_157300,39.0
20 | CC00548XX21_157400,40.71
21 | CC00549XX22_157600,42.14
22 | CC00550XX06_157800,40.0
23 | CC00551XX07_158001,39.71
24 | CC00553XX09_159000,41.57
25 | CC00552XX08_159300,39.14
26 | CC00554XX10_160200,41.14
27 | CC00556XX12_162902,41.57
28 | CC00540XX13_164400,41.29
29 | CC00542XX15_165800,42.71
30 | CC00544XX17_169300,41.86
31 | CC00580XX12_173700,41.43
32 | CC00461XX08_175100,43.86
33 | CC00581XX13_177101,39.86
34 | CC00582XX14_178300,39.0
35 | CC00583XX15_178600,42.0
36 | CC00584XX16_178800,42.0
37 | CC00585XX17_178900,39.71
38 | CC00586XX18_179000,40.29
39 | CC00607XX13_179300,42.43
40 | CC00587XX19_180300,38.57
41 | CC00613XX11_181601,40.57
42 | CC00528XX17_183200,43.71
43 | CC00588XX20_183600,41.86
44 | CC00617XX15_176500,34.14
45 | CC00618XX16_177201,29.86
46 | CC00621XX11_177900,32.14
47 | CC00628XX18_181800,34.14
48 | CC00629XX19_182000,31.57
49 | CC00632XX14_183300,35.29
50 | CC00634AN16_184100,29.14
51 | CC00648XX22_191100,36.14
52 | CC00657XX14_193700,29.86
53 | CC00670XX11_197200,36.57
54 | CC00672AN13_197601,30.0
55 |
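A minimal, hypothetical loading sketch (not part of the repository): each label CSV pairs a subject/session id with the regression target, and the birth_age CSVs additionally carry the scan age as a confound column. The repo's actual loading presumably lives in tools/datasets.py and tools/dataloader.py; the snippet below only illustrates the column layout.

import pandas as pd  # assumption: pandas is available in the environment

# scan-age labels: columns are ids, labels
val = pd.read_csv("labels/dHCP/scan_age/val.csv")
print(val.loc[0, "ids"], val.loc[0, "labels"])            # CC00511XX08_149000 41.0

# birth-age labels additionally expose the scan age as a confound column
val_ba = pd.read_csv("labels/dHCP/birth_age/validation.csv", index_col=0)
print(val_ba.columns.tolist())                            # ['ids', 'labels', 'scan_age']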
--------------------------------------------------------------------------------
/models/mpp.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # @Author: Your name
3 | # @Date: 1970-01-01 01:00:00
4 | # @Last Modified by: Your name
5 | # @Last Modified time: 2022-02-14 17:50:22
6 | #
7 | # Created on Mon Oct 18 2021
8 | #
9 | # by Simon Dahan @SD3004
10 | #
11 | # Copyright (c) 2021 MeTrICS Lab
12 | #
13 |
14 |
15 | import math
16 | from random import random
17 |
18 | import torch
19 | import torch.nn as nn
20 | import torch.nn.functional as F
21 |
22 | from einops import rearrange, repeat
23 |
24 |
25 | def get_mask_from_prob(inputs, prob):
26 | '''
27 |     Creates a boolean mask over the sequence of tokens, one mask per sample,
28 |     selecting a fixed proportion `prob` of the token positions to corrupt.
29 |     return: a boolean mask of shape (batch, seq_len).
30 | '''
31 | batch, seq_len, _, device = *inputs.shape, inputs.device
32 | max_masked = math.ceil(prob * seq_len)
33 |
34 | rand = torch.rand((batch, seq_len), device=device)
35 | _, sampled_indices = rand.topk(max_masked, dim=-1)
36 |
37 | new_mask = torch.zeros((batch, seq_len), device=device)
38 | new_mask.scatter_(1, sampled_indices, 1)
39 | return new_mask.bool()
40 |
41 | def prob_mask_like(inputs, prob):
42 | batch, seq_length, _ = inputs.shape
43 | return torch.zeros((batch, seq_length)).float().uniform_(0, 1) < prob
44 |
45 |
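# --- Illustrative shape check (a sketch, not from the original source) ---
# get_mask_from_prob masks a fixed number of positions per sample (ceil(prob * seq_len)),
# whereas prob_mask_like flips each position independently with probability `prob`:
#
#   x = torch.randn(2, 8, 60)                # (batch, n_patches, vertices * channels)
#   m1 = get_mask_from_prob(x, prob=0.25)    # bool (2, 8), exactly 2 True entries per row
#   m2 = prob_mask_like(x, prob=0.5)         # bool (2, 8), each entry True with p = 0.5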
46 | class masked_patch_pretraining(nn.Module):
47 |
48 | def __init__(
49 | self,
50 | transformer,
51 | dim_in,
52 | dim_out,
53 | device,
54 | mask_prob=0.15,
55 | replace_prob=0.5,
56 | swap_prob=0.3,
57 | channels=4,
58 | num_vertices=561,):
59 |
60 | super().__init__()
61 | self.transformer = transformer
62 |
63 | self.dim_out = dim_out
64 | self.dim_in = dim_in
65 |
66 | self.to_original = nn.Linear(dim_in,dim_out)
67 | self.to_original.to(device)
68 |
69 | self.mask_prob = mask_prob
70 | self.replace_prob = replace_prob
71 | self.swap_prob = swap_prob
72 |
73 |         # learnable mask token used to replace masked patches
74 | self.mask_token = nn.Parameter(torch.randn(1, 1, channels * num_vertices))
75 |
76 |
77 | def forward(self, batch, **kwargs):
78 |
79 | transformer = self.transformer
80 |
81 |         # flatten each patch into a vector: (B, C, N, V) -> (B, N, V*C)
82 | batch = rearrange(batch,
83 | 'b c n v -> b n (v c)')
84 |
85 | corrupted_sequence = get_mask_from_prob(batch, self.mask_prob)
86 |
87 | corrupted_batch = batch.clone().detach()
88 |
89 | #randomly swap patches in the sequence
90 | if self.swap_prob > 0:
91 | random_patch_sampling_prob = self.swap_prob / (
92 | 1 - self.replace_prob)
93 |
94 | random_patch_prob = prob_mask_like(batch,
95 | random_patch_sampling_prob).to(corrupted_sequence.device)
96 |
97 | bool_random_patch_prob = corrupted_sequence * (random_patch_prob == True)
98 |
99 | random_patches = torch.randint(0,
100 | batch.shape[1],
101 | (batch.shape[0], batch.shape[1]),
102 | device=batch.device)
103 |             # sample replacement patches at random positions within the same sequence
104 | randomized_input = corrupted_batch[
105 | torch.arange(corrupted_batch.shape[0]).unsqueeze(-1),
106 | random_patches]
107 | corrupted_batch[bool_random_patch_prob] = randomized_input[bool_random_patch_prob]
108 |
109 | tokens_to_mask = prob_mask_like(batch, self.replace_prob).to(corrupted_sequence.device)
110 |
111 | bool_mask_replace = (corrupted_sequence * tokens_to_mask) == True
112 | corrupted_batch[bool_mask_replace] = self.mask_token.to(corrupted_sequence.device)
113 |
114 | # linear embedding of patches
115 | corrupted_batch = transformer.to_patch_embedding[-1](corrupted_batch)
116 | emb_masked_sequence = corrupted_batch.clone().detach()
117 |
118 | # add cls token to input sequence
119 | b, n, _ = corrupted_batch.shape
120 | cls_tokens = repeat(transformer.cls_token, '() n d -> b n d', b=b)
121 | corrupted_batch = torch.cat((cls_tokens, corrupted_batch), dim=1)
122 |
123 | # add positional embeddings to input
124 | corrupted_batch += transformer.pos_embedding[:, :(n + 1)]
125 | corrupted_batch = transformer.dropout(corrupted_batch)
126 |
127 |         # encode the corrupted sequence and project it back to patch space
128 | batch_out = transformer.transformer(corrupted_batch, **kwargs)
129 | batch_out = self.to_original(batch_out[:,1:,:])
130 |
131 | # compute loss
132 | mpp_loss = F.mse_loss(batch_out[corrupted_sequence], batch[corrupted_sequence])
133 |
134 | return mpp_loss, batch_out
135 |
136 |
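# --- Usage sketch (illustrative only, not part of the original file) ---
# masked_patch_pretraining only relies on a few attributes of the backbone passed as
# `transformer`: to_patch_embedding (a Sequential whose last module is the patch Linear),
# cls_token, pos_embedding, dropout, and the encoder itself (`.transformer`). In the repo
# the backbone is the SiT model from models/sit.py; the hypothetical TinyBackbone below is
# only meant to show the expected tensor shapes.
if __name__ == '__main__':

    class TinyBackbone(nn.Module):
        def __init__(self, patch_dim=60, dim=192, num_patches=20):
            super().__init__()
            self.to_patch_embedding = nn.Sequential(nn.Linear(patch_dim, dim))
            self.cls_token = nn.Parameter(torch.randn(1, 1, dim))
            self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim))
            self.dropout = nn.Dropout(0.1)
            self.transformer = nn.TransformerEncoderLayer(d_model=dim, nhead=4, batch_first=True)

    model = masked_patch_pretraining(transformer=TinyBackbone(), dim_in=192, dim_out=60,
                                     device='cpu', channels=4, num_vertices=15)
    batch = torch.randn(2, 4, 20, 15)    # (B, channels, num_patches, vertices per patch)
    loss, reconstruction = model(batch)  # reconstruction: (2, 20, 60)
    print(loss.item(), reconstruction.shape)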
--------------------------------------------------------------------------------
/models/ms_sit.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 | from timm.models.layers import DropPath, to_2tuple, trunc_normal_
5 | from einops.layers.torch import Rearrange
6 |
7 | from einops import repeat
8 |
9 |
10 | '''
11 | This implementation of SwinTransformer is greatly inspired by the official codebase: https://github.com/microsoft/Swin-Transformer
12 | and adapted for surface analysis on icospheric meshes.
13 | '''
14 |
15 | #OK
16 |
17 | #TODO - DROPATH RATE TO IMPLEMENT
18 | class MSSiT(nn.Module):
19 | """
20 | Args:
21 |         window_size (int or list): number of patches in each local-attention window (one value per stage if a list is given).
22 | """
23 |
24 |
25 | def __init__(self,ico_init_resolution=4,num_channels=4,num_classes=1,
26 | embed_dim=96, depths=[2,2,6,2],num_heads=[3,6,12,24],
27 | window_size=80,mlp_ratio=4,qkv_bias=True,qk_scale=True,
28 | dropout=0, attention_dropout=0,drop_path_rate=0.1,
29 | norm_layer=nn.LayerNorm, use_pos_emb=False,patch_norm=True,
30 | use_confounds =False,**kwargs):
31 | super().__init__()
32 |
33 | self.num_classes = num_classes
34 | self.num_layers = len(depths)
35 | self.embed_dim = embed_dim
36 | self.use_pos_emb = use_pos_emb
37 | self.patch_norm = patch_norm
38 | self.num_features = int(embed_dim * 2 ** (self.num_layers-1)) #number of features after the last layer
39 | self.mlp_ratio = mlp_ratio
40 | self.num_channels = num_channels
41 |
42 | if isinstance(window_size,int):
43 | self.window_sizes = [window_size for i in range(self.num_layers)]
44 | elif isinstance(window_size,list):
45 | self.window_sizes = window_size
46 |
47 | print('window size: {}'.format(self.window_sizes))
48 |
49 | if ico_init_resolution==4:
50 | self.num_patches = 5120
51 | self.num_vertices = 15
52 | patch_dim = self.num_vertices * self.num_channels
53 | elif ico_init_resolution==5:
54 | self.num_patches = 20480
55 | self.num_vertices = 6
56 | patch_dim = self.num_vertices * self.num_channels
57 | elif ico_init_resolution==2:
58 | self.num_patches = 320
59 | self.num_vertices = 153
60 | patch_dim = self.num_vertices * self.num_channels
61 |
62 | # absolute position embedding
63 | if use_pos_emb:
64 | self.absolute_pos_embed = nn.Parameter(torch.zeros(1, self.num_patches, self.embed_dim))
65 | trunc_normal_(self.absolute_pos_embed, std=.02)
66 |
67 | #need another version with conv1d
68 | self.to_patch_embedding = nn.Sequential(
69 | Rearrange('b c n v -> b n (v c)'),
70 | nn.Linear(patch_dim, self.embed_dim),
71 | )
72 |
73 | if use_confounds:
74 | self.proj_confound = nn.Sequential(
75 | nn.BatchNorm1d(1),
76 | nn.Linear(1,embed_dim))
77 | self.use_confounds = use_confounds
78 |
79 | self.pos_dropout = nn.Dropout(p=dropout)
80 |
81 | # stochastic depth
82 | dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
83 |
84 | self.layers = nn.ModuleList()
85 | for i_layer in range(self.num_layers):
86 | layer = BasicLayer(hidden_dim=int(embed_dim * 2 ** i_layer),
87 | input_resolution=ico_init_resolution,
88 | depth= depths[i_layer],
89 | num_heads=num_heads[i_layer],
90 | window_size=self.window_sizes[i_layer],
91 | mlp_ratio =mlp_ratio,
92 | qkv_bias=qkv_bias, qk_scale=qk_scale,
93 | drop = dropout, attn_drop=attention_dropout,
94 | drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
95 | norm_layer=norm_layer,
96 | downsample=PatchMerging if (i_layer < self.num_layers -1) else None,
97 | )
98 | self.layers.append(layer)
99 |
100 | self.pre_norm = norm_layer(self.embed_dim)
101 | self.norm = norm_layer(self.num_features)
102 | self.avgpool = nn.AdaptiveAvgPool1d(1)
103 | self.head = nn.Linear(self.num_features,num_classes) if num_classes > 0 else nn.Identity()
104 |
105 | self.apply(self._init_weights)
106 |
107 | ## normalise patches
108 |
109 | def _init_weights(self, m):
110 |
111 | if isinstance(m, nn.Linear):
112 | trunc_normal_(m.weight, std=.02)
113 | if isinstance(m, nn.Linear) and m.bias is not None:
114 | nn.init.constant_(m.bias, 0)
115 | elif isinstance(m, nn.LayerNorm):
116 | nn.init.constant_(m.bias, 0)
117 | nn.init.constant_(m.weight, 1.0)
118 |
119 |
120 | def forward_features(self,x,confounds=None):
121 | # input: B,C,L,V
122 | x = self.to_patch_embedding(x) # B, L, embed_dim=C
123 | b, n, _ = x.shape
124 |
125 | x = self.pre_norm(x)
126 |
127 | if self.use_pos_emb:
128 | x += self.absolute_pos_embed
129 | # deconfounding technique
130 | if self.use_confounds and (confounds is not None):
131 | confounds = self.proj_confound(confounds.view(-1,1))
132 | confounds = repeat(confounds, 'b d -> b n d', n=n)
133 | x += confounds
134 | x = self.pos_dropout(x)
135 |
136 | att_list_encoder= []
137 |
138 | #import pdb;pdb.set_trace()
139 |
140 | for i, layer in enumerate(self.layers):
141 | #if i==0:
142 | # x, att = layer(x,return_attention=True)
143 | #else:
144 | # x = layer(x,return_attention=False)
145 | x, att = layer(x,return_attention=True)
146 | att_list_encoder.append(att)
147 |
148 |
149 | x = self.norm(x) # B,L,C=int(embed_dim * 2 ** (num_layer-1))
150 | x = self.avgpool(x.transpose(1,2)) # B,C,1
151 | x = torch.flatten(x,1) # B,C
152 | return x, att_list_encoder
153 |
154 | def forward(self,x,confounds=None):
155 | # input: B,C,L,V
156 | x, att = self.forward_features(x,confounds) #B,int(embed_dim * 2 ** i_layer)
157 | x = self.head(x) #B, num_classes
158 | #return x, att
159 | return x
160 |
161 |
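# --- Illustrative note (not from the original file) ---
# With the defaults above (ico_init_resolution=4, embed_dim=96, depths=[2,2,6,2],
# window_size=80), the model expects an ico6 surface split into 5120 ico4 patches of
# 15 vertices each, e.g.:
#
#   model = MSSiT(num_channels=4, num_classes=1)
#   x = torch.randn(2, 4, 5120, 15)   # (B, channels, num_patches, vertices per patch)
#   out = model(x)                    # (2, 1) after average pooling and the linear head
#
# Each stage doubles the channel dimension (embed_dim * 2 ** i_layer) while PatchMerging
# (defined further down in this file) shortens the sequence, mirroring the Swin hierarchy.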
162 | #OK
163 | class BasicLayer(nn.Module):
164 |
165 | """
166 | Basic Swin Transformer layer for one stage
167 | """
168 |
169 | def __init__(self,hidden_dim, input_resolution, depth, num_heads,window_size,
170 | mlp_ratio=4, qkv_bias=True, qk_scale=None, drop=0, attn_drop=0,
171 | drop_path=0., norm_layer=nn.LayerNorm, downsample=None):
172 | super().__init__()
173 |
174 | self.hidden_dim = hidden_dim
175 | self.input_resolution = input_resolution
176 | self.depth = depth
177 | #build blocks
178 | self.blocks = nn.ModuleList([
179 | SwinTransformerBlock(hidden_dim=hidden_dim, input_resolution=input_resolution,
180 | num_heads=num_heads, window_size=window_size,
181 | mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
182 | drop=drop,attn_drop=attn_drop,
183 | drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
184 | norm_layer=norm_layer)
185 | for i in range(depth)
186 | ])
187 |
188 | #merging layer
189 | if downsample is not None:
190 | self.downsample = downsample(hidden_dim=hidden_dim, norm_layer= norm_layer)
191 | else:
192 | self.downsample = None
193 |
194 | def forward(self,x, return_attention=False):
195 | att_list = []
196 | for i, block in enumerate(self.blocks):
197 | if return_attention:
198 | x, att = block(x,return_attention)
199 | att_list.append(att)
200 | else:
201 | x = block(x,return_attention=False)
202 |
203 | if self.downsample is not None:
204 | x = self.downsample(x)
205 |
206 | if return_attention:
207 | return x, att_list
208 | else:
209 | return x
210 |
211 |
212 |
213 | class MLP(nn.Module):
214 | '''
215 |     similar to https://github.com/microsoft/Swin-Transformer/blob/main/models/swin_transformer.py
216 | '''
217 | def __init__(self,in_features, hidden_features=None, out_features=None, act_layer = nn.GELU, drop=0.):
218 | super().__init__()
219 |
220 | out_features = out_features or in_features
221 | hidden_features = hidden_features or in_features
222 | self.fc1 = nn.Linear(in_features,hidden_features)
223 | self.act = act_layer()
224 | self.fc2 = nn.Linear(hidden_features, out_features)
225 | self.drop = nn.Dropout(p=drop)
226 |
227 | def forward(self,x):
228 | x=self.fc1(x)
229 | x=self.act(x)
230 | x=self.drop(x)
231 | x=self.fc2(x)
232 | x=self.drop(x)
233 |
234 | return x
235 |
236 | class SwinTransformerBlock(nn.Module):
237 |
238 | """
239 | Swin Transformer basic block
240 | """
241 |
242 | def __init__(self,hidden_dim, input_resolution, num_heads, window_size=80, shift_size=0,
243 | mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
244 | act_layer=nn.GELU, norm_layer=nn.LayerNorm,):
245 | super().__init__()
246 |
247 | self.hidden_dim = hidden_dim
248 | self.input_resolution = input_resolution
249 | self.num_heads = num_heads
250 | self.window_size = window_size
251 | self.mlp_ratio = mlp_ratio
252 |
253 |
254 | self.norm1 = norm_layer(hidden_dim)
255 | #attention
256 | self.attention = WindowAttention(hidden_dim=hidden_dim,window_size=window_size,num_heads=num_heads,
257 | qkv_bias=qkv_bias,qk_scale=qk_scale,attn_drop=attn_drop,proj_drop=drop)
258 |
259 | self.drop_path = DropPath(drop_path) if drop_path >0 else nn.Identity()
260 | self.norm2 = norm_layer(hidden_dim)
261 | mlp_hidden_dim = int(hidden_dim*mlp_ratio)
262 | self.mlp = MLP(in_features=hidden_dim,hidden_features=mlp_hidden_dim,act_layer=act_layer,drop=drop)
263 |
264 |
265 | def forward(self,x, return_attention=False):
266 |
267 | B,L,C = x.shape
268 |
269 | shortcut = x
270 | x = self.norm1(x)
271 |
272 | x_windows = window_partition(x,self.window_size)
273 |
274 | #attention
275 |
276 | attention_windows, attention_matrix = self.attention(x_windows)
277 |
278 | x = window_reverse(attention_windows,self.window_size,L)
279 |
280 | x = shortcut + self.drop_path(x)
281 |
282 | # FFN
283 |
284 | x = x + self.drop_path(self.mlp(self.norm2(x)))
285 |
286 | if return_attention:
287 | return x, attention_matrix
288 | else:
289 | return x
290 |
291 | #OK
292 |
293 | #OK
294 | def window_partition(x,window_size):
295 | """
296 | Args:
297 | x: (B,L,C)
298 | window_size (int): window_size
299 |
300 | Returns:
301 | windows: (num_windows*B, window_size,C)
302 |
303 | """
304 | B,L,C = x.shape
305 | x = x.view(B,L//window_size, window_size,C)
306 |     windows = x.contiguous().view(-1,window_size,C) # consecutive patches per window; exact inverse of window_reverse
307 |
308 | return windows
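# Illustrative note (not in the original source): with L = 320 patches and window_size = 80,
# window_partition yields 4*B windows of 80 consecutive patches, and window_reverse below
# maps them back to the original (B, L, C) layout.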
309 |
310 | #OK
311 | def window_reverse(windows,window_size,L):
312 |
313 | B = int(windows.shape[0] / (L//window_size))
314 | x = windows.view(B, L//window_size, window_size,-1)
315 | x = x.contiguous().view(B,L,-1)
316 |
317 | return x
318 |
319 | #OK
320 |
321 | #OK need to add relative position bias [done]
322 |
323 | class WindowAttention(nn.Module):
324 |
325 | """
326 | Args:
327 |     x: input features with the shape of (num_windows*B, N, C)
328 | """
329 |
330 |
331 | def __init__(self,hidden_dim,window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
332 |
333 | super().__init__()
334 |
335 | self.hidden_dim = hidden_dim
336 | self.window_size = window_size
337 | self.num_heads = num_heads
338 | head_dim = hidden_dim // num_heads
339 | self.scale = qk_scale or head_dim ** -0.5
340 |
341 | self.qkv = nn.Linear(hidden_dim, hidden_dim * 3, bias=qkv_bias)
342 | self.attn_drop = nn.Dropout(attn_drop)
343 | self.proj = nn.Linear(hidden_dim,hidden_dim)
344 | self.proj_drop = nn.Dropout(proj_drop)
345 |
346 | self.softmax = nn.Softmax(dim=-1)
347 |
348 |
349 | ## relative position bias
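        # (comment added for clarity) the table stores one learnable bias per possible 1-D offset
        # between two patches in a window, i.e. offsets in [-(window_size-1), window_size-1];
        # relative_position_index[i, j] = (i - j) + window_size - 1 is used in forward() to look
        # the bias up for every query/key pair.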
350 | self.relative_position_bias_table = nn.Parameter(torch.zeros(2*window_size-1, num_heads))
351 | coords = torch.arange(self.window_size)
352 | relative_coords = coords[:, None] - coords[None, :]
353 | relative_coords += self.window_size - 1
354 | self.register_buffer("relative_position_index", relative_coords)
355 | trunc_normal_(self.relative_position_bias_table, std=.02)
356 |
357 |
358 | def forward(self,x):
359 |
360 | Bw,L,C = x.shape
361 | #print('batch size window: {}'.format(x.shape))
362 |
363 | qkv = self.qkv(x).reshape(Bw, L, 3, self.num_heads, C // self.num_heads).permute(2,0,3,1,4)
364 | q, k, v = qkv[0], qkv[1], qkv[2]
365 |
366 | q = q *self.scale
367 | attention = (q @ k.transpose(-2,-1))
368 |
369 | ## relative position bias
370 | relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(self.window_size, self.window_size, -1)
371 | relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
372 | attention = attention + relative_position_bias.unsqueeze(0)
373 |
374 | attention = self.softmax(attention)
375 | #print(attention.shape)
376 | #print(v.shape)
377 | attention = self.attn_drop(attention)
378 |
379 | x = (attention @ v).transpose(1,2).reshape(Bw,L,C)
380 |
381 | #print((attention @ v).transpose(1,2).shape)
382 | #print(x.shape)
383 | #print('*******')
384 |         x = self.proj(x)
385 |         x = self.proj_drop(x)
386 |
387 | return x, attention
388 | class PatchMerging(nn.Module):
389 |
390 | def __init__(self, hidden_dim, norm_layer = nn.LayerNorm):
391 | super().__init__()
392 |
393 | self.hidden_dim = hidden_dim
394 |
395 |         # merges every 4 consecutive patches by concatenating their feature vectors: (b, L, n) -> (b, L/4, 4n)
396 |         self.merging = Rearrange('b (v h) n -> b v (h n)', h=4)
397 |
398 | self.reduction = nn.Linear(4*hidden_dim, 2*hidden_dim,bias=False)
399 | self.norm = norm_layer(4*hidden_dim)
400 |
401 | def forward(self,x):
402 |
403 | #print(x.shape)
404 | #import pdb;pdb.set_trace()
405 |
406 | #x = x.view(B,-1, 4*C)
407 | x = self.merging(x)
408 | #import pdb;pdb.set_trace()
409 | x = self.norm(x)
410 | #import pdb;pdb.set_trace()
411 | x = self.reduction(x)
412 |
413 | return x
414 |
415 | if __name__ == '__main__':
416 |
417 | model = MSSiT()
418 |
419 |
420 |
421 |
422 |
423 |
424 |
425 |
426 |
427 |
428 |
429 |
430 |
431 |
432 |
--------------------------------------------------------------------------------
/models/ms_sit_shifted.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 | from timm.models.layers import DropPath, to_2tuple, trunc_normal_
5 | from einops.layers.torch import Rearrange
6 |
7 | from einops import repeat
8 |
9 |
10 | '''
11 | This implementation of SwinTransformer is greatly inspired by the official codebase: https://github.com/microsoft/Swin-Transformer
12 | and adapted for surface analysis on icospheric meshes.
13 | '''
14 |
15 | class MLP(nn.Module):
16 | '''
17 |     similar to https://github.com/microsoft/Swin-Transformer/blob/main/models/swin_transformer.py
18 | '''
19 | def __init__(self,in_features, hidden_features=None, out_features=None, act_layer = nn.GELU, drop=0.):
20 | super().__init__()
21 |
22 | out_features = out_features or in_features
23 | hidden_features = hidden_features or in_features
24 | self.fc1 = nn.Linear(in_features,hidden_features)
25 | self.act = act_layer()
26 | self.fc2 = nn.Linear(hidden_features, out_features)
27 | self.drop = nn.Dropout(p=drop)
28 |
29 | def forward(self,x):
30 | x=self.fc1(x)
31 | x=self.act(x)
32 | x=self.drop(x)
33 | x=self.fc2(x)
34 | x=self.drop(x)
35 |
36 | return x
37 |
38 | def window_partition(x,window_size):
39 | """
40 | Args:
41 | x: (B,L,C)
42 | window_size (int): window_size
43 |
44 | Returns:
45 | windows: (num_windows*B, window_size,C)
46 |
47 | """
48 | B,L,C = x.shape
49 | x = x.view(B,L//window_size, window_size,C)
50 |     windows = x.contiguous().view(-1,window_size,C) # consecutive patches per window; exact inverse of window_reverse
51 |
52 | return windows
53 |
54 | def window_reverse(windows,window_size,L):
55 |
56 | B = int(windows.shape[0] / (L//window_size))
57 | x = windows.view(B, L//window_size, window_size,-1)
58 | x = x.contiguous().view(B,L,-1)
59 |
60 | return x
61 |
62 |
63 | class WindowAttention(nn.Module):
64 |
65 | """
66 | Args:
67 |     x: input features with the shape of (num_windows*B, N, C)
68 | """
69 |
70 |
71 | def __init__(self,hidden_dim,window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
72 |
73 | super().__init__()
74 |
75 | self.hidden_dim = hidden_dim
76 | self.window_size = window_size
77 | self.num_heads = num_heads
78 | head_dim = hidden_dim // num_heads
79 | self.scale = qk_scale or head_dim ** -0.5
80 |
81 | self.qkv = nn.Linear(hidden_dim, hidden_dim * 3, bias=qkv_bias)
82 | self.attn_drop = nn.Dropout(attn_drop)
83 | self.proj = nn.Linear(hidden_dim,hidden_dim)
84 | self.proj_drop = nn.Dropout(proj_drop)
85 |
86 | self.softmax = nn.Softmax(dim=-1)
87 |
88 |
89 | ## relative position bias
90 | self.relative_position_bias_table = nn.Parameter(torch.zeros(2*window_size-1, num_heads))
91 | coords = torch.arange(self.window_size)
92 | relative_coords = coords[:, None] - coords[None, :]
93 | relative_coords += self.window_size - 1
94 | self.register_buffer("relative_position_index", relative_coords)
95 | trunc_normal_(self.relative_position_bias_table, std=.02)
96 |
97 |
98 | def forward(self,x):
99 |
100 | Bw,L,C = x.shape
101 | #print('batch size window: {}'.format(x.shape))
102 |
103 | qkv = self.qkv(x).reshape(Bw, L, 3, self.num_heads, C // self.num_heads).permute(2,0,3,1,4)
104 | q, k, v = qkv[0], qkv[1], qkv[2]
105 |
106 | q = q *self.scale
107 | attention = (q @ k.transpose(-2,-1))
108 |
109 | ## relative position bias
110 | relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(self.window_size, self.window_size, -1)
111 | relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
112 | attention = attention + relative_position_bias.unsqueeze(0)
113 |
114 | attention = self.softmax(attention)
115 | #print(attention.shape)
116 | #print(v.shape)
117 | attention = self.attn_drop(attention)
118 |
119 | x = (attention @ v).transpose(1,2).reshape(Bw,L,C)
120 |         x = self.proj(x)
121 |         x = self.proj_drop(x)
122 |
123 | return x
124 |
125 | class SwinTransformerBlock(nn.Module):
126 |
127 | """
128 | Swin Transformer basic block
129 | """
130 |
131 | def __init__(self,hidden_dim, input_resolution, num_heads, window_size=80,window_size_factor=1, shift=0,
132 | mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
133 | act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_global_attention=False):
134 | super().__init__()
135 |
136 | self.hidden_dim = hidden_dim
137 | self.input_resolution = input_resolution
138 | self.num_heads = num_heads
139 | self.window_size = window_size
140 | self.window_size_factor = window_size_factor
141 | self.mlp_ratio = mlp_ratio
142 | self.shift = shift
143 | self.use_global_attention = use_global_attention
144 |
145 |
146 | self.norm1 = norm_layer(hidden_dim)
147 | #attention
148 | self.attention = WindowAttention(hidden_dim=hidden_dim,window_size=window_size,num_heads=num_heads,
149 | qkv_bias=qkv_bias,qk_scale=qk_scale,attn_drop=attn_drop,proj_drop=drop)
150 |
151 | self.drop_path = DropPath(drop_path) if drop_path >0 else nn.Identity()
152 | self.norm2 = norm_layer(hidden_dim)
153 | mlp_hidden_dim = int(hidden_dim*mlp_ratio)
154 | self.mlp = MLP(in_features=hidden_dim,hidden_features=mlp_hidden_dim,act_layer=act_layer,drop=drop)
155 |
156 | '''
157 | if self.shift:
158 | self.R = torch.Tensor([[ 0.9484011 , -0.22552829, -0.22287293],
159 | [ 0.30020292, 0.86491236, 0.40224971],
160 | [ 0.10204686, -0.44840117, 0.88798808]])
161 |
162 | self.coordinates = torch.Tensor((nb.load('./coordinates_ico_6_L.shape.gii').agg_data()).T)
163 |
164 | self.rotated_coordinates = torch.matmul(self.coordinates,self.R)
165 |
166 |
167 | self.R_1 = torch.Tensor([[ 0.9484011 , 0.30020292, 0.10204686],
168 | [-0.22552829, 0.86491236, -0.44840117],
169 | [-0.22287293, 0.40224971, 0.88798808]])
170 |
171 | self.rotated_coordinates_1 = torch.matmul(self.coordinates,self.R_1)
172 | '''
173 |
174 |
175 | def forward(self,x):
176 |
177 | B,L,C = x.shape
178 | shortcut = x
179 | x = self.norm1(x)
180 |
181 | if self.shift and not self.use_global_attention:
182 | x_shifted = torch.roll(x,self.window_size//self.window_size_factor,dims=1)
183 | x_windows = window_partition(x_shifted,self.window_size)
184 |
185 | else:
186 | x_windows = window_partition(x,self.window_size)
187 |
188 | #attention
189 | attention_windows = self.attention(x_windows)
190 |
191 | if self.shift and not self.use_global_attention:
192 | attention_windows_reversed = window_reverse(attention_windows,self.window_size,L)
193 | x = torch.roll(attention_windows_reversed,-self.window_size//self.window_size_factor,dims=1)
194 | else:
195 | x = window_reverse(attention_windows,self.window_size,L)
196 |
197 | x = shortcut + self.drop_path(x)
198 |
199 | # FFN
200 | x = x + self.drop_path(self.mlp(self.norm2(x)))
201 |
202 | return x
203 |
204 |
205 | class PatchMerging(nn.Module):
206 |
207 | def __init__(self, hidden_dim, norm_layer = nn.LayerNorm):
208 | super().__init__()
209 |
210 | self.hidden_dim = hidden_dim
211 |
212 |         # merges every 4 consecutive patches by concatenating their feature vectors: (b, L, n) -> (b, L/4, 4n)
213 |         self.merging = Rearrange('b (v h) n -> b v (h n)', h=4)
214 |
215 | self.reduction = nn.Linear(4*hidden_dim, 2*hidden_dim,bias=False)
216 | self.norm = norm_layer(4*hidden_dim)
217 |
218 | def forward(self,x):
219 |
220 | #print(x.shape)
221 | #import pdb;pdb.set_trace()
222 |
223 | #x = x.view(B,-1, 4*C)
224 | x = self.merging(x)
225 | #import pdb;pdb.set_trace()
226 | x = self.norm(x)
227 | #import pdb;pdb.set_trace()
228 | x = self.reduction(x)
229 |
230 | return x
231 |
232 |
233 | class BasicLayer(nn.Module):
234 |
235 | """
236 | Basic Swin Transformer layer for one stage
237 | """
238 |
239 | def __init__(self,hidden_dim, input_resolution, depth, num_heads,window_size, window_size_factor,
240 | mlp_ratio=4, qkv_bias=True, qk_scale=None, drop=0, attn_drop=0,
241 | drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_global_attention= False):
242 | super().__init__()
243 |
244 | self.hidden_dim = hidden_dim
245 | self.input_resolution = input_resolution
246 | self.depth = depth
247 | #build blocks
248 | self.blocks = nn.ModuleList([
249 | SwinTransformerBlock(hidden_dim=hidden_dim, input_resolution=input_resolution,
250 | num_heads=num_heads, window_size=window_size,
251 | window_size_factor=window_size_factor,
252 | shift = 0 if (i%2 ==0) else 1,
253 | mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
254 | drop=drop,attn_drop=attn_drop,
255 | drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
256 | norm_layer=norm_layer, use_global_attention = use_global_attention)
257 | for i in range(depth)
258 | ])
259 |
260 | #merging layer
261 | if downsample is not None:
262 | self.downsample = downsample(hidden_dim=hidden_dim, norm_layer= norm_layer)
263 | else:
264 | self.downsample = None
265 |
266 | def forward(self,x):
267 | for block in self.blocks:
268 | x = block(x)
269 | if self.downsample is not None:
270 | x = self.downsample(x)
271 | return x
272 |
273 |
274 | class MSSiT_shifted(nn.Module):
275 | """
276 | Args:
277 | window_size (int): Number of patches to apply local-attention to.
278 | """
279 |
280 |
281 | def __init__(self,ico_init_resolution=4,num_channels=4,num_classes=1,
282 | embed_dim=96, depths=[2,2,6,2],num_heads=[3,6,12,24],
283 |                  window_size=80,window_size_factor=1, mlp_ratio=4,qkv_bias=True,qk_scale=None, # None -> default head_dim**-0.5 scaling
284 | dropout=0, attention_dropout=0,drop_path_rate=0.1,
285 | norm_layer=nn.LayerNorm, use_pos_emb=False,patch_norm=True,
286 | use_confounds =False,**kwargs):
287 | super().__init__()
288 |
289 | self.num_classes = num_classes
290 | self.num_layers = len(depths)
291 | self.embed_dim = embed_dim
292 | self.use_pos_emb = use_pos_emb
293 | self.patch_norm = patch_norm
294 | self.num_features = int(embed_dim * 2 ** (self.num_layers-1)) #number of features after the last layer
295 | self.mlp_ratio = mlp_ratio
296 | self.num_channels = num_channels
297 |
298 | if isinstance(window_size,int):
299 | self.window_sizes = [window_size for i in range(self.num_layers)]
300 | elif isinstance(window_size,list):
301 | self.window_sizes = window_size
302 |
303 | self.window_size_factor = window_size_factor
304 | print('window size: {}'.format(self.window_sizes))
305 | print('window size factor: {}'.format(self.window_size_factor))
306 |
307 | if ico_init_resolution==4:
308 | self.num_patches = 5120
309 | self.num_vertices = 15
310 | patch_dim = self.num_vertices * self.num_channels
311 | if len(self.window_sizes)==4 and self.window_sizes[-1]==80:
312 | print('*** use global attention in the last stage**')
313 | self.use_global_att = [False, False, False, True]
314 | else:
315 | print('** NOT using global attention in the last stage**')
316 | self.use_global_att = [False for i in range(len(self.window_sizes))]
317 | elif ico_init_resolution==5:
318 | self.num_patches = 20480
319 | self.num_vertices = 6
320 | patch_dim = self.num_vertices * self.num_channels
321 | if len(self.window_sizes)==4 and self.window_sizes[-1]==320:
322 | print('*** use global attention in the last stage**')
323 | self.use_global_att = [False, False, False, True]
324 | elif len(self.window_sizes)==5 and self.window_sizes[-1]==80:
325 | print('**use global attention in the last stage**')
326 | self.use_global_att = [False, False, False, False, True]
327 | else:
328 | print('** NOT using global attention in the last stage**')
329 | self.use_global_att = [False for i in range(len(self.window_sizes))]
330 |
331 | # absolute position embedding
332 | if use_pos_emb:
333 | self.absolute_pos_embed = nn.Parameter(torch.zeros(1, self.num_patches, self.embed_dim))
334 | trunc_normal_(self.absolute_pos_embed, std=.02)
335 |
336 | #need another version with conv1d
337 | self.to_patch_embedding = nn.Sequential(
338 | Rearrange('b c n v -> b n (v c)'),
339 | nn.Linear(patch_dim, self.embed_dim),
340 | )
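        # (comment added for clarity) each patch of num_vertices vertices x num_channels channels
        # is flattened to a vector of length patch_dim before the linear projection to embed_dim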
341 |
342 | if use_confounds:
343 | self.proj_confound = nn.Sequential(
344 | nn.BatchNorm1d(1),
345 | nn.Linear(1,embed_dim))
346 | self.use_confounds = use_confounds
347 |
348 | self.pos_dropout = nn.Dropout(p=dropout)
349 |
350 | # stochastic depth
351 | dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
352 |
353 | self.layers = nn.ModuleList()
354 | for i_layer in range(self.num_layers):
355 | layer = BasicLayer(hidden_dim=int(embed_dim * 2 ** i_layer),
356 | input_resolution=ico_init_resolution,
357 | depth= depths[i_layer],
358 | num_heads=num_heads[i_layer],
359 | window_size=self.window_sizes[i_layer],
360 | window_size_factor= self.window_size_factor,
361 | mlp_ratio =mlp_ratio,
362 | qkv_bias=qkv_bias, qk_scale=qk_scale,
363 | drop = dropout, attn_drop=attention_dropout,
364 | drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
365 | norm_layer=norm_layer,
366 | downsample=PatchMerging if (i_layer < self.num_layers -1) else None,
367 | use_global_attention = self.use_global_att[i_layer])
368 | self.layers.append(layer)
369 |
370 | self.pre_norm = norm_layer(self.embed_dim)
371 | self.norm = norm_layer(self.num_features)
372 | self.avgpool = nn.AdaptiveAvgPool1d(1)
373 | self.head = nn.Linear(self.num_features,num_classes) if num_classes > 0 else nn.Identity()
374 |
375 | self.apply(self._init_weights)
376 |
377 | ## normalise patches
378 |
379 | def _init_weights(self, m):
380 |
381 | if isinstance(m, nn.Linear):
382 | trunc_normal_(m.weight, std=.02)
383 | if isinstance(m, nn.Linear) and m.bias is not None:
384 | nn.init.constant_(m.bias, 0)
385 | elif isinstance(m, nn.LayerNorm):
386 | nn.init.constant_(m.bias, 0)
387 | nn.init.constant_(m.weight, 1.0)
388 |
389 |
390 | def forward_features(self,x,confounds=None):
391 | # input: B,C,L,V
392 | x = self.to_patch_embedding(x) # B, L, embed_dim=C
393 | b, n, _ = x.shape
394 |
395 | x = self.pre_norm(x)
396 |
397 | if self.use_pos_emb:
398 | x += self.absolute_pos_embed
399 | # deconfounding technique
400 | if self.use_confounds and (confounds is not None):
401 | confounds = self.proj_confound(confounds.view(-1,1))
402 | confounds = repeat(confounds, 'b d -> b n d', n=n)
403 | x += confounds
404 | x = self.pos_dropout(x)
405 |
406 | for i, layer in enumerate(self.layers):
407 | x = layer(x)
408 |         x = self.norm(x) # B,L,C=int(embed_dim * 2 ** (num_layer-1))
409 | x = self.avgpool(x.transpose(1,2)) # B,C,1
410 | x = torch.flatten(x,1) # B,C
411 | return x
412 |
413 | def forward(self,x,confounds=None):
414 | # input: B,C,L,V
415 | x = self.forward_features(x,confounds) #B,int(embed_dim * 2 ** i_layer)
416 | x = self.head(x) #B, num_classes
417 | return x
418 |
419 |
420 | if __name__ == '__main__':
421 |
422 | model = MSSiT_shifted()
423 |
424 | import pdb;pdb.set_trace()
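# A minimal smoke-test sketch (not part of the original file), assuming the default
# configuration above (ico_init_resolution=4, num_channels=4 -> 5120 patches of 15 vertices):
#   x = torch.randn(2, 4, 5120, 15)   # B, C, L, V
#   out = model(x)                     # -> shape (2, num_classes) = (2, 1)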
425 |
426 |
427 |
428 |
429 |
430 |
431 |
432 |
433 |
434 |
435 |
436 |
437 |
438 |
439 |
440 |
441 |
442 |
--------------------------------------------------------------------------------
/models/sit.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # @Author: Simon Dahan
3 | # @Last Modified time: 2021-12-16 10:41:47
4 | #
5 | # Created on Fri Oct 01 2021
6 | #
7 | # by Simon Dahan @SD3004
8 | #
9 | # Copyright (c) 2021 MeTrICS Lab
10 | #
11 |
12 | '''
13 | This file contains the implementation of the ViT model: https://arxiv.org/abs/2010.11929 adapted to the case of surface patching.
14 | Input data is a sequence of non-overlapping patches.
15 | '''
16 |
17 | import torch
18 | from torch import nn
19 |
20 | from einops import repeat
21 | from einops.layers.torch import Rearrange
22 |
23 | from vit_pytorch.vit import Transformer
24 |
25 | class SiT(nn.Module):
26 | def __init__(self, *,
27 | dim,
28 | depth,
29 | heads,
30 | mlp_dim,
31 | pool = 'cls',
32 | num_patches = 20,
33 | num_classes= 1,
34 | num_channels =4,
35 | num_vertices = 2145,
36 | dim_head = 64,
37 | dropout = 0.,
38 | emb_dropout = 0.
39 | ):
40 |
41 | super().__init__()
42 |
43 | assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)'
44 |
45 | patch_dim = num_channels * num_vertices
46 |
47 | # inputs has size = b * c * n * v
48 | self.to_patch_embedding = nn.Sequential(
49 | Rearrange('b c n v -> b n (v c)'),
50 | nn.Linear(patch_dim, dim),
51 | )
52 |
53 | self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim))
54 | self.cls_token = nn.Parameter(torch.randn(1, 1, dim))
55 | self.dropout = nn.Dropout(emb_dropout)
56 |
57 | self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout)
58 |
59 | self.pool = pool
60 | self.to_latent = nn.Identity()
61 | self.mlp_head = nn.Sequential(
62 | nn.LayerNorm(dim),
63 | nn.Linear(dim, num_classes)
64 | )
65 |
66 | def forward(self, img):
67 | x = self.to_patch_embedding(img)
68 | b, n, _ = x.shape
69 |
70 | cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b)
71 |
72 | x = torch.cat((cls_tokens, x), dim=1)
73 | x += self.pos_embedding[:, :(n + 1)]
74 | x = self.dropout(x)
75 |
76 | x = self.transformer(x)
77 |
78 | x = x.mean(dim = 1) if self.pool == 'mean' else x[:, 0]
79 |
80 | x = self.to_latent(x)
81 |
82 | return self.mlp_head(x)
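
# A minimal usage sketch (not part of the original file); the dim/depth/heads/mlp_dim values
# below are illustrative only, the remaining arguments follow the defaults above:
#   model = SiT(dim=192, depth=12, heads=3, mlp_dim=768)
#   x = torch.randn(2, 4, 20, 2145)    # b, num_channels, num_patches, num_vertices
#   out = model(x)                      # -> shape (2, num_classes) = (2, 1)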
--------------------------------------------------------------------------------
/patch_extraction/reorder_patches/order_ico1.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metrics-lab/surface-vision-transformers/377e5dd9fddb59344d673281e28d7bea13c305e3/patch_extraction/reorder_patches/order_ico1.npy
--------------------------------------------------------------------------------
/patch_extraction/reorder_patches/order_ico2.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metrics-lab/surface-vision-transformers/377e5dd9fddb59344d673281e28d7bea13c305e3/patch_extraction/reorder_patches/order_ico2.npy
--------------------------------------------------------------------------------
/patch_extraction/reorder_patches/order_ico3.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metrics-lab/surface-vision-transformers/377e5dd9fddb59344d673281e28d7bea13c305e3/patch_extraction/reorder_patches/order_ico3.npy
--------------------------------------------------------------------------------
/patch_extraction/reorder_patches/order_ico4.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metrics-lab/surface-vision-transformers/377e5dd9fddb59344d673281e28d7bea13c305e3/patch_extraction/reorder_patches/order_ico4.npy
--------------------------------------------------------------------------------
/patch_extraction/reorder_patches/order_ico5.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metrics-lab/surface-vision-transformers/377e5dd9fddb59344d673281e28d7bea13c305e3/patch_extraction/reorder_patches/order_ico5.npy
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | pyyaml
2 | timm
3 | pandas
4 | einops
5 | vit-pytorch
6 | nibabel
7 | tensorboard
8 | #warmup-scheduler
--------------------------------------------------------------------------------
/surfaces/img_indices_40962.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metrics-lab/surface-vision-transformers/377e5dd9fddb59344d673281e28d7bea13c305e3/surfaces/img_indices_40962.npy
--------------------------------------------------------------------------------
/surfaces/img_weights_40962.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metrics-lab/surface-vision-transformers/377e5dd9fddb59344d673281e28d7bea13c305e3/surfaces/img_weights_40962.npy
--------------------------------------------------------------------------------
/surfaces/metric_resample.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | """
4 | Created on Mon Dec 10 18:07:54 2022
5 | @author: Mohamed A. Suliman
6 | """
7 | import numpy as np
8 | import torch
9 | import math
10 |
11 | #files_dir = '../surfaces/'
12 |
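# (comment added for clarity) lat_lon_img_metrics projects per-vertex features onto a
# W x W equirectangular (lat-lon) image, combining, for every pixel, the features of the
# pre-computed source vertices in img_indices_*.npy weighted by img_weights_*.npy.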
13 | def lat_lon_img_metrics(files_dir , moving_feat, device):
14 | num_ver = len(moving_feat)
15 |
16 | img_idxs = np.load(files_dir+'img_indices_'+ str(num_ver) +'.npy').astype(np.int64)
17 | img_weights = np.load(files_dir+'img_weights_'+ str(num_ver) +'.npy').astype(np.float32)
18 |
19 | img_idxs =torch.from_numpy(img_idxs).to(device)
20 | img_weights = torch.from_numpy(img_weights).to(device)
21 |
22 | W = int(np.sqrt(len(img_idxs)))
23 |
24 | img = torch.sum(((moving_feat[img_idxs.flatten()]).reshape(img_idxs.shape[0], img_idxs.shape[1], moving_feat.shape[1]))*((img_weights.unsqueeze(2)).repeat(1,1,moving_feat.shape[1])),1)
25 |
26 | img = img.reshape(W, W, moving_feat.shape[1])
27 |
28 | return img
29 |
30 |
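# (comment added for clarity) bilinear_sphere_resample converts the 3-D grid points on a
# sphere of the given radius to spherical coordinates (Theta, Phi), maps them to pixel
# coordinates (u, v) in the W x W lat-lon image, and bilinearly interpolates org_img there.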
31 | def bilinear_sphere_resample(rot_grid, org_img, radius, device):
32 |
33 | assert rot_grid.shape[1] == 3, "grid.shape[1] ≠ 3"
34 |
35 | rot_grid_r1 = rot_grid/radius
36 |
37 | w = org_img.shape[0]
38 |
39 | rot_grid_r1[:,2] = torch.clamp(rot_grid_r1[:,2].clone(), -0.9999999, 0.9999999)
40 |
41 | Theta = torch.acos(rot_grid_r1[:,2]/1.0)
42 | Phi = torch.zeros_like(Theta)
43 |
44 | zero_idxs = (rot_grid_r1[:,0] == 0).nonzero(as_tuple=True)[0]
45 | rot_grid_r1[zero_idxs, 0] = 1e-15
46 |
47 | pos_idxs = (rot_grid_r1[:,0] > 0).nonzero(as_tuple=True)[0]
48 | Phi[pos_idxs] = torch.atan(rot_grid_r1[pos_idxs, 1]/rot_grid_r1[pos_idxs, 0])
49 |
50 | neg_idxs = (rot_grid_r1[:,0] < 0).nonzero(as_tuple=True)[0]
51 | Phi[neg_idxs] = torch.atan(rot_grid_r1[neg_idxs, 1]/rot_grid_r1[neg_idxs, 0]) + math.pi
52 |
53 | Phi = torch.remainder(Phi + 2 * math.pi, 2*math.pi)
54 |
55 | assert len(pos_idxs) + len(neg_idxs) == len(rot_grid_r1)
56 |
57 | u = Phi/(2*math.pi/(w-1))
58 | v = Theta/(math.pi/(w-1))
59 |
60 | v = torch.clamp(v, 0.0000001, org_img.shape[1]-1.00000001).to(device)
61 | u = torch.clamp(u, 0.0000001, org_img.shape[1]-1.1).to(device)
62 |
63 | u_floor = torch.floor(u)
64 | u_ceil = u_floor + 1
65 | v_floor = torch.floor(v)
66 | v_ceil = v_floor + 1
67 |
68 | img1 = org_img[v_floor.long(), u_floor.long()]
69 | img2 = org_img[v_floor.long(), u_ceil.long()]
70 | img3 = org_img[v_ceil.long() , u_floor.long()]
71 | img4 = org_img[v_ceil.long() , u_ceil.long()]
72 |
73 | Q1 = (u_ceil-u).unsqueeze(1)*img1 + (u-u_floor).unsqueeze(1)*img2
74 | Q2 = (u_ceil-u).unsqueeze(1)*img3 + (u-u_floor).unsqueeze(1)*img4
75 | Q = (v_ceil-v).unsqueeze(1)*Q1 + (v-v_floor).unsqueeze(1)*Q2
76 |
77 | return Q
78 |
79 | def lat_lon_img_batch(files_dir , moving_feat):
80 |
81 | num_ver = moving_feat.shape[1]
82 | b, v, c = moving_feat.shape
83 |
84 | img_idxs = np.load(files_dir+'img_indices_'+ str(num_ver) +'.npy').astype(np.int64)
85 | img_weights = np.load(files_dir+'img_weights_'+ str(num_ver) +'.npy').astype(np.float32)
86 | print(img_idxs.shape)
87 |
88 | img_idxs =torch.from_numpy(img_idxs)
89 | img_weights = torch.from_numpy(img_weights)
90 |
91 | W = int(np.sqrt(len(img_idxs)))
92 | print(W)
93 |
94 | print(moving_feat[:,img_idxs.flatten(),:].shape)
95 | print(b, img_idxs.shape[0], img_idxs.shape[1], moving_feat.shape[2])
96 | print(img_weights.unsqueeze(2).shape)
97 |
98 | img = torch.sum(((moving_feat[:,img_idxs.flatten(),:]).reshape(b, img_idxs.shape[0], img_idxs.shape[1], moving_feat.shape[2]))*((img_weights.unsqueeze(2)).repeat(1,1,1,moving_feat.shape[2])),2)
99 |
100 | img = img.reshape(b, W, W, moving_feat.shape[2])
101 |
102 | return img
103 |
104 | def bilinear_sphere_resample_batch(rot_grid, org_img, radius):
105 |
106 | assert rot_grid.shape[1] == 3, "grid.shape[1] ≠ 3"
107 |
108 | rot_grid_r1 = rot_grid/radius
109 |
110 | w = org_img.shape[1]
111 |
112 | rot_grid_r1[:,2] = torch.clamp(rot_grid_r1[:,2].clone(), -0.9999999, 0.9999999)
113 |
114 | Theta = torch.acos(rot_grid_r1[:,2]/1.0)
115 | Phi = torch.zeros_like(Theta)
116 |
117 | zero_idxs = (rot_grid_r1[:,0] == 0).nonzero(as_tuple=True)[0]
118 | rot_grid_r1[zero_idxs, 0] = 1e-15
119 |
120 | pos_idxs = (rot_grid_r1[:,0] > 0).nonzero(as_tuple=True)[0]
121 | Phi[pos_idxs] = torch.atan(rot_grid_r1[pos_idxs, 1]/rot_grid_r1[pos_idxs, 0])
122 |
123 | neg_idxs = (rot_grid_r1[:,0] < 0).nonzero(as_tuple=True)[0]
124 | Phi[neg_idxs] = torch.atan(rot_grid_r1[neg_idxs, 1]/rot_grid_r1[neg_idxs, 0]) + math.pi
125 |
126 | Phi = torch.remainder(Phi + 2 * math.pi, 2*math.pi)
127 |
128 | assert len(pos_idxs) + len(neg_idxs) == len(rot_grid_r1)
129 |
130 | u = Phi/(2*math.pi/(w-1))
131 | v = Theta/(math.pi/(w-1))
132 |
133 | v = torch.clamp(v, 0.0000001, org_img.shape[2]-1.00000001)
134 | u = torch.clamp(u, 0.0000001, org_img.shape[2]-1.1)
135 |
136 | u_floor = torch.floor(u)
137 | u_ceil = u_floor + 1
138 | v_floor = torch.floor(v)
139 | v_ceil = v_floor + 1
140 |
141 | img1 = org_img[:,v_floor.long(), u_floor.long()]
142 | img2 = org_img[:,v_floor.long(), u_ceil.long()]
143 | img3 = org_img[:,v_ceil.long() , u_floor.long()]
144 | img4 = org_img[:,v_ceil.long() , u_ceil.long()]
145 |
146 | Q1 = (u_ceil-u).unsqueeze(1)*img1 + (u-u_floor).unsqueeze(1)*img2
147 | Q2 = (u_ceil-u).unsqueeze(1)*img3 + (u-u_floor).unsqueeze(1)*img4
148 | Q = (v_ceil-v).unsqueeze(1)*Q1 + (v-v_floor).unsqueeze(1)*Q2
149 |
150 | return Q
151 |
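# Note (added for clarity): despite its name, the function below currently mirrors the
# bilinear implementation above; a label-aware nearest-neighbour variant lives in
# surfaces/metric_resample_labels.py.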
152 | def nearest_neighbour_sphere_resample(rot_grid, org_img, radius, device):
153 |
154 | assert rot_grid.shape[1] == 3, "grid.shape[1] ≠ 3"
155 |
156 | rot_grid_r1 = rot_grid/radius
157 |
158 | w = org_img.shape[0]
159 |
160 | rot_grid_r1[:,2] = torch.clamp(rot_grid_r1[:,2].clone(), -0.9999999, 0.9999999)
161 |
162 | Theta = torch.acos(rot_grid_r1[:,2]/1.0)
163 | Phi = torch.zeros_like(Theta)
164 |
165 | zero_idxs = (rot_grid_r1[:,0] == 0).nonzero(as_tuple=True)[0]
166 | rot_grid_r1[zero_idxs, 0] = 1e-15
167 |
168 | pos_idxs = (rot_grid_r1[:,0] > 0).nonzero(as_tuple=True)[0]
169 | Phi[pos_idxs] = torch.atan(rot_grid_r1[pos_idxs, 1]/rot_grid_r1[pos_idxs, 0])
170 |
171 | neg_idxs = (rot_grid_r1[:,0] < 0).nonzero(as_tuple=True)[0]
172 | Phi[neg_idxs] = torch.atan(rot_grid_r1[neg_idxs, 1]/rot_grid_r1[neg_idxs, 0]) + math.pi
173 |
174 | Phi = torch.remainder(Phi + 2 * math.pi, 2*math.pi)
175 |
176 | assert len(pos_idxs) + len(neg_idxs) == len(rot_grid_r1)
177 |
178 | u = Phi/(2*math.pi/(w-1))
179 | v = Theta/(math.pi/(w-1))
180 |
181 | v = torch.clamp(v, 0.0000001, org_img.shape[1]-1.00000001).to(device)
182 | u = torch.clamp(u, 0.0000001, org_img.shape[1]-1.1).to(device)
183 |
184 | u_floor = torch.floor(u)
185 | u_ceil = u_floor + 1
186 | v_floor = torch.floor(v)
187 | v_ceil = v_floor + 1
188 |
189 | img1 = org_img[v_floor.long(), u_floor.long()]
190 | img2 = org_img[v_floor.long(), u_ceil.long()]
191 | img3 = org_img[v_ceil.long() , u_floor.long()]
192 | img4 = org_img[v_ceil.long() , u_ceil.long()]
193 |
194 | Q1 = (u_ceil-u).unsqueeze(1)*img1 + (u-u_floor).unsqueeze(1)*img2
195 | Q2 = (u_ceil-u).unsqueeze(1)*img3 + (u-u_floor).unsqueeze(1)*img4
196 | Q = (v_ceil-v).unsqueeze(1)*Q1 + (v-v_floor).unsqueeze(1)*Q2
197 |
198 | return Q
--------------------------------------------------------------------------------
/surfaces/metric_resample_labels.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | import math
4 | import random
5 | import nibabel as nb
6 | from scipy.spatial.transform import Rotation as R
7 |
8 |
9 | def lat_lon_img_labels(files_dir , moving_feat, device):
10 |
11 | num_ver = len(moving_feat)
12 |
13 | img_idxs = np.load(files_dir+'img_indices_'+ str(num_ver) +'.npy').astype(np.int64)
14 | img_weights = np.load(files_dir+'img_weights_'+ str(num_ver) +'.npy').astype(np.float32)
15 |
16 | img_idxs =torch.from_numpy(img_idxs).to(device)
17 | img_weights = torch.from_numpy(img_weights).to(device)
18 |
19 | W = int(np.sqrt(len(img_idxs)))
20 |
21 | img = torch.sum(((moving_feat[img_idxs.flatten()]).reshape(img_idxs.shape[0], img_idxs.shape[1], moving_feat.shape[1]))*((img_weights.unsqueeze(2)).repeat(1,1,moving_feat.shape[1])),1)
22 |
23 | img = img.reshape(W, W, moving_feat.shape[1])
24 |
25 | return img
26 |
27 |
28 | def nearest_neighbour_sphere_resample(rot_grid, org_img, radius, device):
29 |
30 | assert rot_grid.shape[1] == 3, "grid.shape[1] ≠ 3"
31 |
32 | rot_grid_r1 = rot_grid/radius
33 |
34 | w = org_img.shape[0]
35 |
36 | rot_grid_r1[:,2] = torch.clamp(rot_grid_r1[:,2].clone(), -0.9999999, 0.9999999)
37 |
38 | Theta = torch.acos(rot_grid_r1[:,2]/1.0)
39 | Phi = torch.zeros_like(Theta)
40 |
41 | zero_idxs = (rot_grid_r1[:,0] == 0).nonzero(as_tuple=True)[0]
42 | rot_grid_r1[zero_idxs, 0] = 1e-15
43 |
44 | pos_idxs = (rot_grid_r1[:,0] > 0).nonzero(as_tuple=True)[0]
45 | Phi[pos_idxs] = torch.atan(rot_grid_r1[pos_idxs, 1]/rot_grid_r1[pos_idxs, 0])
46 |
47 | neg_idxs = (rot_grid_r1[:,0] < 0).nonzero(as_tuple=True)[0]
48 | Phi[neg_idxs] = torch.atan(rot_grid_r1[neg_idxs, 1]/rot_grid_r1[neg_idxs, 0]) + math.pi
49 |
50 | Phi = torch.remainder(Phi + 2 * math.pi, 2*math.pi)
51 |
52 | assert len(pos_idxs) + len(neg_idxs) == len(rot_grid_r1)
53 |
54 | u = Phi/(2*math.pi/(w-1))
55 | v = Theta/(math.pi/(w-1))
56 |
57 | v = torch.clamp(v, 0.0000001, org_img.shape[1]-1.00000001).to(device)
58 | u = torch.clamp(u, 0.0000001, org_img.shape[1]-1.1).to(device)
59 |
60 | u_floor = torch.floor(u)
61 | u_ceil = u_floor + 1
62 | v_floor = torch.floor(v)
63 | v_ceil = v_floor + 1
64 |
65 | #x,y coordinates of surrounding points
66 | a1 = torch.concat([v_floor.unsqueeze(1),u_floor.unsqueeze(1)],axis=1)
67 | a2 = torch.concat([v_floor.unsqueeze(1),u_ceil.unsqueeze(1)],axis=1)
68 | a3 = torch.concat([v_ceil.unsqueeze(1),u_floor.unsqueeze(1)],axis=1)
69 | a4 = torch.concat([v_ceil.unsqueeze(1),u_ceil.unsqueeze(1)],axis=1)
70 |
71 | b = torch.concat([a1.unsqueeze(0),a2.unsqueeze(0),a3.unsqueeze(0),a4.unsqueeze(0)],axis=0)
72 |
73 | p = torch.concat([v.unsqueeze(1),u.unsqueeze(1)],axis=1)
74 |
75 | # compute distance between (v,u) points and all points
76 | dist = torch.sqrt(torch.sum(torch.pow((b-p),2),axis=2))
77 |
78 | # get list of indices with minimal distance to point (v,u)
79 |     first, second = zip(*enumerate(torch.argmin(dist,dim=0)))
80 |
81 | coordinates_to_select = torch.transpose(b,1,0)[first,second,:]
82 |
83 | Q = org_img[coordinates_to_select[:,0].long(),coordinates_to_select[:,1].long()]
84 |
85 | return torch.round(Q)
86 |
87 |
88 |
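# (comment added for clarity) majority_sphere_resample gathers, for each of the 40962 target
# vertices, the labels of a grid of pixels surrounding (v, u) in the lat-lon label image and
# returns the most frequent label via torch.bincount(...).argmax().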
89 | def majority_sphere_resample(rot_grid, org_img, radius, device):
90 |
91 | assert rot_grid.shape[1] == 3, "grid.shape[1] ≠ 3"
92 |
93 | rot_grid_r1 = rot_grid/radius
94 |
95 | w = org_img.shape[0]
96 |
97 | rot_grid_r1[:,2] = torch.clamp(rot_grid_r1[:,2].clone(), -0.9999999, 0.9999999)
98 |
99 | Theta = torch.acos(rot_grid_r1[:,2]/1.0)
100 | Phi = torch.zeros_like(Theta)
101 |
102 | zero_idxs = (rot_grid_r1[:,0] == 0).nonzero(as_tuple=True)[0]
103 | rot_grid_r1[zero_idxs, 0] = 1e-15
104 |
105 | pos_idxs = (rot_grid_r1[:,0] > 0).nonzero(as_tuple=True)[0]
106 | Phi[pos_idxs] = torch.atan(rot_grid_r1[pos_idxs, 1]/rot_grid_r1[pos_idxs, 0])
107 |
108 | neg_idxs = (rot_grid_r1[:,0] < 0).nonzero(as_tuple=True)[0]
109 | Phi[neg_idxs] = torch.atan(rot_grid_r1[neg_idxs, 1]/rot_grid_r1[neg_idxs, 0]) + math.pi
110 |
111 | Phi = torch.remainder(Phi + 2 * math.pi, 2*math.pi)
112 |
113 | assert len(pos_idxs) + len(neg_idxs) == len(rot_grid_r1)
114 |
115 | u = Phi/(2*math.pi/(w-1))
116 | v = Theta/(math.pi/(w-1))
117 |
118 | v = torch.clamp(v, 0.0000001, org_img.shape[1]-1.00000001).to(device)
119 | u = torch.clamp(u, 0.0000001, org_img.shape[1]-1.1).to(device)
120 |
121 | u_floor = torch.floor(u)
122 | u_floor_1 = u_floor - 1
123 | u_floor_2 = u_floor_1 -1
124 | u_floor_3 = u_floor_2 -1
125 | u_ceil = u_floor + 1
126 | u_ceil_1 = u_ceil + 1
127 | u_ceil_2 = u_ceil_1 + 1
128 | u_ceil_3 = u_ceil_2 + 1
129 | v_floor = torch.floor(v)
130 | v_floor_1 = v_floor - 1
131 | v_floor_2 = v_floor_1 - 1
132 | v_floor_3 = v_floor_2 -1
133 | v_ceil = v_floor + 1
134 | v_ceil_1 = v_ceil + 1
135 | v_ceil_2 = v_ceil_1 + 1
136 | v_ceil_3 = v_ceil_2 +1
137 |
138 | v_ceil_1[v_ceil_1==512]=511
139 | u_ceil_1[u_ceil_1==512]=511
140 | v_ceil_2[v_ceil_2==512]=511
141 | u_ceil_2[u_ceil_2==512]=511
142 | v_ceil_2[v_ceil_2==513]=511
143 | u_ceil_2[u_ceil_2==513]=511
144 | v_ceil_3[v_ceil_3==512]=511
145 | u_ceil_3[u_ceil_3==512]=511
146 | v_ceil_3[v_ceil_3==513]=511
147 | u_ceil_3[u_ceil_3==513]=511
148 | v_ceil_3[v_ceil_3==514]=511
149 | u_ceil_3[u_ceil_3==514]=511
150 |
151 |
152 |
153 |
154 | img1 = org_img[v_floor.long(), u_floor.long()]
155 | img2 = org_img[v_floor.long(), u_ceil.long()]
156 | img3 = org_img[v_ceil.long() , u_floor.long()]
157 | img4 = org_img[v_ceil.long() , u_ceil.long()]
158 |
159 | img5 = org_img[v_floor_1.long() , u_floor.long()]
160 | img6 = org_img[v_floor_1.long() , u_ceil.long()]
161 | img7 = org_img[v_ceil_1.long() , u_floor.long()]
162 | img8 = org_img[v_ceil_1.long() , u_ceil.long()]
163 | img9 = org_img[v_floor.long() , u_floor_1.long()]
164 | img10 = org_img[v_floor.long() , u_ceil_1.long()]
165 | img11 = org_img[v_ceil.long() , u_floor_1.long()]
166 | img12 = org_img[v_ceil.long() , u_ceil_1.long()]
167 | img13 = org_img[v_floor_1.long() , u_floor_1.long()]
168 | img14 = org_img[v_floor_1.long() , u_ceil_1.long()]
169 | img15 = org_img[v_ceil_1.long() , u_floor_1.long()]
170 | img16 = org_img[v_ceil_1.long() , u_ceil_1.long()]
171 |
172 | img17 = org_img[v_floor_2.long() , u_floor.long()]
173 | img18 = org_img[v_floor_2.long() , u_ceil.long()]
174 | img19 = org_img[v_floor_2.long() , u_floor_1.long()]
175 | img20 = org_img[v_floor_2.long() , u_ceil_1.long()]
176 | img21 = org_img[v_floor_2.long() , u_floor_2.long()]
177 | img22 = org_img[v_floor_2.long() , u_ceil_2.long()]
178 | img23 = org_img[v_ceil_2.long() , u_floor.long()]
179 | img24 = org_img[v_ceil_2.long() , u_ceil.long()]
180 | img25 = org_img[v_ceil_2.long() , u_floor_1.long()]
181 | img26 = org_img[v_ceil_2.long() , u_ceil_1.long()]
182 | img27 = org_img[v_ceil_2.long() , u_floor_2.long()]
183 | img28 = org_img[v_ceil_2.long() , u_ceil_2.long()]
184 | img29 = org_img[v_floor.long() , u_floor_2.long()]
185 | img30 = org_img[v_ceil.long() , u_floor_2.long()]
186 | img31 = org_img[v_floor_1.long() , u_floor_2.long()]
187 | img32 = org_img[v_ceil_1.long() , u_floor_2.long()]
188 | img33 = org_img[v_floor.long() , u_ceil_2.long()]
189 | img34 = org_img[v_ceil.long() , u_ceil_2.long()]
190 | img35 = org_img[v_floor_1.long() , u_ceil_2.long()]
191 | img36 = org_img[v_ceil_1.long() , u_ceil_2.long()]
192 |
193 | img37 = org_img[v_floor_3.long() , u_floor.long()]
194 | img38 = org_img[v_floor_3.long() , u_ceil.long()]
195 | img39 = org_img[v_floor_3.long() , u_floor_1.long()]
196 | img40 = org_img[v_floor_3.long() , u_ceil_1.long()]
197 | img41 = org_img[v_floor_3.long() , u_floor_2.long()]
198 | img42 = org_img[v_floor_3.long() , u_ceil_2.long()]
199 | img43 = org_img[v_floor_3.long() , u_floor_3.long()]
200 | img44 = org_img[v_floor_3.long() , u_ceil_3.long()]
201 | img43 = org_img[v_ceil_3.long() , u_floor.long()]
202 | img44 = org_img[v_ceil_3.long() , u_ceil.long()]
203 | img45 = org_img[v_ceil_3.long() , u_floor_1.long()]
204 | img46 = org_img[v_ceil_3.long() , u_ceil_1.long()]
205 | img47 = org_img[v_ceil_3.long() , u_floor_2.long()]
206 | img48 = org_img[v_ceil_3.long() , u_ceil_2.long()]
207 | img49 = org_img[v_ceil_3.long() , u_floor_3.long()]
208 | img50 = org_img[v_ceil_3.long() , u_ceil_3.long()]
209 |
210 | img51 = org_img[v_floor.long() , u_floor_3.long()]
211 | img52 = org_img[v_ceil.long() , u_floor_3.long()]
212 | img53 = org_img[v_floor_1.long() , u_floor_3.long()]
213 | img54 = org_img[v_ceil_1.long() , u_floor_3.long()]
214 | img55 = org_img[v_floor_2.long() , u_floor_3.long()]
215 | img56 = org_img[v_ceil_2.long() , u_floor_3.long()]
216 |
217 | img57 = org_img[v_floor.long() , u_ceil_3.long()]
218 | img58 = org_img[v_ceil.long() , u_ceil_3.long()]
219 | img59 = org_img[v_floor_1.long() , u_ceil_3.long()]
220 | img60 = org_img[v_ceil_1.long() , u_ceil_3.long()]
221 | img61 = org_img[v_floor_2.long() , u_ceil_3.long()]
222 | img62 = org_img[v_ceil_2.long() , u_ceil_3.long()]
223 |
224 |
225 | Q = torch.Tensor([torch.bincount(torch.Tensor([img1[i],img2[i],img3[i],img4[i],img5[i],img6[i],img7[i],img8[i], img9[i],img10[i],img11[i],img12[i], img13[i],img14[i],img15[i],img16[i], \
226 | img17[i],img18[i],img19[i],img20[i],img21[i],img22[i],img23[i],img24[i], img25[i],img26[i],img27[i],img28[i],img29[i],img30[i],img31[i],img32[i], \
227 | img33[i],img34[i],img35[i],img36[i], img37[i],img38[i],img39[i],img40[i], img41[i],img42[i],img43[i],img44[i], img45[i],img46[i],img47[i],img48[i], \
228 | img49[i],img50[i],img51[i],img52[i], img53[i],img54[i],img55[i],img56[i], img57[i],img58[i],img59[i],img60[i], img61[i],img62[i]]).long()).argmax() for i in range(40962)])
229 |
230 | return Q
--------------------------------------------------------------------------------
/tools/dataloader.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from tools.datasets import dataset_cortical_surfaces_segmentation,dataset_cortical_surfaces
4 |
5 |
6 | def loader_metrics(data_path,
7 | config,):
8 |
9 | ###############################################################
10 | ##################### TRAINING DATA #####################
11 | ###############################################################
12 |
13 | train_dataset = dataset_cortical_surfaces(config=config,
14 | data_path=data_path,
15 | split='train',)
16 |
17 |
18 | #####################################
19 | ############### dHCP ##############
20 | #####################################
21 | if config['data']['dataset'] in ['dHCP'] :
22 |
23 | train_loader = torch.utils.data.DataLoader(train_dataset,
24 | batch_size = config['training']['bs'],
25 | shuffle=False,
26 | num_workers=32)
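    # Note (added for clarity): only the dHCP branch above builds train_loader; any other
    # value of config['data']['dataset'] would leave it undefined at the return statement.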
27 |
28 | ###############################################################
29 | #################### VALIDATION DATA ####################
30 | ###############################################################
31 |
32 | val_dataset = dataset_cortical_surfaces(data_path=data_path,
33 | config=config,
34 | split='val',)
35 |
36 | val_loader = torch.utils.data.DataLoader(val_dataset,
37 | batch_size=config['training']['bs_val'],
38 | shuffle=False,
39 | num_workers=32)
40 |
41 |
42 | ###############################################################
43 | ##################### TESTING DATA #####################
44 | ###############################################################
45 |
46 |
47 | test_dataset = dataset_cortical_surfaces(data_path=data_path,
48 | config=config,
49 | split='test',)
50 |
51 | test_loader = torch.utils.data.DataLoader(test_dataset,
52 | batch_size=config['training']['bs_val'],
53 | shuffle=False,
54 | num_workers=32)
55 |
56 | train_dataset.logging()
57 |
58 | print('')
59 | print('#'*30)
60 | print('############ Data ############')
61 | print('#'*30)
62 | print('')
63 |
64 | print('')
65 | print('Training data: {}'.format(len(train_dataset)))
66 | print('Validation data: {}'.format(len(val_dataset)))
67 | print('Testing data: {}'.format(len(test_dataset)))
68 |
69 | return train_loader, val_loader, test_loader
70 |
71 |
72 | def loader_metrics_segmentation(data_path,
73 | labels_path,
74 | config,):
75 |
76 | ###############################################################
77 | ##################### TRAINING DATA #####################
78 | ###############################################################
79 |
80 | train_dataset = dataset_cortical_surfaces_segmentation(config=config,
81 | data_path=data_path,
82 | labels_path=labels_path,
83 | split='train',)
84 |
85 |
86 | train_loader = torch.utils.data.DataLoader(train_dataset,
87 | batch_size = config['training']['bs'],
88 | shuffle = True,
89 | num_workers=32)
90 |
91 | ###############################################################
92 | #################### VALIDATION DATA ####################
93 | ###############################################################
94 |
95 |
96 | val_dataset = dataset_cortical_surfaces_segmentation(data_path=data_path,
97 | config=config,
98 | labels_path=labels_path,
99 | split='val',)
100 |
101 | val_loader = torch.utils.data.DataLoader(val_dataset,
102 | batch_size=config['training']['bs_val'],
103 | shuffle=False,
104 | num_workers=32)
105 |
106 |
107 | ###############################################################
108 | ##################### TESTING DATA #####################
109 | ###############################################################
110 |
111 |
112 | test_dataset = dataset_cortical_surfaces_segmentation(data_path=data_path,
113 | config=config,
114 | labels_path=labels_path,
115 | split='test',)
116 |
117 |
118 | test_loader = torch.utils.data.DataLoader(test_dataset,
119 | batch_size=config['training']['bs_val'],
120 | shuffle=False,
121 | num_workers=32)
122 |
123 | train_dataset.logging()
124 |
125 |
126 | print('')
127 | print('Training data: {}'.format(len(train_dataset)))
128 | print('Validation data: {}'.format(len(val_dataset)))
129 | print('Testing data: {}'.format(len(test_dataset)))
130 |
131 | return train_loader, val_loader, test_loader
132 |
133 |
--------------------------------------------------------------------------------
/tools/metrics.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | def dice_coeff(pred, target):
4 | """This definition generalize to real valued pred and target vector.
5 | This should be differentiable.
6 | pred: tensor with first dimension as batch
7 | target: tensor with first dimension as batch
8 | https://github.com/pytorch/pytorch/issues/1249
9 | in V net paper: https://campar.in.tum.de/pub/milletari2016Vnet/milletari2016Vnet.pdf they square the sum in the denominator
10 | """
11 | smooth = 1.
12 | epsilon = 10e-8
13 |     # have to use contiguous since they may come from a torch.view op
14 | iflat = pred.view(-1).contiguous()
15 | tflat = target.view(-1).contiguous()
16 | intersection = (iflat * tflat).sum()
17 | #A_sum = torch.sum(iflat * iflat) #original version from AF
18 | #B_sum = torch.sum(tflat * tflat) #original version from AF
19 | A_sum = torch.sum(iflat)
20 | B_sum = torch.sum(tflat)
21 | dice = (2. * intersection + smooth) / (A_sum + B_sum + smooth)
22 | #dice = dice.mean(dim=0)
23 | #dice = torch.clamp(dice, 0, 1.0)
24 | return dice
25 |
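# Worked example (illustrative, not part of the original file):
#   pred   = torch.tensor([1., 1., 0., 0.])
#   target = torch.tensor([1., 0., 0., 0.])
#   intersection = 1, A_sum = 2, B_sum = 1  ->  dice = (2*1 + 1) / (2 + 1 + 1) = 0.75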
--------------------------------------------------------------------------------
/tools/pretrain.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # @Author: Simon Dahan
3 | #
4 | # Created on Fri Oct 01 2021
5 | #
6 | # by Simon Dahan @SD3004
7 | #
8 | # Copyright (c) 2021 MeTrICS Lab
9 | #
10 |
11 |
12 | '''
13 | This file implements the training procedure to pre-train a SiT model with Masked Patch Prediction (MPP).
14 | Models can be either trained:
15 | - from scratch
16 | - from pretrained weights (after self-supervision or ImageNet for instance)
17 | Models can be trained for two tasks:
18 | - age at scan prediction
19 | - birth age prediction
20 |
21 | Pretrained ImageNet models are downloaded from the Timm library.
22 | '''
23 |
24 | import os
25 | import argparse
26 | import yaml
27 | import sys
28 | import timm
29 | from datetime import datetime
30 |
31 | sys.path.append('./')
32 | sys.path.append('../')
33 | sys.path.append('../../')
34 |
35 | import torch
36 | import torch.nn as nn
37 | import torch.optim as optim
38 | import numpy as np
39 | import pandas as pd
40 |
41 |
42 | from models.sit import SiT
43 | from models.mpp import masked_patch_pretraining
44 |
45 |
46 | from tools.utils import load_weights_imagenet, get_dataloaders_numpy, get_dataloaders_metrics
47 |
48 | from torch.utils.tensorboard import SummaryWriter
49 |
50 |
51 | def train(config):
52 |
53 | #mesh_resolution
54 | ico_mesh = config['mesh_resolution']['ico_mesh']
55 | ico_grid = config['mesh_resolution']['ico_grid']
56 | num_patches = config['sub_ico_{}'.format(ico_grid)]['num_patches']
57 | num_vertices = config['sub_ico_{}'.format(ico_grid)]['num_vertices']
58 |
59 | #data
60 | dataset = config['data']['dataset']
61 | task = config['data']['task']
62 | loader_type = config['data']['loader']
63 |
64 | if task == 'sex':
65 | classification_task = True
66 | else:
67 | classification_task = False
68 |
69 | #training
70 | gpu = config['training']['gpu']
71 | LR = config['training']['LR']
72 | loss = config['training']['loss']
73 | epochs = config['training']['epochs']
74 | val_epoch = config['training']['val_epoch']
75 | testing = config['training']['testing']
76 | bs = config['training']['bs']
77 | bs_val = config['training']['bs_val']
78 | configuration = config['data']['configuration']
79 | task = config['data']['task']
80 |
81 | folder_to_save_model = config['logging']['folder_to_save_model']
82 |
83 | device = torch.device("cuda:{}".format(gpu) if torch.cuda.is_available() else "cpu")
84 |
85 | print('')
86 | print('#'*30)
87 | print('Config')
88 | print('#'*30)
89 | print('')
90 |
91 | print('gpu: {}'.format(device))
92 | print('dataset: {}'.format(dataset))
93 | print('task: {}'.format(task))
94 | print('model: {}'.format(config['MODEL']))
95 |
96 | print('Mesh resolution - ico {}'.format(ico_mesh))
97 | print('Grid resolution - ico {}'.format(ico_grid))
98 | print('Number of patches - {}'.format(num_patches))
99 | print('Number of vertices - {}'.format(num_vertices))
100 |
101 |
102 | ##############################
103 | ###### DATASET ######
104 | ##############################
105 |
106 | print('')
107 | print('#'*30)
108 | print('Loading data')
109 | print('#'*30)
110 | print('')
111 |
112 | print('LOADING DATA: ICO {} - sub-res ICO {}'.format(ico_mesh,ico_grid))
113 |
114 | if loader_type == 'numpy':
115 | data_path = config['data']['path_to_numpy'].format(ico_grid,task,configuration)
116 | train_loader, val_loader, test_loader = get_dataloaders_numpy(data_path, testing, bs, bs_val)
117 |
118 | elif loader_type == 'metrics':
119 | data_path = config['data']['path_to_metrics'].format(dataset,configuration)
120 | train_loader, val_loader, test_loader = get_dataloaders_metrics(config,data_path)
121 |
122 | ##############################
123 | ###### LOGGING ######
124 | ##############################
125 |
126 | # creating folders for logging.
127 | try:
128 | os.mkdir(folder_to_save_model)
129 | print('Creating folder: {}'.format(folder_to_save_model))
130 | except OSError:
131 | print('folder already exist: {}'.format(folder_to_save_model))
132 |
133 | date = datetime.today().strftime('%Y-%m-%d-%H:%M:%S')
134 |
135 | # folder time
136 | folder_to_save_model = os.path.join(folder_to_save_model,date)
137 | print(folder_to_save_model)
138 | if config['transformer']['dim'] == 192:
139 | folder_to_save_model = folder_to_save_model + '-tiny'
140 | elif config['transformer']['dim'] == 384:
141 | folder_to_save_model = folder_to_save_model + '-small'
142 | elif config['transformer']['dim'] == 768:
143 | folder_to_save_model = folder_to_save_model + '-base'
144 |
145 | if config['training']['load_weights_imagenet']:
146 | folder_to_save_model = folder_to_save_model + '-imgnet'
147 | if config['training']['load_weights_ssl']:
148 | folder_to_save_model = folder_to_save_model + '-ssl'
149 | if config['training']['dataset_ssl']=='hcp':
150 | folder_to_save_model = folder_to_save_model + '-hcp'
151 | elif config['training']['dataset_ssl']=='dhcp-hcp':
152 | folder_to_save_model = folder_to_save_model + '-dhcp-hcp'
153 | elif config['training']['dataset_ssl']=='dhcp':
154 | folder_to_save_model = folder_to_save_model + '-dhcp'
155 | if config['training']['finetuning']:
156 | folder_to_save_model = folder_to_save_model + '-finetune'
157 | else:
158 | folder_to_save_model = folder_to_save_model + '-freeze'
159 |
160 | try:
161 | os.mkdir(folder_to_save_model)
162 | print('Creating folder: {}'.format(folder_to_save_model))
163 | except OSError:
164 | print('folder already exist: {}'.format(folder_to_save_model))
165 |
166 | writer = SummaryWriter(log_dir=folder_to_save_model)
167 |
168 |
169 | ##############################
170 | ####### MODEL #######
171 | ##############################
172 |
173 | print('')
174 | print('#'*30)
175 | print('Init model')
176 | print('#'*30)
177 | print('')
178 |
179 | if config['transformer']['model'] == 'SiT':
180 |
181 | model = SiT(dim=config['transformer']['dim'],
182 | depth=config['transformer']['depth'],
183 | heads=config['transformer']['heads'],
184 | mlp_dim=config['transformer']['mlp_dim'],
185 | pool=config['transformer']['pool'],
186 | num_patches=num_patches,
187 | num_classes=config['transformer']['num_classes'],
188 | num_channels=config['transformer']['num_channels'],
189 | num_vertices=num_vertices,
190 | dim_head=config['transformer']['dim_head'],
191 | dropout=config['transformer']['dropout'],
192 | emb_dropout=config['transformer']['emb_dropout'])
193 |
194 |
195 |
196 | model.to(device)
197 |
198 | print('Number of parameters encoder: {:,}'.format(sum(p.numel() for p in model.parameters() if p.requires_grad)))
199 | print('')
200 |
201 | ##################################################
202 | ####### SELF-SUPERVISION PIPELINE #######
203 | ##################################################
204 |
205 | if config['SSL'] == 'mpp':
206 |
207 | print('Pretrain using Masked Patch Prediction')
208 | ssl = masked_patch_pretraining(transformer=model,
209 | dim_in = config['transformer']['dim'],
210 | dim_out= num_vertices*config['transformer']['num_channels'],
211 | device=device,
212 | mask_prob=config['pretraining_mpp']['mask_prob'],
213 | replace_prob=config['pretraining_mpp']['replace_prob'],
214 | swap_prob=config['pretraining_mpp']['swap_prob'],
215 | num_vertices=num_vertices,
216 | channels=config['transformer']['num_channels'])
217 | else:
218 |         raise NotImplementedError('not implemented yet')
219 |
220 | ssl.to(device)
221 |
222 | print('Number of parameters pretraining pipeline : {:,}'.format(sum(p.numel() for p in ssl.parameters() if p.requires_grad)))
223 | print('')
224 |
225 | #####################################
226 | ####### OPTIMISATION #######
227 | #####################################
228 |
229 | if config['optimisation']['optimiser']=='Adam':
230 | print('using Adam optimiser')
231 | optimizer = optim.Adam(model.parameters(), lr=LR, weight_decay=config['Adam']['weight_decay'])
232 | elif config['optimisation']['optimiser']=='SGD':
233 | print('using SGD optimiser')
234 | optimizer = optim.SGD(model.parameters(), lr=LR,
235 | weight_decay=config['SGD']['weight_decay'],
236 | momentum=config['SGD']['momentum'],
237 | nesterov=config['SGD']['nesterov'])
238 | elif config['optimisation']['optimiser']=='AdamW':
239 | print('using AdamW optimiser')
240 | optimizer = optim.AdamW(model.parameters(),
241 | lr=LR,
242 | weight_decay=config['AdamW']['weight_decay'])
243 | else:
244 |         raise NotImplementedError('not implemented yet')
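    # Note (added for clarity): the optimiser above is built over model.parameters(), i.e. the
    # SiT encoder; any parameters that exist only inside the `ssl` wrapper from models/mpp.py
    # would therefore not be updated by this optimiser.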
245 |
246 | ###################################
247 | ####### SCHEDULING #######
248 | ###################################
249 |
250 | it_per_epoch = np.ceil(len(train_loader))
251 |
252 | ##################################
253 | ###### PRE-TRAINING ######
254 | ##################################
255 |
256 | print('')
257 | print('#'*30)
258 | print('Starting pre-training')
259 | print('#'*30)
260 | print('')
261 |
262 | best_val_loss = 100000000000
263 | c_early_stop = 0
264 |
265 | for epoch in range(epochs):
266 |
267 | ssl.train()
268 |
269 | running_loss = 0
270 |
271 | for i, data in enumerate(train_loader):
272 |
273 | inputs, _ = data[0].to(device), data[1].to(device)
274 |
275 | optimizer.zero_grad()
276 |
277 | if config['SSL'] == 'mpp':
278 | mpp_loss, _ = ssl(inputs)
279 |
280 | mpp_loss.backward()
281 | optimizer.step()
282 |
283 | running_loss += mpp_loss.item()
284 |
285 | writer.add_scalar('loss/train_it', mpp_loss.item(), epoch*it_per_epoch + i + 1)
286 |
287 | ##############################
288 | ######### LOG IT ###########
289 | ##############################
290 |
291 | if (epoch+1)%5==0:
292 |
293 | print('| Epoch - {} | It - {} | Loss - {:.4f} | LR - {}'.format(epoch+1, epoch*it_per_epoch + i +1, running_loss / (i+1), optimizer.param_groups[0]['lr']))
294 |
295 | loss_pretrain_epoch = running_loss / (i+1)
296 |
297 | writer.add_scalar('loss/train', loss_pretrain_epoch, epoch+1)
298 |
299 |
300 | ##############################
301 | ###### VALIDATION ######
302 | ##############################
303 |
304 | if (epoch+1)%val_epoch==0:
305 |
306 | running_val_loss = 0
307 | ssl.eval()
308 |
309 | with torch.no_grad():
310 |
311 | for i, data in enumerate(val_loader):
312 |
313 | inputs, _ = data[0].to(device), data[1].to(device)
314 |
315 | if config['SSL'] == 'mpp':
316 | mpp_loss, _ = ssl(inputs)
317 |
318 | running_val_loss += mpp_loss.item()
319 |
320 | loss_pretrain_val_epoch = running_val_loss /(i+1)
321 |
322 | writer.add_scalar('loss/val', loss_pretrain_val_epoch, epoch+1)
323 |
324 | print('| Validation | Epoch - {} | Loss - {} | '.format(epoch+1, loss_pretrain_val_epoch))
325 |
326 | if loss_pretrain_val_epoch < best_val_loss:
327 | best_val_loss = loss_pretrain_val_epoch
328 | best_epoch = epoch+1
329 |
330 | config['results'] = {}
331 | config['results']['best_epoch'] = best_epoch
332 | config['results']['best_current_loss'] = loss_pretrain_epoch
333 | config['results']['best_current_loss_validation'] = best_val_loss
334 |
335 | with open(os.path.join(folder_to_save_model,'hparams.yml'), 'w') as yaml_file:
336 | yaml.dump(config, yaml_file)
337 |
338 | print('saving_model')
339 | torch.save({ 'epoch':epoch+1,
340 | 'model_state_dict': model.state_dict(),
341 | 'optimizer_state_dict': optimizer.state_dict(),
342 | 'loss':loss_pretrain_epoch,
343 | },
344 | os.path.join(folder_to_save_model, 'encoder-best.pt'))
345 | torch.save({ 'epoch':epoch+1,
346 | 'model_state_dict': ssl.state_dict(),
347 | 'optimizer_state_dict': optimizer.state_dict(),
348 | 'loss':loss_pretrain_epoch,
349 | },
350 | os.path.join(folder_to_save_model, 'encoder-decoder-best.pt'))
351 |
352 | print('')
353 | print('Final results: best model obtained at epoch {} - loss {}'.format(best_epoch,best_val_loss))
354 |
355 | config['logging']['folder_model_saved'] = folder_to_save_model
356 | config['results']['final_loss'] = loss_pretrain_epoch
357 | config['results']['training_finished'] = True
358 |
359 | with open(os.path.join(folder_to_save_model,'hparams.yml'), 'w') as yaml_file:
360 | yaml.dump(config, yaml_file)
361 |
362 |
363 | #####################################
364 | ###### SAVING FINAL CKPT ######
365 | #####################################
366 |
367 | torch.save({'epoch':epoch+1,
368 | 'model_state_dict': model.state_dict(),
369 | 'optimizer_state_dict': optimizer.state_dict(),
370 | 'loss':loss_pretrain_epoch,
371 | },
372 | os.path.join(folder_to_save_model,'encoder-final.pt'))
373 |
374 | torch.save({'epoch':epoch+1,
375 | 'model_state_dict': ssl.state_dict(),
376 | 'optimizer_state_dict': optimizer.state_dict(),
377 | 'loss':loss_pretrain_epoch,
378 | },
379 | os.path.join(folder_to_save_model,'encoder-decoder-final.pt'))
380 |
381 |
382 | if __name__ == '__main__':
383 |
384 | parser = argparse.ArgumentParser(description='ViT')
385 |
386 | parser.add_argument(
387 | 'config',
388 | type=str,
389 | default='./config/hparams.yml',
390 | help='path to the yaml config file')
391 |
392 | args = parser.parse_args()
393 |
394 | with open(args.config) as f:
395 | config = yaml.safe_load(f)
396 |
397 | # Call training
398 | train(config)
399 |
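# Example invocation (a sketch: both the script path and the config path are assumed from the
# repository layout, e.g. tools/pretrain.py with the MPP pretraining config):
#   python tools/pretrain.py ./config/SiT/pretraining/mpp.yml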
--------------------------------------------------------------------------------
/tools/utils.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # @Author: Your name
3 | # @Date: 1970-01-01 01:00:00
4 | # @Last Modified by: Your name
5 | # @Last Modified time: 2022-04-07 15:51:18
6 | #
7 | # Created on Wed Oct 20 2021
8 | #
9 | # by Simon Dahan @SD3004
10 | #
11 | # Copyright (c) 2021 MeTrICS Lab
12 | #
13 | import os
14 | import torch
15 |
16 | from datetime import datetime
17 | import pandas as pd
18 | import nibabel as nb
19 | import numpy as np
20 |
21 | from tools.dataloader import loader_metrics_segmentation, loader_metrics
22 |
23 | from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau
24 |
25 | from warmup_scheduler import GradualWarmupScheduler
26 |
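# Loads cortical surfaces that have already been preprocessed and split into patches, stored as
# numpy arrays (train/validation and optionally test splits), and wraps them into PyTorch
# TensorDatasets / DataLoaders.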
27 | def get_dataloaders_numpy(data_path, testing, bs, bs_val):
28 | #loading already processed and patched cortical surfaces.
29 |
30 | train_data = np.load(os.path.join(data_path,'train_data.npy'))
31 | train_label = np.load(os.path.join(data_path,'train_labels.npy'))
32 |
33 | print('training data: {}'.format(train_data.shape))
34 |
35 | val_data = np.load(os.path.join(data_path,'validation_data.npy'))
36 | val_label = np.load(os.path.join(data_path,'validation_labels.npy'))
37 |
38 | print('validation data: {}'.format(val_data.shape))
39 |
40 | train_data_dataset = torch.utils.data.TensorDataset(torch.from_numpy(train_data).float(),
41 | torch.from_numpy(train_label).float())
42 |
43 | train_loader = torch.utils.data.DataLoader(train_data_dataset,
44 | batch_size = bs,
45 | shuffle=True,
46 | num_workers=16)
47 |
48 | val_data_dataset = torch.utils.data.TensorDataset(torch.from_numpy(val_data).float(),
49 | torch.from_numpy(val_label).float())
50 |
51 |
52 | val_loader = torch.utils.data.DataLoader(val_data_dataset,
53 | batch_size = bs_val,
54 | shuffle=False,
55 | num_workers=16)
56 | if testing:
57 | test_data = np.load(os.path.join(data_path,'test_data.npy'))
58 | test_label = np.load(os.path.join(data_path,'test_labels.npy')).reshape(-1)
59 |
60 | print('testing data: {}'.format(test_data.shape))
61 | print('')
62 |
63 | test_data_dataset = torch.utils.data.TensorDataset(torch.from_numpy(test_data).float(),
64 | torch.from_numpy(test_label).float())
65 |
66 | test_loader = torch.utils.data.DataLoader(test_data_dataset,
67 | batch_size = bs_val,
68 | shuffle=False,
69 | num_workers=16)
70 |
71 | return train_loader, val_loader, test_loader
72 |
73 | else:
74 | return train_loader, val_loader, None
75 |
76 |
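# Builds the output directory where checkpoints and logs are written: the templated base path
# from the config, followed by an augmentation flag, a timestamp, an optional model-size suffix
# (-tiny / -very-tiny), the initialisation name and a finetune/freeze suffix.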
77 | def logging_ms_sit(config, pretraining=False):
78 |
79 | if pretraining:
80 | folder_to_save_model = config['logging']['folder_to_save_model'].format(config['data']['path_to_workdir'],config['data']['dataset'],config['data']['modality'],'pretraining',config['data']['task'],config['SSL'],config['mesh_resolution']['ico_grid'],config['data']['configuration'])
81 |
82 | else:
83 | if config['data']['task'] =='segmentation':
84 | folder_to_save_model = config['logging']['folder_to_save_model'].format(config['data']['path_to_workdir'],config['data']['dataset'])
85 | else:
86 | if config['data']['dataset']=='dHCP':
87 | folder_to_save_model = config['logging']['folder_to_save_model'].format(config['data']['path_to_workdir'],config['data']['dataset'],config['data']['modality'],config['data']['task'],config['mesh_resolution']['ico_grid'],config['data']['configuration'])
88 | elif config['data']['dataset']=='HCP':
89 | folder_to_save_model = config['logging']['folder_to_save_model'].format(config['data']['path_to_workdir'],config['data']['dataset'],config['data']['modality'],config['data']['task'],config['mesh_resolution']['ico_grid'],config['data']['registration'])
90 | elif config['data']['dataset']=='UKB':
91 | folder_to_save_model = config['logging']['folder_to_save_model'].format(config['data']['path_to_workdir'],config['data']['dataset'],config['data']['modality'],config['data']['task'],config['mesh_resolution']['ico_grid'],config['data']['registration'])
92 |
93 | if config['augmentation']['prob_augmentation']:
94 | folder_to_save_model = os.path.join(folder_to_save_model,'augmentation')
95 | else:
96 | folder_to_save_model = os.path.join(folder_to_save_model,'no_augmentation')
97 |
98 | date = datetime.today().strftime('%Y-%m-%d-%H:%M:%S')
99 |
100 | folder_to_save_model = os.path.join(folder_to_save_model,date)
101 |
102 | if config['transformer']['dim'] == 96:
103 | folder_to_save_model = folder_to_save_model + '-tiny'
104 | elif config['transformer']['dim'] == 48:
105 | folder_to_save_model = folder_to_save_model + '-very-tiny'
106 |
107 |
108 | if config['training']['init_weights']!=False:
109 | folder_to_save_model = folder_to_save_model + '-'+config['training']['init_weights']
110 |
111 | if config['training']['finetuning']:
112 | folder_to_save_model = folder_to_save_model + '-finetune'
113 | else:
114 | folder_to_save_model = folder_to_save_model + '-freeze'
115 |
116 | return folder_to_save_model
117 |
118 |
119 |
120 | def get_data_path_segmentation(config):
121 |
122 | dataloader = config['data']['loader']
123 | dataset = config['data']['dataset']
124 | configuration = config['data']['configuration']
125 |
126 | if str(dataloader) == 'metrics':
127 | if dataset == 'UKB':
128 | data_path = os.path.join(config['data']['path_to_data'],dataset,'metrics/merged_resample/')
129 | labels_path = os.path.join(config['data']['path_to_data'],dataset,'metrics/resample_segmentation_maps')
130 |
131 | elif dataset == 'MindBoggle':
132 | data_path = os.path.join(config['data']['path_to_data'],dataset,'{}/mindboggle_merged_metrics'.format(configuration))
133 | labels_path = os.path.join(config['data']['path_to_data'],dataset,'{}/mindboggle_resample_labels_ico6'.format(configuration))
134 |
135 | else:
136 | raise NotImplementedError('not implemented yet')
137 |
138 | return data_path, labels_path
139 |
140 |
141 | def get_dataloaders_segmentation(config,
142 | data_path,
143 | labels_path,):
144 |
145 | dataloader = config['data']['loader']
146 | if str(dataloader)=='metrics':
147 | train_loader, val_loader, test_loader = loader_metrics_segmentation(data_path,labels_path,config)
148 | else:
149 | raise NotImplementedError('not implemented yet')
150 |
151 | return train_loader, val_loader, test_loader
152 |
153 |
154 | def get_dimensions(config):
155 |
156 | ico_grid = config['mesh_resolution']['ico_grid']
157 | num_patches = config['sub_ico_{}'.format(ico_grid)]['num_patches']
158 | num_vertices = config['sub_ico_{}'.format(ico_grid)]['num_vertices']
159 |
160 | if config['MODEL'] in ['sit','ms-sit']:
161 | channels = config['transformer']['channels']
162 | elif config['MODEL']== 'spherical-unet':
163 | channels = config['spherical-unet']['channels']
164 | elif config['MODEL']== 'monet':
165 | channels = config['monet']['channels']
166 | num_channels = len(channels)
167 |
168 | if config['MODEL'] in ['sit','ms-sit']:
169 |
170 | T = num_channels
171 | N = num_patches
172 |
173 | V = num_vertices
174 |
175 | use_bottleneck = False
176 | bottleneck_dropout = 0.0
177 |
178 | print('Number of channels {}; Number of patches {}; Number of vertices {}'.format(T, N, V))
179 | print('Using bottleneck {}; Dropout bottleneck {}'.format(use_bottleneck,bottleneck_dropout))
180 | print('')
181 |
182 | return T, N, V, use_bottleneck, bottleneck_dropout
183 |
184 |
185 | def get_scheduler(config, nbr_iteration_per_epoch ,optimizer):
186 |
187 | epochs = config['training']['epochs']
188 |
189 | if config['optimisation']['use_scheduler']:
190 |
191 | print('Using learning rate scheduler')
192 |
193 | if config['optimisation']['scheduler'] == 'StepLR':
194 |
195 | scheduler = StepLR(optimizer=optimizer,
196 | step_size= config['StepLR']['stepsize'],
197 | gamma= config['StepLR']['decay'])
198 |
199 | elif config['optimisation']['scheduler'] == 'CosineDecay':
200 |
201 | scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,
202 | T_max = config['CosineDecay']['T_max'],
203 | eta_min= config['CosineDecay']['eta_min'],
204 | )
205 |
206 | elif config['optimisation']['scheduler'] == 'ReduceLROnPlateau':
207 | scheduler = ReduceLROnPlateau(optimizer,
208 | mode='max',
209 | factor=0.5,
210 | patience=2,
211 | cooldown=0,
212 | min_lr=0.0000001
213 | )
214 |
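# GradualWarmupScheduler (external warmup_scheduler package) ramps the learning rate linearly up
# to its base value over 'nbr_step_warmup' scheduler steps, then hands control to the scheduler
# selected above; multiplier=1 means the ramp targets the base learning rate without overshoot.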
215 | if config['optimisation']['warmup']:
216 |
217 | scheduler = GradualWarmupScheduler(optimizer,
218 | multiplier=1,
219 | total_epoch=config['optimisation']['nbr_step_warmup'],
220 | after_scheduler=scheduler)
221 |
222 | else:
223 | # to use warmup without fancy scheduler
224 | if config['optimisation']['warmup']:
225 | scheduler = StepLR(optimizer,
226 | step_size=epochs*nbr_iteration_per_epoch)
227 |
228 | scheduler = GradualWarmupScheduler(optimizer,
229 | multiplier=1,
230 | total_epoch=config['optimisation']['nbr_step_warmup'],
231 | after_scheduler=scheduler)
232 | else:
233 |
234 | return None
235 |
236 | return scheduler
237 |
238 |
239 |
240 | def save_segmentation_results_UKB(config,predictions,folder_to_save, epoch):
241 |
242 | try:
243 | os.makedirs(folder_to_save,exist_ok=False)
244 | print('Creating folder: {}'.format(folder_to_save))
245 | except OSError:
246 | pass
247 |
248 | val_ids = pd.read_csv(os.path.join(config['data']['path_to_workdir'],'labels/UKB/cortical_metrics/segmentation/half/val.csv')).ids
249 | for i, id in enumerate(val_ids):
250 | save_label_UKB(config['data']['path_to_data'],predictions[i],os.path.join(folder_to_save,'{}_{}.label.gii'.format(str(id).split('.')[0],epoch)))
251 |
252 | def save_segmentation_results_UKB_test(config,predictions,folder_to_save, epoch):
253 |
254 | try:
255 | os.makedirs(folder_to_save,exist_ok=False)
256 | print('Creating folder: {}'.format(folder_to_save))
257 | except OSError:
258 | pass
259 |
260 | test_ids = pd.read_csv(os.path.join(config['data']['path_to_workdir'],'labels/UKB/cortical_metrics/segmentation/half/test.csv')).ids
261 | for i, id in enumerate(test_ids):
262 | save_label_UKB(config['data']['path_to_data'],predictions[i],os.path.join(folder_to_save,'{}_{}.label.gii'.format(str(id).split('.')[0],epoch)))
263 |
264 |
265 | def save_segmentation_results_MindBoggle(config,predictions,folder_to_save, epoch):
266 |
267 | try:
268 | os.makedirs(folder_to_save,exist_ok=False)
269 | print('Creating folder: {}'.format(folder_to_save))
270 | except OSError:
271 | pass
272 |
273 | val_ids = pd.read_csv(os.path.join(config['data']['path_to_workdir'],'labels/MindBoggle/cortical_metrics/segmentation/half/val.csv')).ids
274 |
275 | for i, id in enumerate(val_ids):
276 | save_label_MindBoggle(config['data']['path_to_data'],predictions[i],os.path.join(folder_to_save,'{}_{}.label.gii'.format(str(id).split('.')[0],epoch)))
277 |
278 |
279 | def save_segmentation_results_MindBoggle_test(config,predictions,folder_to_save, epoch):
280 |
281 | try:
282 | os.makedirs(folder_to_save,exist_ok=False)
283 | print('Creating folder: {}'.format(folder_to_save))
284 | except OSError:
285 | pass
286 |
287 | test_ids = pd.read_csv(os.path.join(config['data']['path_to_workdir'],'labels/MindBoggle/cortical_metrics/segmentation/half/test.csv')).ids
288 |
289 | for i, id in enumerate(test_ids):
290 | save_label_MindBoggle(config['data']['path_to_data'],predictions[i],os.path.join(folder_to_save,'{}_{}.label.gii'.format(str(id).split('.')[0],epoch)))
291 |
292 | def save_label_UKB(path_to_data, data, filename):
293 | label =nb.load(os.path.join(path_to_data,'UKB/metrics/resample_segmentation_maps/1033131.L.aparc.ico6_fs_LR.label.gii'))
294 | label.darrays[0].data = data
295 | nb.save(label,filename)
296 |
297 | def save_label_MindBoggle(path_to_data,data, filename):
298 | label =nb.load(os.path.join(path_to_data,'MindBoggle/mindboggle_resample_labels_ico6/lh.labels.HLN-12-5.ico6.DKT31.manual.label.gii'))
299 | label.darrays[0].data = data
300 | nb.save(label,filename)
301 |
302 |
303 |
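# Maps an ImageNet-pretrained ViT checkpoint (key names matching timm's implementation:
# 'blocks.i.norm1', 'blocks.i.attn.qkv', 'blocks.i.mlp.fc1', ...) onto the SiT state dict, layer
# by layer. Patch/positional embeddings and the classification head are left untouched.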
304 | def load_weights_imagenet(state_dict,state_dict_imagenet,nb_layers):
305 |
306 | state_dict['mlp_head.0.weight'] = state_dict_imagenet['norm.weight'].data
307 | state_dict['mlp_head.0.bias'] = state_dict_imagenet['norm.bias'].data
308 |
309 | # transformer blocks
310 | for i in range(nb_layers):
311 | state_dict['transformer.layers.{}.0.norm.weight'.format(i)] = state_dict_imagenet['blocks.{}.norm1.weight'.format(i)].data
312 | state_dict['transformer.layers.{}.0.norm.bias'.format(i)] = state_dict_imagenet['blocks.{}.norm1.bias'.format(i)].data
313 |
314 | state_dict['transformer.layers.{}.1.norm.weight'.format(i)] = state_dict_imagenet['blocks.{}.norm2.weight'.format(i)].data
315 | state_dict['transformer.layers.{}.1.norm.bias'.format(i)] = state_dict_imagenet['blocks.{}.norm2.bias'.format(i)].data
316 |
317 | state_dict['transformer.layers.{}.0.fn.to_qkv.weight'.format(i)] = state_dict_imagenet['blocks.{}.attn.qkv.weight'.format(i)].data
318 |
319 | state_dict['transformer.layers.{}.0.fn.to_out.0.weight'.format(i)] = state_dict_imagenet['blocks.{}.attn.proj.weight'.format(i)].data
320 | state_dict['transformer.layers.{}.0.fn.to_out.0.bias'.format(i)] = state_dict_imagenet['blocks.{}.attn.proj.bias'.format(i)].data
321 |
322 | state_dict['transformer.layers.{}.1.fn.net.0.weight'.format(i)] = state_dict_imagenet['blocks.{}.mlp.fc1.weight'.format(i)].data
323 | state_dict['transformer.layers.{}.1.fn.net.0.bias'.format(i)] = state_dict_imagenet['blocks.{}.mlp.fc1.bias'.format(i)].data
324 |
325 | state_dict['transformer.layers.{}.1.fn.net.3.weight'.format(i)] = state_dict_imagenet['blocks.{}.mlp.fc2.weight'.format(i)].data
326 | state_dict['transformer.layers.{}.1.fn.net.3.bias'.format(i)] = state_dict_imagenet['blocks.{}.mlp.fc2.bias'.format(i)].data
327 |
328 | return state_dict
329 |
330 |
331 | def save_gifti(data, filename):
332 | gifti_file = nb.gifti.gifti.GiftiImage()
333 | gifti_file.add_gifti_data_array(nb.gifti.gifti.GiftiDataArray(data))
334 | nb.save(gifti_file,filename)
335 |
336 |
337 | def get_dataloaders_metrics(config,
338 | data_path):
339 |
340 | dataloader = config['data']['loader']
341 |
342 | if str(dataloader)=='metrics':
343 | train_loader, val_loader, test_loader = loader_metrics(data_path,config)
344 | else:
345 | raise NotImplementedError('not implemented yet')
346 |
347 | return train_loader, val_loader, test_loader
348 |
349 | def get_data_path(config):
350 |
351 | dataloader = config['data']['dataloader']
352 | dataset = config['data']['dataset']
353 | configuration = config['data']['configuration']
354 | modality = config['data']['modality']
355 | sampling = config['mesh_resolution']['sampling']
356 | registration = config['data']['registration']
357 |
358 | if str(dataloader) in ['metrics','numpy']:
359 | if dataset == 'dHCP':
360 | data_path = os.path.join(config['data']['path_to_metrics'],dataset,config['data']['folder_to_dhcp'].format(configuration))
361 | else:
362 | raise NotImplementedError('not implemented yet')
363 |
364 | return data_path
--------------------------------------------------------------------------------
/utils/sphere.320.L.surf.gii:
--------------------------------------------------------------------------------
[binary GIFTI surface file: XML structure lost in extraction; the source contains a 4x4 identity coordinate transform and base64/gzip-encoded vertex and triangle data arrays, not reproduced here]
--------------------------------------------------------------------------------
/utils/week-40_hemi-left_space-dhcpSym_dens-40k_desc-medialwallsymm_mask.shape.gii:
--------------------------------------------------------------------------------
[binary GIFTI shape file (dHCP 3rd-release symmetric atlas, week-40, left hemisphere, 40k density medial-wall mask): XML structure lost in extraction; retains a provenance fragment referencing /data/Data/dHCP_1/dhcp_3rd_release_atlas/week-40_hemi-left_space-dhcpSym_dens-40k_desc-medialwallsymm_mask.shape.gii and a base64/gzip-encoded data array, not reproduced here]
--------------------------------------------------------------------------------