├── .github └── workflows │ └── update_notifier.yml ├── .gitignore ├── LICENSE.txt ├── README.md ├── ibc_data ├── 3mm_ref.nii.gz ├── README.md ├── all_contrasts.tsv ├── contrasts │ ├── 1_condition_renaming.csv │ ├── Abstraction.csv │ ├── ArchiEmotional.csv │ ├── ArchiSocial.csv │ ├── ArchiSpatial.csv │ ├── ArchiStandard.csv │ ├── Attention.csv │ ├── Audi.csv │ ├── Audio.csv │ ├── Bang.csv │ ├── BiologicalMotion1.csv │ ├── BiologicalMotion2.csv │ ├── BreathHolding.csv │ ├── Catell_oddball.csv │ ├── Checkerboard.csv │ ├── Color.csv │ ├── ColumbiaCards.csv │ ├── Discount.csv │ ├── DotPatterns.csv │ ├── EmoMem.csv │ ├── EmoReco.csv │ ├── Emotion.csv │ ├── EmotionalPain.csv │ ├── Enumeration.csv │ ├── FaceBody.csv │ ├── FacesAomic.csv │ ├── FingerTap.csv │ ├── FingerTapping.csv │ ├── HarririAomic.csv │ ├── HcpEmotion.csv │ ├── HcpGambling.csv │ ├── HcpLanguage.csv │ ├── HcpMotor.csv │ ├── HcpRelational.csv │ ├── HcpSocial.csv │ ├── HcpWm.csv │ ├── ItemRecognition.csv │ ├── Lec1.csv │ ├── Lec2.csv │ ├── LocalizerAbstraction.csv │ ├── MCSE.csv │ ├── MDTB.csv │ ├── MTTNS.csv │ ├── MTTWE.csv │ ├── MVEB.csv │ ├── MVIS.csv │ ├── MathLanguage.csv │ ├── Motion.csv │ ├── Moto.csv │ ├── MultiModal.csv │ ├── NARPS.csv │ ├── OptimismBias.csv │ ├── PainMovie.csv │ ├── PreferenceFaces.csv │ ├── PreferenceFood.csv │ ├── PreferenceHouses.csv │ ├── PreferencePaintings.csv │ ├── RSVPLanguage.csv │ ├── RewProc.csv │ ├── Ring.csv │ ├── Scene.csv │ ├── SelectiveStopSignal.csv │ ├── Self.csv │ ├── SpatialNavigation.csv │ ├── StopNogo.csv │ ├── StopSignal.csv │ ├── Stroop.csv │ ├── StroopAomic.csv │ ├── TheoryOfMind.csv │ ├── TwoByTwo.csv │ ├── VSTM.csv │ ├── VSTMC.csv │ ├── Visu.csv │ ├── VisualSearch.csv │ ├── WardAndAllport.csv │ ├── Wedge.csv │ └── WorkingMemoryAomic.csv ├── descriptions.json ├── gm_mask_1_5mm.nii.gz ├── gm_mask_3mm.nii.gz ├── ibc_conditions.tsv ├── ibc_tasks.tsv ├── main_contrasts.tsv └── sessions.csv ├── ibc_public ├── __init__.py ├── connectivity │ ├── utils_fc_classification.py │ ├── utils_fc_estimation.py │ ├── utils_plot.py │ ├── utils_sc_estimation.py │ └── utils_similarity.py ├── utils_annotations.py ├── utils_contrasts.py ├── utils_data.py ├── utils_descriptions.py ├── utils_labels.py ├── utils_paradigm.py ├── utils_pipeline.py ├── utils_relaxo.py └── utils_retino.py ├── papers_scripts ├── F10002020 │ └── MVPA │ │ ├── decoding_inter.py │ │ └── utils_tonotopy.py ├── README.md ├── gradients │ ├── gm_mask_2mm.nii.gz │ ├── script_hcp.py │ ├── script_ibc.py │ └── utils.py ├── hbm2021 │ ├── README.md │ ├── adapted_lang.py │ ├── archi_contrasts.csv │ ├── bids_postprocessed.json │ ├── comparison_hcp.py │ ├── conjunction_vs_rfx.py │ ├── contrast_reliability.py │ ├── contrast_reliability_hcp.py │ ├── dictionary_labels.py │ ├── dictionary_learning.py │ ├── dictionary_stability.py │ ├── grey_mask_img.py │ ├── hcp_contrasts.csv │ ├── hcplang_rois.py │ ├── intra_inter_reliability.py │ ├── predictive_model.py │ ├── rois_hcplang900_z16 │ │ ├── left_FG.nii.gz │ │ ├── left_FG.png │ │ ├── left_FP.nii.gz │ │ ├── left_FP.png │ │ ├── left_IFG.nii.gz │ │ ├── left_IFG.png │ │ ├── left_TPJ.nii.gz │ │ ├── left_TPJ.png │ │ ├── left_aSTS_TP.nii.gz │ │ ├── left_aSTS_TP.png │ │ ├── left_pSTS.nii.gz │ │ ├── left_pSTS.png │ │ ├── vmPFC.nii.gz │ │ └── vmPFC.png │ ├── rois_lang.py │ ├── rois_pallier │ │ ├── IFGorb.nii.gz │ │ ├── IFGtri.nii.gz │ │ ├── Precentral_Pallier_2011.nii.gz │ │ ├── Putamen.nii.gz │ │ ├── TP.nii.gz │ │ ├── TPJ.nii.gz │ │ ├── aSTS.nii.gz │ │ ├── dmPFC_Pallier_2011.nii.gz │ │ └── pSTS.nii.gz │ ├── 
utils_dictionary.py │ └── utils_surface_plots.py ├── neuroimage2021 │ ├── README.md │ ├── gm_mask_2mm.nii.gz │ ├── script_hcp.py │ ├── script_ibc.py │ └── utils.py ├── scidata2018 │ ├── README.md │ ├── brain_coverage.py │ ├── cognitive_atlas.csv │ ├── data_quality.py │ ├── global_stat.py │ ├── more_snapshots.py │ └── snapshots.py ├── scidata2020 │ ├── README.md │ ├── behavioral_data │ │ ├── README.md │ │ ├── behav_utils.py │ │ ├── success_rate_enumeration.py │ │ ├── success_rate_mtt.py │ │ ├── success_rate_self.py │ │ ├── success_rate_tom.py │ │ └── success_rate_vstm.py │ └── neuroimaging_data │ │ ├── bids_postprocessed.json │ │ ├── bids_preprocessed.json │ │ ├── brain_coverage2.py │ │ ├── data_quality2.py │ │ └── global_stat2.py └── scidata2023 │ ├── fastsrm_encoding.py │ ├── fastsrm_preprocess.py │ ├── fastsrm_surface_secondlevel.py │ ├── ibc_fastsrm_utils.py │ ├── script_retino.py │ ├── script_retinotopic_maps.py │ ├── surfimg_visualization.py │ └── volimg_visualization.py ├── scripts ├── anatomical_mapping.py ├── b02b0.cnf ├── b0_acquisition_params_AP.txt ├── cluster_bundles.py ├── connectivity │ ├── estimate_fc_calculate_similarity.py │ ├── estimate_fc_classify_fc.py │ ├── estimate_sc.py │ ├── estimate_sc_README.md │ ├── plotting │ │ ├── plot_all_accuracy_table.py │ │ ├── plot_classifier_coefficients.py │ │ ├── plot_connectomes.py │ │ ├── plot_fcfc_similarity.py │ │ ├── plot_fcsc_similarity.py │ │ ├── plot_fcsc_similarity_network_wise.py │ │ ├── plot_generalize_connectomes.py │ │ ├── plot_generalize_distributions.py │ │ ├── plot_methods_fmri_surf.py │ │ ├── plot_methods_rbg_regions.py │ │ ├── plot_multi_task_classification_accuracy.py │ │ ├── plot_reliability.py │ │ └── plot_within_binary_task_classification_accuracy.py │ └── supplementary │ │ ├── _basic_fc_estimation_pipeline.py │ │ ├── _estimate_fc_classify_fc_HCP.py │ │ ├── compile_sc_in_dataframe.py │ │ ├── estimate_fc_external_gbu.py │ │ ├── estimate_fc_ibc_sync_external.py │ │ ├── generalize_baseline.py │ │ ├── generalize_external_to_ibc.py │ │ ├── generalize_ibc_to_external.py │ │ ├── reliability_movie_v_rest.py │ │ └── umap_ibc_external_gbu.py ├── dmri_preprocessing.py ├── dmri_preprocessing_tractography.py ├── expert.opts ├── glm_only.py ├── ini_files │ ├── IBC_preproc_BBT1.ini │ ├── IBC_preproc_BBT2.ini │ ├── IBC_preproc_BBT3.ini │ ├── IBC_preproc_abstraction.ini │ ├── IBC_preproc_anat1.ini │ ├── IBC_preproc_aomic.ini │ ├── IBC_preproc_archi.ini │ ├── IBC_preproc_audio1.ini │ ├── IBC_preproc_audio1_sub-08.ini │ ├── IBC_preproc_audio2.ini │ ├── IBC_preproc_biological_motion.ini │ ├── IBC_preproc_camcan1.ini │ ├── IBC_preproc_camcan2.ini │ ├── IBC_preproc_clips1.ini │ ├── IBC_preproc_clips2.ini │ ├── IBC_preproc_clips3.ini │ ├── IBC_preproc_clips4.ini │ ├── IBC_preproc_color.ini │ ├── IBC_preproc_dwi.ini │ ├── IBC_preproc_enumeration.ini │ ├── IBC_preproc_fbirn.ini │ ├── IBC_preproc_hcp1.ini │ ├── IBC_preproc_hcp2.ini │ ├── IBC_preproc_leuven.ini │ ├── IBC_preproc_lpp1.ini │ ├── IBC_preproc_lpp2.ini │ ├── IBC_preproc_lyon1.ini │ ├── IBC_preproc_lyon2.ini │ ├── IBC_preproc_mario1.ini │ ├── IBC_preproc_mario2.ini │ ├── IBC_preproc_mathlang.ini │ ├── IBC_preproc_mdtb.ini │ ├── IBC_preproc_monkey_kingdom.ini │ ├── IBC_preproc_mtt1.ini │ ├── IBC_preproc_mtt2.ini │ ├── IBC_preproc_navigation.ini │ ├── IBC_preproc_optimism.ini │ ├── IBC_preproc_preference.ini │ ├── IBC_preproc_preference_sub-11.ini │ ├── IBC_preproc_raiders1.ini │ ├── IBC_preproc_raiders2.ini │ ├── IBC_preproc_retino.ini │ ├── IBC_preproc_reward.ini │ ├── 
IBC_preproc_rs.ini
│ ├── IBC_preproc_rsvp-language.ini
│ ├── IBC_preproc_scene.ini
│ ├── IBC_preproc_screening.ini
│ ├── IBC_preproc_search.ini
│ ├── IBC_preproc_self.ini
│ ├── IBC_preproc_stanford1.ini
│ ├── IBC_preproc_stanford2.ini
│ ├── IBC_preproc_stanford3.ini
│ ├── IBC_preproc_stanford3_sub-15.ini
│ ├── IBC_preproc_tom.ini
│ └── IBC_preproc_tom_sub-08.ini
├── make_t1_template.py
├── pipeline.py
├── qmri_README.md
├── qmri_T2star_echo-times.json
├── qmri_run_estimation.py
├── qmri_t1_map_b1.py
├── qmri_t1_map_b1_params.py
├── qmri_t2_map.py
├── script_preferences.py
├── script_resample_normalized_data.py
├── script_retino.py
├── script_skull_stripping.py
├── sin_cos_regressors.csv
├── surface_based_analysis.py
├── surface_glm_only.py
└── tract_plot.py
└── setup.py
/.github/workflows/update_notifier.yml:
--------------------------------------------------------------------------------
1 | name: Update notifier
2 | on:
3 |   push:
4 |     branches: [master]
5 |     paths: [ibc_data/ibc_tasks.tsv, ibc_data/ibc_conditions.tsv, ibc_data/all_contrasts.tsv]
6 | 
7 | jobs:
8 |   dispatch:
9 |     name: Notify docs
10 |     runs-on: ubuntu-latest
11 |     strategy:
12 |       matrix:
13 |         repo: ['individual-brain-charting/docs']
14 |     timeout-minutes: 5
15 |     steps:
16 |       - name: Trigger docs build
17 |         run: |
18 |           curl -L \
19 |             -X POST \
20 |             -H "Accept: application/vnd.github+json" \
21 |             -H "Authorization: token ${{ secrets.UPDATE_NOTIFIER_TOKEN }}" \
22 |             -H "X-GitHub-Api-Version: 2022-11-28" \
23 |             https://api.github.com/repos/${{ matrix.repo }}/dispatches \
24 |             -d '{"event_type":"descriptions_updated","client_payload":{"unit":false,"integration":true}}'
25 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | ibc_public.egg-info/
2 | ibc_public/__pycache__/
3 | *.pyc
4 | ibc_data/all_contrasts_sparse.tsv
5 | processing/pyscript.m
6 | scripts/cache_dir
7 | scripts/pyscript*.m
8 | 
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | License for IBC Public Analysis Code
2 | =====================================
3 | 
4 | Simplified BSD License
5 | 
6 | Copyright (c) 2013 - 2023 The IBC developers.
7 | All rights reserved.
8 | 
9 | 
10 | Redistribution and use in source and binary forms, with or without
11 | modification, are permitted provided that the following conditions are met:
12 | 
13 |   a. Redistributions of source code must retain the above copyright notice,
14 |      this list of conditions and the following disclaimer.
15 |   b. Redistributions in binary form must reproduce the above copyright
16 |      notice, this list of conditions and the following disclaimer in the
17 |      documentation and/or other materials provided with the distribution.
18 |   c. Neither the name of the IBC developers nor the names of
19 |      its contributors may be used to endorse or promote products
20 |      derived from this software without specific prior written
21 |      permission.
22 | 
23 | 
24 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 | ARE DISCLAIMED.
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
28 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
30 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
31 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 | LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 | OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
34 | DAMAGE.
35 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Public analysis code for the IBC project
2 | 
3 | This Python package provides the pipeline used to process the MRI data obtained in
4 | the Individual Brain Charting Project. More information on the data can be found in the
5 | [IBC public protocols](http://github.com/hbp-brain-charting/public_protocols)
6 | repository and on the
7 | [IBC webpage](http://project.inria.fr/IBC/).
8 | 
9 | 
10 | The latest collection of raw data is available on
11 | [OpenNeuro, data accession no. ds002685](https://openneuro.org/datasets/ds002685/versions/1.3.1).
12 | 
13 | The latest collection of unthresholded statistical maps can be found on
14 | [NeuroVault, id collection=6618](https://identifiers.org/neurovault.collection:6618).
15 | 
16 | ## Install
17 | From the root directory of this repository, run the following command:
18 | 
19 | ```
20 | pip install -e .
21 | ```
22 | 
23 | ## Example usage
24 | 
25 | One can import the entire package with `import ibc_public` or use specific parts of the package:
26 | 
27 | ```python
28 | from ibc_public import utils_data
29 | utils_data.make_surf_db(derivatives="/path/to/ibc/derivatives", mesh="fsaverage5")
30 | ```
31 | 
32 | ## Details
33 | 
34 | These scripts make it possible to preprocess and analyze the data:
35 | * run topup distortion correction
36 | * run motion correction
37 | * run coregistration of the fMRI scans to the individual T1 image
38 | * run spatial normalization of the data
39 | * run a general linear model to obtain brain activity maps for the main contrasts of the experiment.
40 | 
41 | ## Core scripts
42 | 
43 | The core scripts are in the `scripts` folder:
44 | 
45 | - `pipeline.py` launches the full analysis on fMRI data (pre-processing + GLM)
46 | - `glm_only.py` launches GLM analyses on the data
47 | - `surface_based_analysis.py` launches surface extraction and registration with Freesurfer; it also projects fMRI data to the surface
48 | - `surface_glm_only.py` runs GLM analyses on the surface
49 | - `dmri_preprocessing.py` (WIP) is for diffusion data. It relies on dipy.
50 | - `anatomical_mapping.py` (WIP) yields T1w, T2w and MWF surrogates from anatomical acquisitions.
51 | - `script_retino.py` yields some post-processing for retinotopic acquisitions (derivation of retinotopic representations from fMRI maps)
52 | 
53 | ## Dependencies
54 | 
55 | Dependencies are:
56 | * FSL (topup)
57 | * SPM12 for preprocessing
58 | * Freesurfer for surface-based analysis
59 | * Nipype to call SPM12 functions
60 | * Pypreprocess to generate preprocessing reports
61 | * Nilearn for various functions
62 | * Nistats to run general linear models.
63 | 
64 | The scripts have been used with the following versions of software and environment:
65 | 
66 | * Python 3.5
67 | * Ubuntu 16.04
68 | * Nipype v0.14.0
69 | * Pypreprocess v0.0.1.dev
70 | * FSL v5.0.9
71 | * SPM12 rev 7219
72 | * Nilearn v0.4.0
73 | * Nistats v0.0.1.a
74 | 
75 | ## Future work
76 | 
77 | - More high-level analysis scripts
78 | - Scripts for additional datasets not yet available
79 | - Scripts for surface-based analysis
80 | 
81 | ## Contributions
82 | 
83 | Please feel free to report any issues and propose improvements on GitHub.
84 | 
85 | ## Authors
86 | 
87 | Licensed under simplified BSD.
88 | 
89 | - Bertrand Thirion, 2017 - present
90 | - Ana Luísa Pinho, 2017 - present
91 | - Juan Jesús Torre, 2018 - 2020
92 | - Swetha Shankar, 2019 - present
93 | - Alexis Thual, 2020 - present
94 | 
--------------------------------------------------------------------------------
/ibc_data/3mm_ref.nii.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/individual-brain-charting/public_analysis_code/d0bbddb8e44580da4d4eb26fa8e86053a997a51c/ibc_data/3mm_ref.nii.gz
--------------------------------------------------------------------------------
/ibc_data/README.md:
--------------------------------------------------------------------------------
1 | ## Notes about organization of the TSV files
2 | 
3 | * __main_contrasts.tsv__ contains the description of the main contrasts estimated from the conditions. It is organized as follows:
4 | 
5 |     * column named *task* - id of the task
6 |     * column named *contrast* - id of the contrast
7 | 
8 | * __all_contrasts.tsv__ contains the description of all meaningful contrasts estimated from the conditions. It is organized as follows:
9 | 
10 |     * column named *task* - id of the task
11 |     * column named *contrast* - id of the contrast
12 |     * column named *positive label* - label for the main regressor of the contrast
13 |     * column named *negative label* - label for the reversed regressor of the contrast
14 |     * column named *description* - description of the contrast
15 |     * column named *tags* - list of cognitive components describing functional activity of the contrast
16 | 
17 | * __ibc_conditions.tsv__ contains the list of all independent (or elementary) conditions for each task. They correspond to the elementary conditions *vs.* baseline. It is organized as follows:
18 | 
19 |     * column named *task* - id of the task
20 |     * column named *condition* - id referring to the elementary condition
21 |     * column named *description* - short explanation of the elementary condition
22 | 
23 | ## Main *versus* All contrasts
24 | __main_contrasts.tsv__ contains only the contrasts isolating effects-of-interest. Most of the IBC tasks rely on categorical designs. Therefore, "main contrasts" are defined as one of the following options: (1) "active condition *vs.* control condition"; (2) "control condition *vs.* baseline"; and, sometimes, (3) "active condition *vs.* baseline". For the few tasks that follow a parametric design, a "main contrast" can also be the parametric effect or the constant effect of the active condition *vs.* baseline. Importantly, main contrasts within a task are linearly independent of one another and, consequently, the same holds across tasks.
25 | 
26 | __all_contrasts.tsv__ contains all possible contrasts that can be extracted from the task paradigm.
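These files can be inspected directly with pandas; the snippet below is a minimal sketch, assuming the repository root as the working directory (the task and contrast names are just examples taken from the files in this repository):

```python
import pandas as pd

# List the main contrasts defined for one task (columns: task, contrast)
main = pd.read_csv("ibc_data/main_contrasts.tsv", sep="\t")
print(main[main["task"] == "ArchiStandard"]["contrast"].tolist())

# Each file in ibc_data/contrasts/ stores one column of weights per contrast
# and one row per elementary condition of the task
weights = pd.read_csv("ibc_data/contrasts/ArchiSocial.csv", index_col="condition")
print(weights["speech-non_speech"].dropna())  # speech_sound: 1, non_speech_sound: -1
```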
27 | 
28 | Note: Reverse contrasts are not listed in 'main_contrasts.tsv' and, in most cases, neither are contrasts formed by elementary conditions. Yet, if a main contrast is composed of an active condition and a control condition, we also include the contrast formed by the control condition *vs.* baseline in 'main_contrasts.tsv'.
29 | 
--------------------------------------------------------------------------------
/ibc_data/contrasts/1_condition_renaming.csv:
--------------------------------------------------------------------------------
1 | task,before,after
2 | ArchiSpatial,saccade,saccades
3 | ArchiSocial,triangle_intention,triangle_mental
4 | ArchiSocial,speech,speech_sound
5 | ArchiSocial,non_speech,non_speech_sound
6 | RSVLanguage,consonant_strings,consonant_string
7 | MTTWE,we_all_reference,we_average_reference
8 | MTTSN,sn_all_reference,sn_average_reference
9 | PainMovie,pain,movie_pain
10 | PainMovie,mental,movie_mental
11 | Color,y,response
12 | MVEB,response,letter_occurrence_response
13 | MVIS,response,dot_displacement_response
14 | MCSE,hi_salience_left,high_salience_left
15 | MCSE,hi_salience_right,high_salience_right
16 | Attention,spatialcue,spatial_cue
17 | Attention,doublecue,double_cue
18 | Catell / oddball,easy_oddball,easy
19 | Catell / oddball,hard_oddball,hard
--------------------------------------------------------------------------------
/ibc_data/contrasts/Abstraction.csv:
--------------------------------------------------------------------------------
1 | condition,humanbody-other,animals-other,faces-other,flora-other,objects-other,places-other,geometry-other,edge-other,photo-other,humanbody_geometry-humanbody_other,humanbody_edge-humanbody_other,humanbody_photo-humanbody_other,animals_geometry-animals_other,animals_edge-animals_other,animals_photo-animals_other,faces_geometry-faces_other,faces_edge-faces_other,faces_photo-faces_other,flora_geometry-flora_other,flora_edge-flora_other,flora_photo-flora_other,objects_geometry-objects_other,objects_edge-objects_other,objects_photo-objects_other,places_geometry-places_other,places_edge-places_other,places_photo-places_other,response
2 | humanbody_geometry,5,-1,-1,-1,-1,-1,2,-1,-1,2,-1,-1,,,,,,,,,,,,,,,,
3 | humanbody_edge,5,-1,-1,-1,-1,-1,-1,2,-1,-1,2,-1,,,,,,,,,,,,,,,,
4 | humanbody_photo,5,-1,-1,-1,-1,-1,-1,-1,2,-1,-1,2,,,,,,,,,,,,,,,,
5 | animals_geometry,-1,5,-1,-1,-1,-1,2,-1,-1,,,,2,-1,-1,,,,,,,,,,,,,
6 | animals_edge,-1,5,-1,-1,-1,-1,-1,2,-1,,,,-1,2,-1,,,,,,,,,,,,,
7 | animals_photo,-1,5,-1,-1,-1,-1,-1,-1,2,,,,-1,-1,2,,,,,,,,,,,,,
8 | faces_geometry,-1,-1,5,-1,-1,-1,2,-1,-1,,,,,,,2,-1,-1,,,,,,,,,,
9 | faces_edge,-1,-1,5,-1,-1,-1,-1,2,-1,,,,,,,-1,2,-1,,,,,,,,,,
10 | faces_photo,-1,-1,5,-1,-1,-1,-1,-1,2,,,,,,,-1,-1,2,,,,,,,,,,
11 | flora_geometry,-1,-1,-1,5,-1,-1,2,-1,-1,,,,,,,,,,2,-1,-1,,,,,,,
12 | flora_edge,-1,-1,-1,5,-1,-1,-1,2,-1,,,,,,,,,,-1,2,-1,,,,,,,
13 | flora_photo,-1,-1,-1,5,-1,-1,-1,-1,2,,,,,,,,,,-1,-1,2,,,,,,,
14 | objects_geometry,-1,-1,-1,-1,5,-1,2,-1,-1,,,,,,,,,,,,,2,-1,-1,,,,
15 | objects_edge,-1,-1,-1,-1,5,-1,-1,2,-1,,,,,,,,,,,,,-1,2,-1,,,,
16 | objects_photo,-1,-1,-1,-1,5,-1,-1,-1,2,,,,,,,,,,,,,-1,-1,2,,,,
17 | places_geometry,-1,-1,-1,-1,-1,5,2,-1,-1,,,,,,,,,,,,,,,,2,-1,-1,
18 | places_edge,-1,-1,-1,-1,-1,5,-1,2,-1,,,,,,,,,,,,,,,,-1,2,-1,
19 | places_photo,-1,-1,-1,-1,-1,5,-1,-1,2,,,,,,,,,,,,,,,,-1,-1,2,
20 | response,,,,,,,,,,,,,,,,,,,,,,,,,,,,1
--------------------------------------------------------------------------------
/ibc_data/contrasts/ArchiEmotional.csv:
-------------------------------------------------------------------------------- 1 | condition,face_trusty-control,face_gender-control,face_trusty-gender,expression_gender-control,expression_intention-control,expression_intention-gender,trusty_and_intention-control,trusty_and_intention-gender 2 | face_gender,,1,-1,,,,,-1 3 | face_control,-1,-1,,,,,-1, 4 | face_trusty,1,,1,,,,1,1 5 | expression_intention,,,,,1,1,1,1 6 | expression_gender,,,,1,,-1,,-1 7 | expression_control,,,,-1,-1,,-1, -------------------------------------------------------------------------------- /ibc_data/contrasts/ArchiSocial.csv: -------------------------------------------------------------------------------- 1 | condition,triangle_mental-random,false_belief-mechanistic_audio,false_belief-mechanistic_video,false_belief-mechanistic,speech-non_speech 2 | mechanistic_audio,,-1,,-1, 3 | mechanistic_video,,,-1,-1, 4 | triangle_mental,1,,,, 5 | triangle_random,-1,,,, 6 | false_belief_audio,,1,,1, 7 | false_belief_video,,,1,1, 8 | speech_sound,,,,,1 9 | non_speech_sound,,,,,-1 -------------------------------------------------------------------------------- /ibc_data/contrasts/ArchiSpatial.csv: -------------------------------------------------------------------------------- 1 | condition,hand-side,grasp-orientation 2 | saccade,, 3 | rotation_hand,1, 4 | rotation_side,-1, 5 | object_grasp,,1 6 | object_orientation,,-1 -------------------------------------------------------------------------------- /ibc_data/contrasts/ArchiStandard.csv: -------------------------------------------------------------------------------- 1 | condition,left-right_button_press,right-left_button_press,listening-reading,reading-listening,motor-cognitive,cognitive-motor,reading-checkerboard,horizontal-vertical,vertical-horizontal,computation-sentences,sentences-computation 2 | audio_left_button_press,1,-1,1,-1,1,-1,,,,, 3 | audio_right_button_press,-1,1,1,-1,1,-1,,,,, 4 | video_left_button_press,1,-1,-1,1,1,-1,,,,, 5 | video_right_button_press,-1,1,-1,1,1,-1,,,,, 6 | horizontal_checkerboard,,,,,,,-1,1,-1,, 7 | vertical_checkerboard,,,,,,,,-1,1,, 8 | audio_sentence,,,,,-1,1,,,,-1,1 9 | video_sentence,,,,,-1,1,1,,,-1,1 10 | audio_computation,,,,,-1,1,,,,1,-1 11 | video_computation,,,,,-1,1,,,,1,-1 -------------------------------------------------------------------------------- /ibc_data/contrasts/Attention.csv: -------------------------------------------------------------------------------- 1 | condition,spatial_cue-double_cue,incongruent-congruent,spatial_incongruent-spatial_congruent,double_incongruent-double_congruent 2 | double_congruent,,-1,,-1 3 | double_incongruent,,1,,1 4 | double_cue,-1,,, 5 | spatial_congruent,,-1,-1, 6 | spatial_incongruent,,1,1, 7 | spatial_cue,1,,, -------------------------------------------------------------------------------- /ibc_data/contrasts/Audi.csv: -------------------------------------------------------------------------------- 1 | condition,tear-silence,suomi-silence,yawn-silence,human-silence,music-silence,reverse-silence,speech-silence,alphabet-silence,cough-silence,environment-silence,laugh-silence,animals-silence 2 | tear,1,,,,,,,,,,, 3 | suomi,,1,,,,,,,,,, 4 | yawn,,,1,,,,,,,,, 5 | human,,,,1,,,,,,,, 6 | music,,,,,1,,,,,,, 7 | reverse,,,,,,1,,,,,, 8 | speech,,,,,,,1,,,,, 9 | alphabet,,,,,,,,1,,,, 10 | cough,,,,,,,,,1,,, 11 | environment,,,,,,,,,,1,, 12 | laugh,,,,,,,,,,,1, 13 | animals,,,,,,,,,,,,1 14 | silence,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1 
-------------------------------------------------------------------------------- /ibc_data/contrasts/Audio.csv: -------------------------------------------------------------------------------- 1 | condition,animal-others,music-others,nature-others,speech-others,tool-others,voice-others,mean-silence,animal-silence,music-silence,nature-silence,speech-silence,tool-silence,voice-silence 2 | animal,5,-1,-1,-1,-1,-1,1,1,,,,, 3 | music,-1,5,-1,-1,-1,-1,1,,1,,,, 4 | nature,-1,-1,5,-1,-1,-1,1,,,1,,, 5 | speech,-1,-1,-1,5,-1,-1,1,,,,1,, 6 | tool,-1,-1,-1,-1,5,-1,1,,,,,1, 7 | voice,-1,-1,-1,-1,-1,5,1,,,,,,1 8 | silence,,,,,,,-6,-1,-1,-1,-1,-1,-1 -------------------------------------------------------------------------------- /ibc_data/contrasts/Bang.csv: -------------------------------------------------------------------------------- 1 | condition,talk-no_talk 2 | talk,1 3 | no_talk,-1 -------------------------------------------------------------------------------- /ibc_data/contrasts/BiologicalMotion1.csv: -------------------------------------------------------------------------------- 1 | condition,global_upright - natural_upright,global_upright - global_inverted,natural_upright - natural_inverted,global-natural,natural-global,inverted-upright 2 | global_upright,1,1,,1,-1,-1 3 | global_inverted,,-1,,1,-1,1 4 | natural_upright,-1,,1,-1,1,-1 5 | natural_inverted,,,-1,-1,1,1 -------------------------------------------------------------------------------- /ibc_data/contrasts/BiologicalMotion2.csv: -------------------------------------------------------------------------------- 1 | condition,natural_upright - modified_upright,modified_upright - modified_inverted,natural_upright - natural_inverted,modified-natural,natural-modified,inverted-upright 2 | modified_upright,-1,1,,1,-1,-1 3 | modified_inverted,,-1,,1,-1,1 4 | natural_upright,1,,1,-1,1,-1 5 | natural_inverted,,,-1,-1,1,1 -------------------------------------------------------------------------------- /ibc_data/contrasts/BreathHolding.csv: -------------------------------------------------------------------------------- 1 | condition,hold-breathe,breathe-hold 2 | breathe,-1,1 3 | get_ready,, 4 | hold_breath,1,-1 -------------------------------------------------------------------------------- /ibc_data/contrasts/Catell_oddball.csv: -------------------------------------------------------------------------------- 1 | condition,hard-easy 2 | easy_oddball,-1 3 | hard_oddball,1 -------------------------------------------------------------------------------- /ibc_data/contrasts/Checkerboard.csv: -------------------------------------------------------------------------------- 1 | condition,checkerboard-fixation 2 | checkerboard,1 3 | fixation,-1 -------------------------------------------------------------------------------- /ibc_data/contrasts/Color.csv: -------------------------------------------------------------------------------- 1 | condition,chromatic-achromatic 2 | chromatic,1 3 | achromatic,-1 4 | response, -------------------------------------------------------------------------------- /ibc_data/contrasts/ColumbiaCards.csv: -------------------------------------------------------------------------------- 1 | condition,num_loss_cards,loss,gain 2 | num_loss_cards,1,, 3 | loss,,1, 4 | gain,,,1 5 | -------------------------------------------------------------------------------- /ibc_data/contrasts/Discount.csv: -------------------------------------------------------------------------------- 1 | condition,delay,amount 2 | delay,1, 3 | amount,,1 
-------------------------------------------------------------------------------- /ibc_data/contrasts/DotPatterns.csv: -------------------------------------------------------------------------------- 1 | condition,correct_cue_incorrect_probe-correct_cue_correct_probe,incorrect_cue_incorrect_probe-incorrect_cue_correct_probe,correct_cue_incorrect_probe-incorrect_cue_correct_probe,incorrect_cue_incorrect_probe-correct_cue_incorrect_probe,correct_cue-incorrect_cue,incorrect_probe-correct_probe 2 | cue,,,,,, 3 | correct_cue_correct_probe,-1,,,,1,-1 4 | correct_cue_incorrect_probe,1,,1,-1,1,1 5 | incorrect_cue_correct_probe,,-1,-1,,-1,-1 6 | incorrect_cue_incorrect_probe,,1,,1,-1,1 -------------------------------------------------------------------------------- /ibc_data/contrasts/EmoMem.csv: -------------------------------------------------------------------------------- 1 | condition,positive-neutral_image,negative-neutral_image 2 | neutral_image,-1,-1 3 | negative_image,,1 4 | positive_image,1, 5 | object,, -------------------------------------------------------------------------------- /ibc_data/contrasts/EmoReco.csv: -------------------------------------------------------------------------------- 1 | condition,neutral,angry,angry-neutral,neutral-angry,male-female,female-male 2 | neutral_female,1,,-1,1,-1,1 3 | neutral_male,1,,-1,1,1,-1 4 | angry_female,,1,1,-1,-1,1 5 | angry_male,,1,1,-1,1,-1 -------------------------------------------------------------------------------- /ibc_data/contrasts/Emotion.csv: -------------------------------------------------------------------------------- 1 | condition,neutral_image,negative_image,valence_scale,negative-neutral 2 | neutral_image,1,,,-1 3 | negative_image,,1,,1 4 | valence_scale,,,1, 5 | -------------------------------------------------------------------------------- /ibc_data/contrasts/EmotionalPain.csv: -------------------------------------------------------------------------------- 1 | condition,emotional-physical_pain 2 | physical_pain,-1 3 | emotional_pain,1 -------------------------------------------------------------------------------- /ibc_data/contrasts/Enumeration.csv: -------------------------------------------------------------------------------- 1 | condition 2 | enumeration_linear 3 | enumeration_constant 4 | enumeration_quadratic -------------------------------------------------------------------------------- /ibc_data/contrasts/FaceBody.csv: -------------------------------------------------------------------------------- 1 | condition,bodies-others,characters-others,faces-others,objects-others,places-others 2 | bodies_body,4,-1,-1,-1,-1 3 | bodies_limb,4,-1,-1,-1,-1 4 | characters_number,-1,4,-1,-1,-1 5 | characters_word,-1,4,-1,-1,-1 6 | faces_adult,-1,-1,4,-1,-1 7 | faces_child,-1,-1,4,-1,-1 8 | objects_car,-1,-1,-1,4,-1 9 | objects_instrument,-1,-1,-1,4,-1 10 | places_corridor,-1,-1,-1,-1,4 11 | places_house,-1,-1,-1,-1,4 -------------------------------------------------------------------------------- /ibc_data/contrasts/FacesAomic.csv: -------------------------------------------------------------------------------- 1 | condition,all-neutral,anger-neutral,contempt-neutral,joy-neutral,pride-neutral,male-female,female-male,mediterranean-european,european-mediterranean 2 | anger,"0,25",1,,,,,,, 3 | contempt,"0,25",,1,,,,,, 4 | joy,"0,25",,,1,,,,, 5 | pride,"0,25",,,,1,,,, 6 | neutral,-1,-1,-1,-1,-1,,,, 7 | male,,,,,,1,-1,, 8 | female,,,,,,-1,1,, 9 | mediterranean,,,,,,,,1,-1 10 | european,,,,,,,,-1,1 
-------------------------------------------------------------------------------- /ibc_data/contrasts/FingerTap.csv: -------------------------------------------------------------------------------- 1 | condition,fingertap-rest 2 | fingertap,1 3 | rest,-1 -------------------------------------------------------------------------------- /ibc_data/contrasts/FingerTapping.csv: -------------------------------------------------------------------------------- 1 | condition,chosen-specified,specified-null,chosen-null 2 | specified,-1,1, 3 | chosen,1,,1 4 | null,,-1,-1 -------------------------------------------------------------------------------- /ibc_data/contrasts/HarririAomic.csv: -------------------------------------------------------------------------------- 1 | condition,emotion-shape 2 | shape,-1 3 | emotion,1 4 | index_response, 5 | middle_response, -------------------------------------------------------------------------------- /ibc_data/contrasts/HcpEmotion.csv: -------------------------------------------------------------------------------- 1 | condition,face-shape,shape-face 2 | shape,-1,1 3 | face,1,-1 -------------------------------------------------------------------------------- /ibc_data/contrasts/HcpGambling.csv: -------------------------------------------------------------------------------- 1 | condition,punishment-reward,reward-punishment 2 | punishment,1,-1 3 | reward,-1,1 -------------------------------------------------------------------------------- /ibc_data/contrasts/HcpLanguage.csv: -------------------------------------------------------------------------------- 1 | condition,math-story,story-math 2 | story,-1,1 3 | math,1,-1 -------------------------------------------------------------------------------- /ibc_data/contrasts/HcpMotor.csv: -------------------------------------------------------------------------------- 1 | condition,left_hand-avg,right_hand-avg,left_foot-avg,right_foot-avg,tongue-avg 2 | left_hand,0.8,-0.2,-0.2,-0.2,-0.2 3 | right_hand,-0.2,0.8,-0.2,-0.2,-0.2 4 | left_foot,-0.2,-0.2,0.8,-0.2,-0.2 5 | right_foot,-0.2,-0.2,-0.2,0.8,-0.2 6 | tongue,-0.2,-0.2,-0.2,-0.2,0.8 7 | cue,,,,, -------------------------------------------------------------------------------- /ibc_data/contrasts/HcpRelational.csv: -------------------------------------------------------------------------------- 1 | condition,relational-match 2 | relational,1 3 | match,-1 -------------------------------------------------------------------------------- /ibc_data/contrasts/HcpSocial.csv: -------------------------------------------------------------------------------- 1 | condition,mental-random 2 | mental,1 3 | random,-1 -------------------------------------------------------------------------------- /ibc_data/contrasts/HcpWm.csv: -------------------------------------------------------------------------------- 1 | condition,body-avg,face-avg,place-avg,tools-avg 2 | 0back_body,"0,375","-0,125","-0,125","-0,125" 3 | 2back_body,"0,375","-0,125","-0,125","-0,125" 4 | 0back_face,"-0,125","0,375","-0,125","-0,125" 5 | 2back_face,"-0,125","0,375","-0,125","-0,125" 6 | 0back_place,"-0,125","-0,125","0,375","-0,125" 7 | 2back_place,"-0,125","-0,125","0,375","-0,125" 8 | 0back_tools,"-0,125","-0,125","-0,125","0,375" 9 | 2back_tools,"-0,125","-0,125","-0,125","0,375" -------------------------------------------------------------------------------- /ibc_data/contrasts/ItemRecognition.csv: -------------------------------------------------------------------------------- 1 | 
condition,encode5-encode1,probe5_mem-probe1_mem,probe5_new-probe1_new,prob-arrow,encode,arrow_left-arrow_right 2 | load1_instr,,,,,, 3 | encode1,-1,,,,1, 4 | probe1_mem,,-1,,1,, 5 | probe1_new,,,-1,1,, 6 | load3_instr,,,,,, 7 | encode3,,,,,1, 8 | probe3_mem,,,,1,, 9 | probe3_new,,,,1,, 10 | load5_instr,,,,,, 11 | encode5,1,,,,1, 12 | probe5_mem,,1,,1,, 13 | probe5_new,,,1,1,, 14 | arrow_right,,,,-3,,-1 15 | arrow_left,,,,-3,,1 16 | -------------------------------------------------------------------------------- /ibc_data/contrasts/Lec1.csv: -------------------------------------------------------------------------------- 1 | condition,word-pseudoword,word-random_string,pseudoword-random_string 2 | random_string,,-1,-1 3 | word,1,1, 4 | pseudoword,-1,,1 -------------------------------------------------------------------------------- /ibc_data/contrasts/Lec2.csv: -------------------------------------------------------------------------------- 1 | condition,attend-unattend 2 | unattend,-1 3 | attend,1 -------------------------------------------------------------------------------- /ibc_data/contrasts/LocalizerAbstraction.csv: -------------------------------------------------------------------------------- 1 | condition,localizer_faces-other,localizer_humanbody-other,localizer_words-other,localizer_nonsensewords-other,localizer_numbers-other,localizer_places-other,localizer_objects-other,localizer_checkerboards-other,response 2 | localizer_faces,7,-1,-1,-1,-1,-1,-1,-1, 3 | localizer_humanbody,-1,7,-1,-1,-1,-1,-1,-1, 4 | localizer_words,-1,-1,7,-1,-1,-1,-1,-1, 5 | localizer_nonsensewords,-1,-1,-1,7,-1,-1,-1,-1, 6 | localizer_numbers,-1,-1,-1,-1,7,-1,-1,-1, 7 | localizer_places,-1,-1,-1,-1,-1,7,-1,-1, 8 | localizer_objects,-1,-1,-1,-1,-1,-1,7,-1, 9 | localizer_checkerboards,-1,-1,-1,-1,-1,-1,-1,7, 10 | response,,,,,,,,,1 -------------------------------------------------------------------------------- /ibc_data/contrasts/MCSE.csv: -------------------------------------------------------------------------------- 1 | condition,high-low_salience,low-high_salience,salience_left-right,salience_right-left,low+high_salience 2 | high_salience_left,1,-1,1,-1,1 3 | high_salience_right,1,-1,-1,1,1 4 | low_salience_left,-1,1,1,-1,1 5 | low_salience_right,-1,1,-1,1,1 -------------------------------------------------------------------------------- /ibc_data/contrasts/MDTB.csv: -------------------------------------------------------------------------------- 1 | condition,action_action,action_control,finger_simple,finger_complex,semantic_hard,semantic_easy,2back_easy,2back_hard,tom_photo,tom_belief,search_easy,search_hard,flexion_extension,action_action-control,finger_complex-simple,semantic_hard-easy,2back_hard-easy,tom_belief-photo,search_hard-easy 2 | search_easy,,,,,,,,,,,1,,,,,,,,-1 3 | search_hard,,,,,,,,,,,,1,,,,,,,1 4 | action_action,1,,,,,,,,,,,,,1,,,,, 5 | action_control,,1,,,,,,,,,,,,-1,,,,, 6 | flexion_extension,,,,,,,,,,,,,1,,,,,, 7 | finger_complex,,,,1,,,,,,,,,,,1,,,, 8 | finger_simple,,,1,,,,,,,,,,,,-1,,,, 9 | tom_belief,,,,,,,,,,1,,,,,,,,1, 10 | tom_photo,,,,,,,,,1,,,,,,,,,-1, 11 | 2back_easy,,,,,,,1,,,,,,,,,,-1,, 12 | 2back_hard,,,,,,,,1,,,,,,,,,1,, 13 | semantic easy,,,,,,1,,,,,,,,,,-1,,, 14 | semantic hard,,,,,1,,,,,,,,,,,1,,, -------------------------------------------------------------------------------- /ibc_data/contrasts/MTTNS.csv: -------------------------------------------------------------------------------- 1 | 
condition,sn_southside_event,sn_northside_event,sn_before_event,sn_after_event,sn_all_space-time_cue,sn_all_time-space_cue,sn_space_event,sn_time_event,sn_average_event,sn_space-time_event,sn_time-space_event,northside-southside_event,southside-northside_event,sn_before-after_event,sn_after-before_event 2 | sn_average_reference,,,,,,,,,,,,,,, 3 | sn_all_space_cue,,,,,1,-1,,,,,,,,, 4 | sn_all_time_cue,,,,,-1,1,,,,,,,,, 5 | sn_southside_close_event,1,,,,,,1,,1,1,-1,-1,1,, 6 | sn_southside_far_event,1,,,,,,1,,1,1,-1,-1,1,, 7 | sn_northside_close_event,,1,,,,,1,,1,1,-1,1,-1,, 8 | sn_northside_far_event,,1,,,,,1,,1,1,-1,1,-1,, 9 | sn_before_close_event,,,1,,,,,1,1,-1,1,,,1,-1 10 | sn_before_far_event,,,1,,,,,1,1,-1,1,,,1,-1 11 | sn_after_close_event,,,,1,,,,1,1,-1,1,,,-1,1 12 | sn_after_far_event,,,,1,,,,1,1,-1,1,,,-1,1 13 | sn_all_event_response,,,,,,,,,,,,,,, -------------------------------------------------------------------------------- /ibc_data/contrasts/MTTWE.csv: -------------------------------------------------------------------------------- 1 | condition,we_westside_event,we_eastside_event,we_before_event,we_after_event,we_space_event,we_time_event,we_average_event,we_all_space-time_cue,we_all_time-space_cue,we_space-time_event,we_time-space_event,westside-eastside_event,eastside-westside_event,we_before-after_event,we_after-before_event 2 | we_average_reference,,,,,,,,,,,,,,, 3 | we_all_space_cue,,,,,,,,1,-1,,,,,, 4 | we_all_time_cue,,,,,,,,-1,1,,,,,, 5 | we_westside_close_event,1,,,,1,,1,,,1,-1,1,-1,, 6 | we_westside_far_event,1,,,,1,,1,,,1,-1,1,-1,, 7 | we_eastside_close_event,,1,,,1,,1,,,1,-1,-1,1,, 8 | we_eastside_far_event,,1,,,1,,1,,,1,-1,-1,1,, 9 | we_before_close_event,,,1,,,1,1,,,-1,1,,,1,-1 10 | we_before_far_event,,,1,,,1,1,,,-1,1,,,1,-1 11 | we_after_close_event,,,,1,,1,1,,,-1,1,,,-1,1 12 | we_after_far_event,,,,1,,1,1,,,-1,1,,,-1,1 13 | we_all_event_response,,,,,,,,,,,,,,, -------------------------------------------------------------------------------- /ibc_data/contrasts/MVEB.csv: -------------------------------------------------------------------------------- 1 | condition,2_letters_different-same,4_letters_different-same,6_letters_different-same,6_letters_different-2_letters_different 2 | letter_occurrence_response,,,, 3 | 2_letters_different,1,,,-1 4 | 4_letters_different,,1,, 5 | 6_letters_different,,,1,1 6 | 2_letters_same,-1,,, 7 | 4_letters_same,,-1,, 8 | 6_letters_same,,,-1, -------------------------------------------------------------------------------- /ibc_data/contrasts/MVIS.csv: -------------------------------------------------------------------------------- 1 | condition,2_dots-2_dots_control,4_dots-4_dots_control,6_dots-6_dots_control,6_dots-2_dots,dots-control 2 | dot_displacement_response,,,,, 3 | 2_dots,1,,,-1,1 4 | 2_dots_control,-1,,,,-1 5 | 4_dots,,1,,,1 6 | 4_dots_control,,-1,,,-1 7 | 6_dots,,,1,1,1 8 | 6_dots_control,,,-1,,-1 -------------------------------------------------------------------------------- /ibc_data/contrasts/MathLanguage.csv: -------------------------------------------------------------------------------- 1 | condition,colorlessg-wordlist,general-colorlessg,math-nonmath,nonmath-math,geometry-othermath,arithmetic_principle-othermath,arithmetic_fact-othermath,theory_of_mind-general,context-general,theory_of_mind-context,context-theory_of_mind,theory_of_mind_and_context-general 2 | arithmetic_fact_auditory,,,1,-1,"-0,5","-0,5",1,,,,, 3 | arithmetic_fact_visual,,,1,-1,"-0,5","-0,5",1,,,,, 4 | 
arithmetic_principle_auditory,,,1,-1,"-0,5",1,"-0,5",,,,, 5 | arithmetic_principle_visual,,,1,-1,"-0,5",1,"-0,5",,,,, 6 | colorlessg_auditory,1,-1,,,,,,,,,, 7 | colorlessg_visual,1,-1,,,,,,,,,, 8 | context_auditory,,,-1,1,,,,,1,-1,1,"0,5" 9 | context_visual,,,-1,1,,,,,1,-1,1,"0,5" 10 | general_auditory,,1,-1,1,,,,-1,-1,,,-1 11 | general_visual,,1,-1,1,,,,-1,-1,,,-1 12 | geometry_fact_auditory,,,1,-1,1,"-0,5","-0,5",,,,, 13 | geometry_fact_visual,,,1,-1,1,"-0,5","-0,5",,,,, 14 | theory_of_mind_auditory,,,-1,1,,,,1,,1,-1,"0,5" 15 | theory_of_mind_visual,,,-1,1,,,,1,,1,-1,"0,5" 16 | wordlist_auditory,-1,,,,,,,,,,, 17 | wordlist_visual,-1,,,,,,,,,,, -------------------------------------------------------------------------------- /ibc_data/contrasts/Motion.csv: -------------------------------------------------------------------------------- 1 | condition,clock,coherent-incoherent,coherent-stationary,incoherent-stationary,clock-anti,left-right 2 | incoherent,,-3,,1,, 3 | left_coherent,,1,1,,, 4 | right_coherent,,1,1,,, 5 | both_coherent,,1,1,,, 6 | left_stationary,,,-1,,, 7 | right_stationary,,,-1,-1,, 8 | both_stationary,,,-1,,, 9 | left_coherent_clock,1,,,,1, 10 | right_coherent_clock,1,,,,, 11 | both_coherent_clock,1,,,,, 12 | anti,,,,,-1, 13 | left,,,,,,1 14 | right,,,,,,-1 -------------------------------------------------------------------------------- /ibc_data/contrasts/Moto.csv: -------------------------------------------------------------------------------- 1 | condition,finger_left-fixation,finger_right-fixation,foot_left-fixation,foot_right-fixation,hand_left-fixation,hand_right-fixation,saccade-fixation,tongue-fixation 2 | finger_left,1,,,,,,, 3 | finger_right,,1,,,,,, 4 | foot_left,,,1,,,,, 5 | foot_right,,,,1,,,, 6 | hand_left,,,,,1,,, 7 | hand_right,,,,,,1,, 8 | saccade_left,,,,,,,"0,5", 9 | saccade_right,,,,,,,"0,5", 10 | tongue_left,,,,,,,,"0,5" 11 | tongue_right,,,,,,,,"0,5" 12 | instructions,,,,,,,, 13 | fixation_left,"-0,5","-0,5","-0,5","-0,5","-0,5","-0,5","-0,5","-0,5" 14 | fixation_right,"-0,5","-0,5","-0,5","-0,5","-0,5","-0,5","-0,5","-0,5" -------------------------------------------------------------------------------- /ibc_data/contrasts/MultiModal.csv: -------------------------------------------------------------------------------- 1 | condition,audio,audio-control,visual,visual-control,tactile,tactile-control,audio-visual,visual-audio,tactile-visual,visual-tactile,tactile-audio,audio-tactile,face-other,body-other,body-non_face,animate-inanimate,monkey_speech-other,speech-other,speech+voice-other,,,,, 2 | audio_monkey,1,1,,,,,5,-5,,,-1,1,,,,,6,-1,-2,,,,, 3 | audio_animal,1,1,,,,,5,-5,,,-1,1,,,,,-1,-1,-2,,,,, 4 | audio_nature,1,1,,,,,5,-5,,,-1,1,,,,,-1,-1,-2,,,,, 5 | audio_silence,,-6,,-10,,,,,,,,,,,,,-1,-1,-2,,,,, 6 | audio_speech,1,1,,,,,5,-5,,,-1,1,,,,,-1,6,5,,,,, 7 | audio_tools,1,1,,,,,5,-5,,,-1,1,,,,,-1,-1,-2,,,,, 8 | audio_voice,1,1,,,,,5,-5,,,-1,1,,,,,-1,-1,5,,,,, 9 | tactile_bottom,,,,,1,1,,,10,-10,2,-2,,,,,,,,,,,, 10 | tactile_middle,,,,,1,1,,,10,-10,2,-2,,,,,,,,,,,, 11 | tactile_top,,,,,1,1,,,10,-10,2,-2,,,,,,,,,,,, 12 | tactile_novalve,,,,,,-3,,,,,,,,,,,,,,,,,, 13 | image_animals,,,1,1,,,-3,3,-3,3,,,-1,-1,-2,2,,,,,,,, 14 | image_birds,,,1,1,,,-3,3,-3,3,,,-1,-1,-2,2,,,,,,,, 15 | image_fruits,,,1,1,,,-3,3,-3,3,,,-1,-1,-2,-3,,,,,,,, 16 | image_human_body,,,1,1,,,-3,3,-3,3,,,-1,4,3,2,,,,,,,, 17 | image_human_face,,,1,1,,,-3,3,-3,3,,,4,-1,3,2,,,,,,,, 18 | image_human_object,,,1,1,,,-3,3,-3,3,,,-1,-1,-2,-3,,,,,,,, 19 | 
image_monkey_body,,,1,1,,,-3,3,-3,3,,,-1,4,3,2,,,,,,,, 20 | image_monkey_face,,,1,1,,,-3,3,-3,3,,,4,-1,3,2,,,,,,,, 21 | image_monkey_object,,,1,1,,,-3,3,-3,3,,,-1,-1,-2,-3,,,,,,,, 22 | image_sculpture,,,1,1,,,-3,3,-3,3,,,-1,-1,-2,-3,,,,,,,, 23 | ,,,,,,,,,,,,,,,,,,,,,,,, 24 | ,,,,,,,,,,,,,,,,,,,,,,,, 25 | ,,,,,,,,,,,,,,,,,,,,,,,, 26 | ,,,,,,,,,,,,,,,,,,,,,,,, 27 | ,,,,,,,,,,,,,,,,,,,,,,,, 28 | ,,,,,,,,,,,,,,,,,,,,,,,, 29 | ,,,,,,,,,,,,,,,,,,,,,,,, 30 | ,,,,,,,,,,,,,,,,,,,,,,,, 31 | ,,,,,,,,,,,,,,,,,,,,,,,, 32 | ,,,,,,,,,,,,,,,,,,,,,,,, 33 | ,,,,,,,,,,,,,,,,,,,,,,,, 34 | ,,,,,,,,,,,,,,,,,,,,,,,,1 35 | -------------------------------------------------------------------------------- /ibc_data/contrasts/NARPS.csv: -------------------------------------------------------------------------------- 1 | condition,reject-accept,accept-reject 2 | strongly_accept,-1,1 3 | weakly_accept,-1,1 4 | weakly_reject,1,-1 5 | strongly_reject,1,-1 -------------------------------------------------------------------------------- /ibc_data/contrasts/OptimismBias.csv: -------------------------------------------------------------------------------- 1 | condition,all_events,optimism_bias ,future–past,positive–negative,future_positive–negative,past_positive–negative,interaction 2 | past_positive,1,-1,-1,1,,1,-1 3 | past_negative,1,-1,-1,-1,,-1,1 4 | future_positive,1,-1,1,1,1,,1 5 | future_negative,1,3,1,-1,-1,, 6 | past_neutral,1,,-1,,,, 7 | future_neutral,1,,1,,,,-1 8 | inconclusive,1,,,,,, 9 | fixation,-7,,,,,, -------------------------------------------------------------------------------- /ibc_data/contrasts/PainMovie.csv: -------------------------------------------------------------------------------- 1 | condition,movie_mental-pain,movie_pain,movie_mental 2 | movie_pain,-1,1, 3 | movie_mental,1,,1 4 | -------------------------------------------------------------------------------- /ibc_data/contrasts/PreferenceFaces.csv: -------------------------------------------------------------------------------- 1 | condition,face_constant,face_linear,face_quadratic 2 | face_constant,1,, 3 | face_linear,,1, 4 | face_quadratic,,,1 5 | -------------------------------------------------------------------------------- /ibc_data/contrasts/PreferenceFood.csv: -------------------------------------------------------------------------------- 1 | condition,food_constant,food_linear,food_quadratic 2 | food_constant,1,, 3 | food_linear,,1, 4 | food_quadratic,,,1 5 | -------------------------------------------------------------------------------- /ibc_data/contrasts/PreferenceHouses.csv: -------------------------------------------------------------------------------- 1 | condition,house_constant,house_linear,house_quadratic 2 | house_constant,1,, 3 | house_linear,,1, 4 | house_quadratic,,,1 5 | -------------------------------------------------------------------------------- /ibc_data/contrasts/PreferencePaintings.csv: -------------------------------------------------------------------------------- 1 | condition,painting_constant,painting_linear,painting_quadratic 2 | painting_constant,1,, 3 | painting_linear,,1, 4 | painting_quadratic,,,1 5 | -------------------------------------------------------------------------------- /ibc_data/contrasts/RSVPLanguage.csv: -------------------------------------------------------------------------------- 1 | 
condition,complex-simple,sentence-jabberwocky,sentence-word,word-consonant_string,jabberwocky-pseudo,word-pseudo,pseudo-consonant_string,sentence-consonant_string,simple-consonant_string,complex-consonant_string,sentence-pseudo,jabberwocky-consonant_string 2 | complex,1,1,1,,,,,1,,1,1, 3 | simple,-1,1,1,,,,,1,1,,1, 4 | jabberwocky,,-2,,,1,,,,,,,1 5 | word_list,,,-2,1,,1,,,,,, 6 | pseudoword_list,,,,,-1,-1,1,,,,-2, 7 | consonant_string,,,,-1,,,-1,-2,-1,-1,,-1 8 | probe,,,,,,,,,,,, -------------------------------------------------------------------------------- /ibc_data/contrasts/RewProc.csv: -------------------------------------------------------------------------------- 1 | condition,green-purple,purple-green,left-right,right-left,switch-stay,stay-switch,gain,loss,gain-loss,loss-gain 2 | green,1,-1,,,,,,,, 3 | purple,-1,1,,,,,,,, 4 | left,,,1,-1,,,,,, 5 | right,,,-1,1,,,,,, 6 | switch,,,,,1,-1,,,, 7 | stay,,,,,-1,1,,,, 8 | plus_20,,,,,,,1,,1,-1 9 | minus_20,,,,,,,,1,-1,1 10 | plus_10,,,,,,,1,,1,-1 11 | minus_10,,,,,,,,1,-1,1 -------------------------------------------------------------------------------- /ibc_data/contrasts/Ring.csv: -------------------------------------------------------------------------------- 1 | condition,foveal,middle,peripheral 2 | foveal,1,, 3 | middle,,1, 4 | peripheral,,,1 5 | -------------------------------------------------------------------------------- /ibc_data/contrasts/Scene.csv: -------------------------------------------------------------------------------- 1 | condition,scene_possible_correct-scene_impossible_correct,scene_correct-dot_correct,dot_left-right,dot_hard-easy 2 | scene_impossible_correct,-1,,, 3 | scene_impossible_incorrect,,,, 4 | scene_possible_correct,1,,, 5 | scene_possible_incorrect,,,, 6 | dot_easy_left,,,1,-1 7 | dot_easy_right,,,-1,-1 8 | dot_hard_left,,,1,1 9 | dot_hard_right,,,-1,1 -------------------------------------------------------------------------------- /ibc_data/contrasts/SelectiveStopSignal.csv: -------------------------------------------------------------------------------- 1 | condition,go_critical-stop,go_noncritical-ignore,ignore-stop,stop-ignore 2 | go_critical,1,,, 3 | go_noncritical,,1,, 4 | stop,-1,,-1,1 5 | ignore,,-1,1,-1 -------------------------------------------------------------------------------- /ibc_data/contrasts/Self.csv: -------------------------------------------------------------------------------- 1 | condition,encode_self-other,recognition_self-other,recognition_hit,correct_rejection,recognition_hit-correct_rejection 2 | instructions,,,,, 3 | encode_self,1,,,, 4 | encode_other,-1,,,, 5 | recognition_self_hit,,1,,, 6 | recognition_other_hit,,-1,,, 7 | correct_rejection,,,,, 8 | false_alarm,,,,, -------------------------------------------------------------------------------- /ibc_data/contrasts/SpatialNavigation.csv: -------------------------------------------------------------------------------- 1 | condition,experimental-control,experimental-intersection,retrieval 2 | encoding_phase,,, 3 | intersection,,, 4 | control,-1,,-1 5 | pointing_control,,,-1 6 | experimental,1,,1 7 | pointing_experimental,,,1 8 | navigation,,, 9 | -------------------------------------------------------------------------------- /ibc_data/contrasts/StopNogo.csv: -------------------------------------------------------------------------------- 1 | condition,nogo-go,unsuccessful-successful_stop,successful+nogo-unsuccessful 2 | go,-1,, 3 | nogo,1,,1 4 | successful_stop,,-1,1 5 | unsuccessful_stop,,1,-1 
-------------------------------------------------------------------------------- /ibc_data/contrasts/StopSignal.csv: -------------------------------------------------------------------------------- 1 | condition,stop-go 2 | go,-1 3 | stop,1 -------------------------------------------------------------------------------- /ibc_data/contrasts/Stroop.csv: -------------------------------------------------------------------------------- 1 | condition,incongruent-congruent 2 | congruent,-1 3 | incongruent,1 -------------------------------------------------------------------------------- /ibc_data/contrasts/StroopAomic.csv: -------------------------------------------------------------------------------- 1 | condition,congruent–incongruent ,face_female–face_male,word_female–word_male 2 | congruent,1,, 3 | incongruent,-1,, 4 | face_male,,-1, 5 | face_female,,1, 6 | word_male,,,-1 7 | word_female,,,1 -------------------------------------------------------------------------------- /ibc_data/contrasts/TheoryOfMind.csv: -------------------------------------------------------------------------------- 1 | condition,belief-photo 2 | belief,1 3 | photo,-1 -------------------------------------------------------------------------------- /ibc_data/contrasts/TwoByTwo.csv: -------------------------------------------------------------------------------- 1 | condition,task_switch-stay,cue_switch-stay,cue_taskstay_cuestay,cue_taskswitch_cueswitch,cue_taskswitch_cuestay,cue_taskstay_cueswitch,stim_taskstay_cuestay,stim_taskswitch_cueswitch,stim_taskswitch_cuestay,stim_taskstay_cueswitch 2 | cue_taskstay_cuestay,-1,-1,1,,,,,,, 3 | cue_taskswitch_cueswitch,1,,,1,,,,,, 4 | cue_taskswitch_cuestay,1,,,,1,,,,, 5 | cue_taskstay_cueswitch,-1,1,,,,1,,,, 6 | stim_taskstay_cuestay,,,,,,,1,,, 7 | stim_taskswitch_cueswitch,,,,,,,,1,, 8 | stim_taskswitch_cuestay,,,,,,,,,1, 9 | stim_taskstay_cueswitch,,,,,,,,,,1 10 | -------------------------------------------------------------------------------- /ibc_data/contrasts/VSTM.csv: -------------------------------------------------------------------------------- 1 | condition 2 | vstm_linear 3 | vstm_constant 4 | vstm_quadratic -------------------------------------------------------------------------------- /ibc_data/contrasts/VSTMC.csv: -------------------------------------------------------------------------------- 1 | condition,stim,resp,stim_load3-load1,resp_load3-load1 2 | stim_load1,1,,-1, 3 | stim_load2,1,,, 4 | stim_load3,1,,1, 5 | resp_load1,,1,,-1 6 | resp_load2,,1,, 7 | resp_load3,,1,,1 -------------------------------------------------------------------------------- /ibc_data/contrasts/Visu.csv: -------------------------------------------------------------------------------- 1 | condition,animal-scrambled,characters-scrambled,face-scrambled,house-scrambled,pseudoword-scrambled,scene-scrambled,tool-scrambled 2 | scrambled,-1,-1,-1,-1,-1,-1,-1 3 | animal,1,,,,,, 4 | characters,,1,,,,, 5 | face,,,1,,,, 6 | house,,,,1,,, 7 | pseudoword,,,,,1,, 8 | scene,,,,,,1, 9 | tool,,,,,,,1 -------------------------------------------------------------------------------- /ibc_data/contrasts/VisualSearch.csv: -------------------------------------------------------------------------------- 1 | condition,probe_item,search_array,probe_item_absent-probe_item_present,search_array_absent-search_array_present ,probe_item_four-probe_item_two,search_array_four-search_array_two,delay_vis-delay_wm 2 | memory_array_two,,,,,,, 3 | memory_array_four,,,,,,, 4 | delay_wm,,,,,,,-1 5 | probe_item_two,1,,,,-1,, 6 
| probe_item_four,1,,,,1,, 7 | probe_item_present,1,,-1,,,, 8 | probe_item_absent,1,,1,,,, 9 | sample_item,,,,,,, 10 | delay_vis,,,,,,,1 11 | search_array_two,,1,,,,-1, 12 | search_array_four,,1,,,,1, 13 | search_array_present,,1,,-1,,, 14 | search_array_absent,,1,,1,,, 15 | response_hit,,,,,,, 16 | response_miss,,,,,,, 17 | -------------------------------------------------------------------------------- /ibc_data/contrasts/WardAndAllport.csv: -------------------------------------------------------------------------------- 1 | condition,intermediate-direct,ambiguous-unambiguous 2 | planning_ambiguous_intermediate,1,1 3 | planning_unambiguous_direct,1,-1 4 | planning_ambiguous_direct,-1,1 5 | planning_unambiguous_intermediate,-1,-1 -------------------------------------------------------------------------------- /ibc_data/contrasts/Wedge.csv: -------------------------------------------------------------------------------- 1 | condition,lower_meridian,upper_meridian,left_meridian,right_meridian,lower_left,upper_left,lower_right,upper_right 2 | lower_meridian,1,,,,,,, 3 | upper_meridian,,1,,,,,, 4 | lower_right,,,,,,,1, 5 | upper_left,,,,,,1,, 6 | right_meridian,,,,1,,,, 7 | left_meridian,,,1,,,,, 8 | upper_right,,,,,,,,1 9 | lower_left,,,,,1,,, 10 | -------------------------------------------------------------------------------- /ibc_data/contrasts/WorkingMemoryAomic.csv: -------------------------------------------------------------------------------- 1 | condition,active-passive,active_change-active_no_change 2 | active_change,1,1 3 | active_no_change,1,-1 4 | passive,-2, 5 | -------------------------------------------------------------------------------- /ibc_data/gm_mask_1_5mm.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/individual-brain-charting/public_analysis_code/d0bbddb8e44580da4d4eb26fa8e86053a997a51c/ibc_data/gm_mask_1_5mm.nii.gz -------------------------------------------------------------------------------- /ibc_data/gm_mask_3mm.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/individual-brain-charting/public_analysis_code/d0bbddb8e44580da4d4eb26fa8e86053a997a51c/ibc_data/gm_mask_3mm.nii.gz -------------------------------------------------------------------------------- /ibc_data/sessions.csv: -------------------------------------------------------------------------------- 1 | subject,ses-00,ses-01,ses-02,ses-03,ses-04,ses-05,ses-06,ses-07,ses-08,ses-09,ses-10,ses-11,ses-12,ses-13,ses-14,ses-15,ses-16,ses-17,ses-18,ses-19,ses-20,ses-21,ses-22,ses-23,ses-24,ses-25,ses-26,ses-27,ses-28,ses-29,ses-30,ses-31,ses-32,ses-33,ses-34,ses-35,ses-36,ses-37,ses-38,ses-39,ses-40,ses-41,ses-42,ses-43,ses-44,ses-45,ses-46,ses-47,ses-48,ses-49,ses-50,ses-51 2 | sub-01,screening,old_anat1,old_anat2,hcp1,hcp2,rsvp-language,clips1,archi,clips2,clips3,anat1_,clips4,anat1,anat2,mtt1,mtt2,raiders1,raiders2,tom,preference,enumeration,relaxo,lyon1,lyon2,self,navigation,mathlang,,,,,,,,,,,,,,,,,,,,,,,,, 3 | sub-02,screening,archi,old_anat1,old_anat2,hcp1,hcp2,rsvp-language,clips1,clips2,clips3,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4 | 
sub-04,screening,hcp1,hcp2,rsvp-language,archi,clips1,clips2,clips3,anat1,clips4,anat2,mtt1,mtt2,raiders1,raiders2,preference,tom,enumeration,lyon1,lyon2,relaxo,self,audio1,audio2,stanford1,stanford2,stanford3,lpp1,lpp2,biological_motion,mathlang,navigation,BBT1,BBT2,BBT3,camcan1,camcan2,fbirn,search,reward,scene,monkey_kingdom,color,optimism,aomic,abstraction,mdtb,leuven,mario1,mario2,, 5 | sub-05,screening,hcp1,hcp2,rsvp-language,archi,clips1,clips2,clips3,anat1,clips4,anat2,mtt1,mtt2,raiders1,raiders2,preference,tom,enumeration,lyon1,lyon2,self,audio1,relaxo,audio2,stanford1,stanford2,stanford3,lpp1,lpp2,biological_motion,navigation,BBT1,BBT2,BBT3,camcan1,mathlang,camcan2,fbirn,search,reward,scene,,,,,,,,,,, 6 | sub-06,screening,hcp1,hcp2,rsvp-language,archi,clips1,clips2,clips3,clips4,anat1,anat2,mtt1,mtt2,raiders1,raiders2,preference,tom,enumeration,lyon1,lyon2,relaxo,self,audio1,audio2,stanford1,stanford2,stanford3,lpp1,lpp2,biological_motion,navigation,mathlang,BBT1,BBT2,BBT3,camcan1,camcan2,fbirn,search,reward,scene,monkey_kingdom,color,optimism,aomic,abstraction,mdtb,leuven,mario1,mario2,, 7 | sub-07,screening,hcp1,hcp2,rsvp-language,archi,clips1,clips2,clips3,clips4,anat1,anat2,mtt2,mtt1,raiders1,raiders2,preference,tom,enumeration,lyon1,lyon2,relaxo,audio1,audio2,self,stanford1,stanford2,stanford3,lpp1,lpp2,biological_motion,mathlang,navigation,BBT1,BBT2,BBT3,camcan1,camcan2,fbirn,,,,,,,,,,,,,, 8 | sub-08,screening,archi,hcp1,hcp2,rsvp-language,clips1,clips2,clips3,clips4,anat1,anat2,none,mtt1,mtt2,raiders1,navigation,raiders2,mathlang,preference,tom,enumeration,lyon1,lyon2,self,audio1,audio2,stanford1,stanford2,stanford3,lpp1,lpp2,biological_motion,BBT1,BBT2,BBT3,relaxo,camcan1,camcan2,fbirn,search,reward,scene,monkey_kingdom,color,optimism,aomic,abstraction,mdtb,leuven,mario1,mario2, 9 | sub-09,screening,hcp1,hcp2,old_anat1,rsvp-language,archi,clips1,clips2,clips3,anat1,clips4,anat2,mtt1,mtt2,raiders1,raiders2,preference,tom,enumeration,relaxo,lyon1,lyon2,self,audio1,audio2,stanford1,stanford2,stanford3,lpp1,lpp2,biological_motion,navigation,mathlang,BBT1,BBT2,BBT3,camcan1,camcan2,fbirn,search,reward,scene,monkey_kingdom,color,optimism,aomic,abstraction,mdtb,leuven,mario1,mario2, 10 | sub-11,screening,hcp1,hcp2,rsvp-language,clips1,archi,clips2,clips3,clips4,anat1,anat2,mtt1,mtt2,raiders1,raiders1,raiders2,preference,relaxo,tom,enumeration,lyon1,lyon2,self,audio1,audio2,stanford1,stanford2,stanford3,lpp1,lpp2,biological_motion,mathlang_,navigation,BBT1,BBT2,BBT3,camcan1,mathlang,camcan2,fbirn,search,reward,scene,monkey_kingdom,color,optimism,aomic,abstraction,mdtb,leuven,mario1,mario2 11 | sub-12,screening,hcp1,hcp2,archi,rsvp-language,clips1,clips2,clips3,clips4,anat1,anat2,mtt1,mtt2,raiders1,raiders1,raiders2,preference,relaxo,tom,enumeration,lyon1,lyon2,self,audio1,audio2,stanford1,stanford2,stanford3,lpp1,lpp2,biological_motion,mathlang,navigation,BBT1,BBT2,BBT3,camcan1,camcan2,fbirn,search,reward,scene,monkey_kingdom,color,optimism,aomic,abstraction,mdtb,leuven,mario1,mario2, 12 | sub-13,screening,hcp1,hcp2,rsvp-language,archi,clips1,clips2,clips3,clips4,anat1,anat2,mtt1,mtt2,raiders1,raiders2,preference,tom,enumeration,lyon1,lyon2,relaxo,self,audio1,audio2,stanford1,stanford2,stanford3,lpp1,lpp2,biological_motion,mathlang,navigation,BBT1,BBT2,BBT3,camcan1,camcan2,fbirn,,,,,,,,,,,,,, 13 | 
sub-14,screening,archi,hcp1,hcp2,rsvp-language,anat1,anat2,clips1,clips2,clips3,clips4,mtt1,mtt2,preference,raiders1,raiders2,tom,enumeration,lyon1,lyon2,relaxo,self,audio1,audio2,stanford1,stanford2,stanford3,lpp1,lpp2,biological_motion,mathlang,BBT1,BBT2,BBT3,navigation,camcan1,camcan2,fbirn,search,reward,scene,monkey_kingdom,color,optimism,aomic,abstraction,mdtb,leuven,mario1,mario2,, 14 | sub-15,screening,archi,hcp1,hcp2,rsvp-language,tom,preference,clips1,clips2,clips3,enumeration,clips4,anat1,anat2,mtt1,mtt2,raiders1,raiders2,anat1_,lyon1,self,lyon2,audio1,audio2,stanford1,stanford2,stanford3,lpp1,lpp2,biological_motion,mathlang,navigation,BBT1,BBT2,BBT3,camcan1,camcan2,fbirn,search,reward,scene,monkey_kingdom,color,optimism,aomic,abstraction,mdtb,leuven,mario1,mario2,, 15 | -------------------------------------------------------------------------------- /ibc_public/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/individual-brain-charting/public_analysis_code/d0bbddb8e44580da4d4eb26fa8e86053a997a51c/ibc_public/__init__.py -------------------------------------------------------------------------------- /ibc_public/utils_descriptions.py: -------------------------------------------------------------------------------- 1 | # %% 2 | # This script generates ./ibc_data/descriptions.json 3 | # which contains descriptions for all tasks, conditions and 4 | # contrasts of the IBC dataset. 5 | # These descriptions are initially documented in other files 6 | # of the repo. 7 | 8 | # %% 9 | import json 10 | import warnings 11 | 12 | from pathlib import Path 13 | 14 | import numpy as np 15 | import pandas as pd 16 | 17 | # %% 18 | # Initialise output structure 19 | d = {"tasks": {}} 20 | 21 | # %% 22 | # Add descriptions for all tasks 23 | df_tasks = pd.read_csv("./ibc_data/ibc_tasks.tsv", delimiter="\t") 24 | 25 | for _, row in df_tasks.iterrows(): 26 | d["tasks"][row["task"]] = { 27 | "description": str(row["description"]), 28 | "conditions": {}, 29 | "contrasts": {}, 30 | } 31 | 32 | # %% 33 | # Add descriptions for all conditions 34 | df_conditions = pd.read_csv("./ibc_data/ibc_conditions.tsv", delimiter="\t") 35 | 36 | missing_task = [] 37 | for _, row in df_conditions.iterrows(): 38 | if row["task"] in d["tasks"]: 39 | d["tasks"][row["task"]]["conditions"][row["condition"]] = { 40 | "description": str(row["description"]) 41 | } 42 | else: 43 | missing_task.append(row["task"]) 44 | 45 | missing_task = set(missing_task) 46 | 47 | # %% 48 | warnings.warn( 49 | "The following tasks are missing a description " 50 | "(task names possibly don't match between files for these tasks):\n" 51 | f"{missing_task}" 52 | ) 53 | 54 | # %% 55 | # Add descriptions for all constrasts. 
56 | # This includes contrast string description 57 | # but also a list of all conditions used to compute this contrast 58 | df_all_contrasts = pd.read_csv("./ibc_data/all_contrasts.tsv", delimiter="\t") 59 | 60 | missing_contrasts = [] 61 | 62 | for task in d["tasks"].keys(): 63 | task_contrasts_filename = Path(f"./ibc_data/contrasts/{task}.csv") 64 | if task_contrasts_filename.exists(): 65 | df_contrasts = pd.read_csv( 66 | task_contrasts_filename, delimiter=",", index_col="condition" 67 | ) 68 | df_contrasts = df_contrasts.T 69 | conditions = list(df_contrasts.columns) 70 | 71 | for index, row in df_contrasts.iterrows(): 72 | contrast = row.name 73 | 74 | description = None 75 | tags = [] 76 | selected_descriptions = df_all_contrasts[ 77 | (df_all_contrasts["task"] == task) 78 | & (df_all_contrasts["contrast"] == contrast) 79 | ] 80 | if len(selected_descriptions) > 0: 81 | description = selected_descriptions.iloc[0]["pretty name"] 82 | tags = eval(selected_descriptions.iloc[0]["tags"]) 83 | assert isinstance(tags, list) or tags is None 84 | tags = list(map(lambda x: str(x), tags)) 85 | else: 86 | missing_contrasts.append((task, contrast)) 87 | 88 | d["tasks"][task]["contrasts"][contrast] = { 89 | "description": str(description), 90 | "tags": tags, 91 | "conditions": {}, 92 | } 93 | 94 | for i, condition in enumerate(conditions): 95 | weight = row[i] 96 | weight = float(str(weight).replace(",", ".")) 97 | if not np.isnan(weight): 98 | d["tasks"][task]["contrasts"][contrast]["conditions"][ 99 | condition 100 | ] = weight 101 | 102 | missing_contrasts = set(missing_contrasts) 103 | 104 | # %% 105 | warnings.warn( 106 | "The following contrasts are missing a description " 107 | "(task / contrast names possibly don't match between files " 108 | "for these contrasts):\n" 109 | f"{missing_contrasts}" 110 | ) 111 | 112 | # %% 113 | # Save dictionary as json file 114 | with open("./ibc_data/descriptions.json", "w") as f: 115 | json.dump(d, f) 116 | 117 | # %% 118 | -------------------------------------------------------------------------------- /ibc_public/utils_labels.py: -------------------------------------------------------------------------------- 1 | import json 2 | import sys 3 | import warnings 4 | 5 | import numpy as np 6 | import pandas as pd 7 | 8 | import os 9 | 10 | _package_directory = os.path.dirname(os.path.abspath(__file__)) 11 | 12 | ALL_CONTRASTS = os.path.join(_package_directory, '..', 'ibc_data', 13 | 'all_contrasts.tsv') 14 | 15 | 16 | def get_labels(contrasts='all'): 17 | """ 18 | Returns the list of labels for each passed contrast name 19 | 20 | Parameters 21 | ---------- 22 | contrasts: list of str, default 'all' 23 | Each element of the list is a contrast name in the document. 
24 | The default argument will select all present contrasts 25 | 26 | Returns 27 | ------- 28 | contrast_dict: dict 29 | Dictionary containing the contrasts provided by the user 30 | as keys, and their corresponding labels as values 31 | """ 32 | df = pd.read_csv(ALL_CONTRASTS, sep='\t') 33 | if contrasts == 'all': 34 | contrasts = df['contrast'].values 35 | 36 | contrast_dict = {} 37 | 38 | con_slice = df[df['contrast'].isin(contrasts)] 39 | not_found = np.setdiff1d(contrasts, con_slice['contrast']) 40 | 41 | if not_found.size != 0: 42 | warnings.warn("The following contrast names were not " 43 | "found: {}".format(not_found)) 44 | 45 | for index, con in con_slice.iterrows(): 46 | 47 | labels = con.loc[con == 1.0].index 48 | con_name = "({}) {}".format(con.task, con.contrast) 49 | contrast_dict[con_name] = [label for label in labels] 50 | 51 | return contrast_dict 52 | 53 | 54 | def add_labels(contrast, labels, output_file=ALL_CONTRASTS): 55 | """ 56 | Adds all the passed labels to the selected contrast 57 | 58 | Paramenters 59 | ----------- 60 | contrast: str 61 | Name of the contrast that will get the labels 62 | 63 | labels: list of str 64 | Labels that the user wants to add. The labels must exist as 65 | columns in the file 66 | 67 | output_file: str or path object 68 | Path to csv file where the new label database is to be saved 69 | with the changed 70 | """ 71 | df = pd.read_csv(ALL_CONTRASTS, sep='\t') 72 | con_index = df[df['contrast'] == contrast].index 73 | for label in labels: 74 | if label in df.columns: 75 | df.at[con_index, label] = 1.0 76 | else: 77 | print("No label with the name {} could be found".format(label)) 78 | df.at[con_index, label] = 1.0 79 | df.fillna(0.0, inplace=True) 80 | print("Added {}\n".format(label)) 81 | 82 | df.to_csv(output_file, sep='\t', index=False) 83 | 84 | 85 | def _flatten_contrast(contrast): 86 | """Helper function to change a labels_dict entry into a flattened list""" 87 | 88 | trans = str.maketrans("", "", "()") 89 | flat_contrast = contrast[0].translate(trans).split(" ") 90 | flat_contrast.extend(contrast[1]) 91 | 92 | return flat_contrast 93 | 94 | 95 | def sparse_labels(output_dir=os.path.dirname(ALL_CONTRASTS), save=True): 96 | """ 97 | Transform the all_contrasts.csv file into a more readable, sparse file. 98 | The new file will contain the name of each task, each contrast and only 99 | the names of the labels that are related to them in each row. 100 | 101 | Parameters 102 | ---------- 103 | output_dir: str, default ibc_data dir path 104 | Path for saving the new file. 
Defaults to the same directory 105 | where all_contrasts.csv is located 106 | 107 | save: bool, default True 108 | 109 | Returns 110 | ------- 111 | sparse_df: pd.DataFrame 112 | New dataframe with only the task name, contrast name and 113 | names of labels in each row 114 | """ 115 | 116 | labels_dict = get_labels() 117 | sparse_list = list(map(_flatten_contrast, labels_dict.items())) 118 | 119 | col_names = ['Task', 'Contrast'] 120 | col_names.extend(["Label{}".format(i + 1 for i in range(10))]) 121 | 122 | sparse_df = pd.DataFrame(sparse_list, columns=col_names) 123 | 124 | return sparse_df 125 | 126 | 127 | -------------------------------------------------------------------------------- /ibc_public/utils_retino.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import nibabel as nib 4 | NEGINF = -np.inf 5 | ALL_REG = ['sin_ring_pos', 'cos_ring_pos', 'sin_ring_neg', 'cos_ring_neg', 6 | 'sin_wedge_pos', 'cos_wedge_pos', 'sin_wedge_neg', 7 | 'cos_wedge_neg'] 8 | 9 | 10 | def combine_phase(phase_pos, phase_neg, offset=0, hemo=None): 11 | """ Combine the phases estimated in two directions""" 12 | if hemo is None: 13 | # estimate hemodynamic delay 14 | hemo = 0.5 * (phase_pos + phase_neg) 15 | hemo += np.pi * (hemo < 0) # - np.pi / 4) 16 | hemo += np.pi * (hemo < 0) # - np.pi / 4) 17 | 18 | # first phase estimate 19 | pr1 = phase_pos - hemo 20 | pr2 = hemo - phase_neg 21 | pr2[(pr1 - pr2) > np.pi] += (2 * np.pi) 22 | pr2[(pr1 - pr2) > np.pi] += (2 * np.pi) 23 | pr1[(pr2 - pr1) > np.pi] += (2 * np.pi) 24 | pr1[(pr2 - pr1) > np.pi] += (2 * np.pi) 25 | phase = 0.5 * (pr1 + pr2) 26 | 27 | # add the offset and bring back to [-pi, +pi] 28 | phase += offset 29 | phase += 2 * np.pi * (phase < - np.pi) 30 | phase += 2 * np.pi * (phase < - np.pi) 31 | phase -= 2 * np.pi * (phase > np.pi) 32 | phase -= 2 * np.pi * (phase > np.pi) 33 | return phase, hemo 34 | 35 | 36 | def phase_maps(data, offset_ring=0, offset_wedge=0, do_wedge=True, 37 | do_ring=True): 38 | """ Compute the phase for each functional map 39 | 40 | Parameters 41 | ---------- 42 | data: dictionary with keys 'sin_wedge_pos', 'sin_wedge_neg', 43 | 'cos_wedge_neg', 'cos_ring_pos', 'sin_ring_neg', 'cos_wedge_pos', 44 | 'sin_ring_pos', 'cos_ring_neg' 45 | arrays of shape (n_nodes) showing fMRI activations 46 | for different retino conditions 47 | 48 | offset_ring: float, 49 | offset value to apply to the ring phase 50 | 51 | offset_wedge: float, 52 | offset value to apply to the wedge phase 53 | 54 | do_wedge: bool, 55 | should we do the ring phase estimation or not 56 | 57 | do_ring: bool, 58 | should we do the ring phase estimation or not 59 | 60 | mesh: path or mesh instance, optional 61 | underlying mesh model 62 | """ 63 | phase_ring, phase_wedge, hemo = None, None, None 64 | if do_ring: 65 | phase_ring_pos = np.arctan2(data['sin_ring_pos'], data['cos_ring_pos']) 66 | phase_ring_neg = np.arctan2(data['sin_ring_neg'], data['cos_ring_neg']) 67 | phase_ring, hemo_ring = combine_phase( 68 | phase_ring_pos, phase_ring_neg, offset_ring, hemo=hemo) 69 | hemo = hemo_ring 70 | 71 | if do_wedge: 72 | phase_wedge_pos = np.arctan2(data['sin_wedge_pos'], 73 | data['cos_wedge_pos']) 74 | phase_wedge_neg = np.arctan2(data['sin_wedge_neg'], 75 | data['cos_wedge_neg']) 76 | phase_wedge, hemo_wedge = combine_phase( 77 | phase_wedge_pos, phase_wedge_neg, offset_wedge) 78 | hemo = hemo_wedge 79 | 80 | if do_ring and do_wedge: 81 | hemo = 0.5 * (hemo_ring + hemo_wedge) 82 | 83 | return 
phase_wedge, phase_ring, hemo 84 | 85 | 86 | def angular_maps(side, contrast_path, mask_img, mesh_path=None, 87 | all_reg=ALL_REG, threshold=3.1, 88 | offset_wedge=0, offset_ring=0, 89 | do_wedge=True, do_ring=True, do_phase_unwrapping=False): 90 | """ 91 | Parameters 92 | ---------- 93 | side: {'left', 'right', False} 94 | all_reg: list of strings, 95 | identifiers of the contrast files used in angular mapping 96 | threshold: float, optional 97 | threshold defining the brain regions 98 | where the analysis is performed 99 | offset_wedge: float, optional 100 | offset to be applied to wedge angle 101 | offset_ring float, optional 102 | offset to be applied to ring angle 103 | """ 104 | if side is False: 105 | stat_map = os.path.join(contrast_path, 'effects_of_interest_z_map.nii') 106 | 107 | # create an occipital data_mask 108 | mask = nib.load(stat_map).get_data() > threshold 109 | 110 | # load and mask the data 111 | data = {} 112 | for r in all_reg: 113 | contrast_file = os.path.join(contrast_path, '%s_con.nii' % r) 114 | data[r] = nib.load(contrast_file).get_data()[mask] 115 | do_phase_unwrapping = False 116 | mesh = None 117 | else: 118 | pass 119 | 120 | # Then compute the activation phase in these regions 121 | phase_wedge, phase_ring, hemo = phase_maps( 122 | data, offset_ring, offset_wedge, do_wedge, do_ring, 123 | do_phase_unwrapping, mesh=mesh, mask=mask) 124 | 125 | # write the results 126 | data_, id_ = [hemo, mask[mask > 0]], ['hemo', 'mask'] 127 | if do_ring: 128 | data_.append(phase_ring) 129 | id_.append('phase_ring') 130 | if do_wedge: 131 | data_.append(phase_wedge) 132 | id_.append('phase_wedge') 133 | 134 | if side is False: 135 | for (x, name) in zip(data_, id_): 136 | wdata = np.zeros(nib.load(stat_map).shape) 137 | wdata[mask > 0] = x 138 | wim = nib.Nifti1Image(wdata, nib.load(stat_map).affine) 139 | nib.save(wim, os.path.join(contrast_path, '%s.nii' % name)) 140 | 141 | 142 | # Compute fixed effects_maps for effects of interest -> retinotopic maps 143 | -------------------------------------------------------------------------------- /papers_scripts/README.md: -------------------------------------------------------------------------------- 1 | # Note 2 | 3 | Each folder contains the scripts featuring the analysis pipeline undertaken for the corresponding peer-reviewed publication. 
4 | -------------------------------------------------------------------------------- /papers_scripts/gradients/gm_mask_2mm.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/individual-brain-charting/public_analysis_code/d0bbddb8e44580da4d4eb26fa8e86053a997a51c/papers_scripts/gradients/gm_mask_2mm.nii.gz -------------------------------------------------------------------------------- /papers_scripts/gradients/utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Utilities for script_ibc.py, for generalization to upcoming script_hcp.py 3 | 4 | Author: Bertrand Thirion, 2020 5 | """ 6 | from nilearn.decomposition import DictLearning 7 | import numpy as np 8 | from joblib import Parallel, delayed 9 | 10 | def make_dictionary(rs_fmri, n_components, cache, mask, n_jobs=1): 11 | dict_learning = DictLearning(n_components=n_components, 12 | memory=cache, memory_level=2, 13 | verbose=1, random_state=0, n_epochs=1, 14 | mask=mask, n_jobs=n_jobs) 15 | dict_learning.fit(rs_fmri) 16 | return dict_learning.components_img_, dict_learning.components_ 17 | 18 | 19 | def adapt_components(Y, subject, rs_fmri_db, masker, n_dim): 20 | rs_scans = rs_fmri_db[rs_fmri_db.subject == subject].path 21 | X_ = np.zeros_like(Y) 22 | for scan in rs_scans.values: 23 | X = masker.transform(scan) 24 | U, S, V = np.linalg.svd(X, 0) 25 | Vk = V[:n_dim] 26 | X_ += Y.dot(Vk.T).dot(Vk) 27 | return X_ 28 | 29 | 30 | def make_parcellation(ward, rs_fmri): 31 | indexes = np.random.randint(len(rs_fmri), size=5) 32 | ward.fit([rs_fmri[j] for j in indexes]) 33 | return ward.labels_img_ 34 | 35 | 36 | def make_parcellations(ward, rs_fmri, n_parcellations, n_jobs): 37 | parcellations = Parallel(n_jobs=n_jobs)(delayed(make_parcellation)( 38 | ward, rs_fmri) for b in range(n_parcellations)) 39 | return parcellations 40 | 41 | 42 | def predict_Y_oneparcel(parcellations, dummy_masker, train_index, 43 | n_parcels, X, models, b): 44 | labels = np.ravel(dummy_masker.transform(parcellations[b])) 45 | Y_pred = np.zeros((models[0][0].shape[0], labels.size)) 46 | for q in range(n_parcels): 47 | parcel = labels == q + 1 48 | for i in train_index: 49 | Y_pred[:, parcel] += np.dot( 50 | X.T[parcel], models[i][b * n_parcels + q].T).T 51 | return Y_pred 52 | 53 | 54 | def predict_Y_multiparcel(parcellations, dummy_masker, train_index, 55 | n_parcels, Y, X, models, n_jobs): 56 | n_parcellations = len(parcellations) 57 | #Y_preds = Parallel(n_jobs=n_jobs)(delayed(predict_Y_oneparcel)( 58 | # parcellations, dummy_masker, train_index, 59 | # n_parcels, X, models, b) for b in range(n_parcellations)) 60 | Y_preds = [] 61 | for b in range(n_parcellations): 62 | Y_pred_ = predict_Y_oneparcel( 63 | parcellations, dummy_masker, train_index, 64 | n_parcels, X, models, b) 65 | Y_preds.append(Y_pred_) 66 | # 67 | Y_preds = np.array(Y_preds) 68 | Y_pred = np.sum(Y_preds, 0) / (n_parcellations * len(train_index)) 69 | return Y_pred 70 | 71 | 72 | def permuted_score(Y, Y_pred, Y_baseline, n_permutations, seed=1): 73 | rng = np.random.RandomState(seed) 74 | n_contrasts = Y.shape[1] 75 | permuted_con_score = [] 76 | permuted_vox_score = [] 77 | for b in range(n_permutations): 78 | permutation = rng.permutation(n_contrasts) 79 | Y_ = Y[:, permutation] 80 | vox_score = 1 - np.sum((Y_ - Y_pred) ** 2, 0) / np.sum(( 81 | Y_ - Y_baseline.mean(0)) ** 2, 0) 82 | con_score = 1 - np.sum((Y_.T - Y_pred.T) ** 2, 0) / np.sum( 83 | (Y_.T - Y_baseline.T.mean(0)) ** 
2, 0) 84 | permuted_con_score.append(con_score) 85 | #permuted_vox_score.append(vox_score) 86 | return permuted_con_score#, permuted_vox_score 87 | 88 | 89 | def fit_regressions(individual_components, data, parcellations, 90 | dummy_masker, clf, i): 91 | n_parcellations = len(parcellations) 92 | X = individual_components[i] 93 | Y = data[i] 94 | model = [] 95 | n_parcellations = len(parcellations) 96 | for b in range(n_parcellations): 97 | labels = np.ravel( 98 | dummy_masker.transform(parcellations[b]).astype(np.int)) 99 | n_parcels = len(np.unique(labels)) 100 | for q in range(n_parcels): 101 | parcel = labels == q + 1 102 | model_ = clf.fit(X.T[parcel], Y.T[parcel]).coef_ 103 | model.append(model_) 104 | return model 105 | 106 | 107 | def predict_Y(parcellations, dummy_masker, n_parcels, X, average_models): 108 | n_parcellations = len(parcellations) 109 | Y_preds = [] 110 | for b in range(n_parcellations): 111 | labels = np.ravel(dummy_masker.transform(parcellations[b])) 112 | Y_pred = np.zeros((average_models[0].shape[0], labels.size)) 113 | for q in range(n_parcels): 114 | parcel = labels == q + 1 115 | Y_pred[:, parcel] = np.dot( 116 | X.T[parcel], average_models[b * n_parcels + q].T).T 117 | Y_preds.append(Y_pred) 118 | Y_preds = np.array(Y_preds) 119 | Y_pred = np.sum(Y_preds, 0) / (n_parcellations) 120 | return Y_pred 121 | -------------------------------------------------------------------------------- /papers_scripts/hbm2021/README.md: -------------------------------------------------------------------------------- 1 | # Note 2 | 3 | This folder contains the scripts featuring the analysis pipeline undertaken for [Pinho, A. L. *et al.* (2021). Subject-specific segregation of functional territories based on deep phenotyping. Hum Brain Mapp **42**(4):841-870](https://doi.org/10.1002/hbm.25189). 
4 | -------------------------------------------------------------------------------- /papers_scripts/hbm2021/archi_contrasts.csv: -------------------------------------------------------------------------------- 1 | archi name,IBC task,IBC name 2 | left-right,ArchiStandard,left-right_button_press 3 | video-audio,ArchiStandard,reading-listening 4 | motor-cognitive,ArchiStandard,motor-cognitive 5 | reading-visual,ArchiStandard,reading-checkerboard 6 | computation-sentences,ArchiStandard,computation-sentences 7 | H-V,ArchiStandard,horizontal-vertical 8 | saccade,ArchiSpatial,saccades 9 | hand-side,ArchiSpatial,hand-side 10 | grasp-orientation,ArchiSpatial,grasp-orientation 11 | rotation_side,ArchiSpatial,rotation_side 12 | object_orientation,ArchiSpatial,object_orientation 13 | intention-random,ArchiSocial,triangle_mental-random 14 | false_belief-mechanistic_audio,ArchiSocial,false_belief-mechanistic_audio 15 | triangle_random,ArchiSocial,triangle_random 16 | mecanistic_audio,ArchiSocial,mechanistic_audio 17 | mecanistic_video,ArchiSocial,mechanistic_video 18 | non_speech,ArchiSocial,non_speech_sound 19 | false_belief-mechanistic_video,ArchiSocial,false_belief-mechanistic_video 20 | speech-non_speech,ArchiSocial,speech-non_speech 21 | expression_sex-control,ArchiEmotional,expression_gender-control 22 | expression_intention-sex,ArchiEmotional,expression_intention-gender 23 | face_sex-control,ArchiEmotional,face_gender-control 24 | face_trusty-sex,ArchiEmotional,face_trusty-gender 25 | -------------------------------------------------------------------------------- /papers_scripts/hbm2021/bids_postprocessed.json: -------------------------------------------------------------------------------- 1 | {"ArchiEmotional": ["ArchiEmotional"], "HcpLanguage": ["HcpLanguage"], "HcpSocial": ["HcpSocial"], "ArchiSpatial": ["ArchiSpatial"], "HcpMotor": ["HcpMotor"], "HcpWm": ["HcpWm"], "RSVPLanguage": ["RSVPLanguage"], "ArchiSocial": ["ArchiSocial"], "HcpGambling": ["HcpGambling"], "ArchiStandard": ["ArchiStandard"], "HcpEmotion": ["HcpEmotion"], "HcpRelational": ["HcpRelational"]} 2 | -------------------------------------------------------------------------------- /papers_scripts/hbm2021/grey_mask_img.py: -------------------------------------------------------------------------------- 1 | """ 2 | Produce image with grey mask 3 | 4 | author: Ana Luisa Pinho 5 | date: July 2019 6 | """ 7 | 8 | import os 9 | import numpy as np 10 | 11 | import ibc_public.utils_data 12 | from nilearn import plotting 13 | 14 | import matplotlib 15 | 16 | # Get grey matter mask 17 | _package_directory = os.path.dirname( 18 | os.path.abspath(ibc_public.utils_data.__file__)) 19 | mask = os.path.join( 20 | _package_directory, '../ibc_data', 'gm_mask_1_5mm.nii.gz') 21 | 22 | # Output path 23 | output_path = '../../../admin/papers/descriptive_paper/' 24 | output_name = 'eps_figs/grey_mask_1_5mm.eps' 25 | output = os.path.join(output_path, output_name) 26 | 27 | cmap = matplotlib.colors.ListedColormap('w', name='from_list', N=256) 28 | 29 | plotting.plot_roi(mask, cmap=cmap, bg_img=None, 30 | cut_coords=(-1, 24, 6), 31 | black_bg = 'True', 32 | output_file=output) 33 | -------------------------------------------------------------------------------- /papers_scripts/hbm2021/hcp_contrasts.csv: -------------------------------------------------------------------------------- 1 | HCP task,HCP name,IBC task,IBC name 2 | EMOTION,FACES-SHAPES,HcpEmotion,face-shape 3 | LANGUAGE,STORY-MATH,HcpLanguage,story-math 4 | 
RELATIONAL,REL-MATCH,HcpRelational,relational-match 5 | GAMBLING,PUNISH-REWARD,HcpGambling,punishment-reward 6 | SOCIAL,TOM-RANDOM,HcpSocial,mental-random 7 | WM,0BK-2BK,HcpWm,2back-0back 8 | WM,PLACE-AVG,HcpWm,place-avg 9 | WM,TOOL-AVG,HcpWm,tools-avg 10 | WM,FACE-AVG,HcpWm,face-avg 11 | WM,BODY-AVG,HcpWm,body-avg 12 | MOTOR,LF-AVG,HcpMotor,left_foot-avg 13 | MOTOR,RF-AVG,HcpMotor,right_foot-avg 14 | MOTOR,LH-AVG,HcpMotor,left_hand-avg 15 | MOTOR,RH-AVG,HcpMotor,right_hand-avg 16 | MOTOR,T-AVG,HcpMotor,tongue-avg 17 | -------------------------------------------------------------------------------- /papers_scripts/hbm2021/rois_hcplang900_z16/left_FG.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/individual-brain-charting/public_analysis_code/d0bbddb8e44580da4d4eb26fa8e86053a997a51c/papers_scripts/hbm2021/rois_hcplang900_z16/left_FG.nii.gz -------------------------------------------------------------------------------- /papers_scripts/hbm2021/rois_hcplang900_z16/left_FG.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/individual-brain-charting/public_analysis_code/d0bbddb8e44580da4d4eb26fa8e86053a997a51c/papers_scripts/hbm2021/rois_hcplang900_z16/left_FG.png -------------------------------------------------------------------------------- /papers_scripts/hbm2021/rois_hcplang900_z16/left_FP.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/individual-brain-charting/public_analysis_code/d0bbddb8e44580da4d4eb26fa8e86053a997a51c/papers_scripts/hbm2021/rois_hcplang900_z16/left_FP.nii.gz -------------------------------------------------------------------------------- /papers_scripts/hbm2021/rois_hcplang900_z16/left_FP.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/individual-brain-charting/public_analysis_code/d0bbddb8e44580da4d4eb26fa8e86053a997a51c/papers_scripts/hbm2021/rois_hcplang900_z16/left_FP.png -------------------------------------------------------------------------------- /papers_scripts/hbm2021/rois_hcplang900_z16/left_IFG.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/individual-brain-charting/public_analysis_code/d0bbddb8e44580da4d4eb26fa8e86053a997a51c/papers_scripts/hbm2021/rois_hcplang900_z16/left_IFG.nii.gz -------------------------------------------------------------------------------- /papers_scripts/hbm2021/rois_hcplang900_z16/left_IFG.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/individual-brain-charting/public_analysis_code/d0bbddb8e44580da4d4eb26fa8e86053a997a51c/papers_scripts/hbm2021/rois_hcplang900_z16/left_IFG.png -------------------------------------------------------------------------------- /papers_scripts/hbm2021/rois_hcplang900_z16/left_TPJ.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/individual-brain-charting/public_analysis_code/d0bbddb8e44580da4d4eb26fa8e86053a997a51c/papers_scripts/hbm2021/rois_hcplang900_z16/left_TPJ.nii.gz -------------------------------------------------------------------------------- /papers_scripts/hbm2021/rois_hcplang900_z16/left_TPJ.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/individual-brain-charting/public_analysis_code/d0bbddb8e44580da4d4eb26fa8e86053a997a51c/papers_scripts/hbm2021/rois_hcplang900_z16/left_TPJ.png -------------------------------------------------------------------------------- /papers_scripts/hbm2021/rois_hcplang900_z16/left_aSTS_TP.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/individual-brain-charting/public_analysis_code/d0bbddb8e44580da4d4eb26fa8e86053a997a51c/papers_scripts/hbm2021/rois_hcplang900_z16/left_aSTS_TP.nii.gz -------------------------------------------------------------------------------- /papers_scripts/hbm2021/rois_hcplang900_z16/left_aSTS_TP.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/individual-brain-charting/public_analysis_code/d0bbddb8e44580da4d4eb26fa8e86053a997a51c/papers_scripts/hbm2021/rois_hcplang900_z16/left_aSTS_TP.png -------------------------------------------------------------------------------- /papers_scripts/hbm2021/rois_hcplang900_z16/left_pSTS.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/individual-brain-charting/public_analysis_code/d0bbddb8e44580da4d4eb26fa8e86053a997a51c/papers_scripts/hbm2021/rois_hcplang900_z16/left_pSTS.nii.gz -------------------------------------------------------------------------------- /papers_scripts/hbm2021/rois_hcplang900_z16/left_pSTS.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/individual-brain-charting/public_analysis_code/d0bbddb8e44580da4d4eb26fa8e86053a997a51c/papers_scripts/hbm2021/rois_hcplang900_z16/left_pSTS.png -------------------------------------------------------------------------------- /papers_scripts/hbm2021/rois_hcplang900_z16/vmPFC.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/individual-brain-charting/public_analysis_code/d0bbddb8e44580da4d4eb26fa8e86053a997a51c/papers_scripts/hbm2021/rois_hcplang900_z16/vmPFC.nii.gz -------------------------------------------------------------------------------- /papers_scripts/hbm2021/rois_hcplang900_z16/vmPFC.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/individual-brain-charting/public_analysis_code/d0bbddb8e44580da4d4eb26fa8e86053a997a51c/papers_scripts/hbm2021/rois_hcplang900_z16/vmPFC.png -------------------------------------------------------------------------------- /papers_scripts/hbm2021/rois_pallier/IFGorb.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/individual-brain-charting/public_analysis_code/d0bbddb8e44580da4d4eb26fa8e86053a997a51c/papers_scripts/hbm2021/rois_pallier/IFGorb.nii.gz -------------------------------------------------------------------------------- /papers_scripts/hbm2021/rois_pallier/IFGtri.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/individual-brain-charting/public_analysis_code/d0bbddb8e44580da4d4eb26fa8e86053a997a51c/papers_scripts/hbm2021/rois_pallier/IFGtri.nii.gz -------------------------------------------------------------------------------- /papers_scripts/hbm2021/rois_pallier/Precentral_Pallier_2011.nii.gz: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/individual-brain-charting/public_analysis_code/d0bbddb8e44580da4d4eb26fa8e86053a997a51c/papers_scripts/hbm2021/rois_pallier/Precentral_Pallier_2011.nii.gz -------------------------------------------------------------------------------- /papers_scripts/hbm2021/rois_pallier/Putamen.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/individual-brain-charting/public_analysis_code/d0bbddb8e44580da4d4eb26fa8e86053a997a51c/papers_scripts/hbm2021/rois_pallier/Putamen.nii.gz -------------------------------------------------------------------------------- /papers_scripts/hbm2021/rois_pallier/TP.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/individual-brain-charting/public_analysis_code/d0bbddb8e44580da4d4eb26fa8e86053a997a51c/papers_scripts/hbm2021/rois_pallier/TP.nii.gz -------------------------------------------------------------------------------- /papers_scripts/hbm2021/rois_pallier/TPJ.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/individual-brain-charting/public_analysis_code/d0bbddb8e44580da4d4eb26fa8e86053a997a51c/papers_scripts/hbm2021/rois_pallier/TPJ.nii.gz -------------------------------------------------------------------------------- /papers_scripts/hbm2021/rois_pallier/aSTS.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/individual-brain-charting/public_analysis_code/d0bbddb8e44580da4d4eb26fa8e86053a997a51c/papers_scripts/hbm2021/rois_pallier/aSTS.nii.gz -------------------------------------------------------------------------------- /papers_scripts/hbm2021/rois_pallier/dmPFC_Pallier_2011.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/individual-brain-charting/public_analysis_code/d0bbddb8e44580da4d4eb26fa8e86053a997a51c/papers_scripts/hbm2021/rois_pallier/dmPFC_Pallier_2011.nii.gz -------------------------------------------------------------------------------- /papers_scripts/hbm2021/rois_pallier/pSTS.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/individual-brain-charting/public_analysis_code/d0bbddb8e44580da4d4eb26fa8e86053a997a51c/papers_scripts/hbm2021/rois_pallier/pSTS.nii.gz -------------------------------------------------------------------------------- /papers_scripts/hbm2021/utils_surface_plots.py: -------------------------------------------------------------------------------- 1 | """ Various utilities for surface-based plotting of brain maps 2 | """ 3 | import numpy as np 4 | import nibabel as nib 5 | import os 6 | from nilearn import plotting 7 | 8 | 9 | def surface_one_sample(df, contrast, side): 10 | from scipy.stats import ttest_1samp, norm 11 | mask = (df.contrast.values == contrast) * (df.side.values == side) 12 | X = np.array([nib.load(texture).darrays[0].data 13 | for texture in list(df.path[mask].values)]) 14 | # print (X.shape, np.sum(np.isnan(X))) 15 | t_values, p_values = ttest_1samp(X, 0) 16 | p_values = .5 * (1 - (1 - p_values) * np.sign(t_values)) 17 | z_values = norm.isf(p_values) 18 | return z_values 19 | 20 | 21 | def surface_conjunction(df, contrast, side, percentile=25): 22 | from conjunction 
import _conjunction_inference_from_z_values 23 | mask = (df.contrast.values == contrast) * (df.side.values == side) 24 | Z = np.array([nib.load(texture).darrays[0].data 25 | for texture in list(df.path[mask].values)]).T 26 | pos_conj = _conjunction_inference_from_z_values(Z, percentile * .01) 27 | neg_conj = _conjunction_inference_from_z_values(-Z, percentile * .01) 28 | conj = pos_conj 29 | conj[conj < 0] = 0 30 | conj[neg_conj > 0] = - neg_conj[neg_conj > 0] 31 | return conj 32 | 33 | 34 | def make_thumbnail_surface(func, hemi, threshold=3.0, vmax=10., 35 | output_dir='/tmp'): 36 | if os.path.exists('/neurospin/ibc'): 37 | dir_ = '/neurospin/ibc/derivatives/sub-01/ses-00/anat/fsaverage/surf' 38 | else: 39 | dir_ = '/storage/store/data/ibc/derivatives/sub-01/ses-00/anat/' + \ 40 | 'fsaverage/surf' 41 | if hemi == 'right': 42 | mesh = os.path.join(dir_, 'rh.inflated') 43 | bg_map = os.path.join(dir_, 'rh.sulc') 44 | else: 45 | mesh = os.path.join(dir_, 'lh.inflated') 46 | bg_map = os.path.join(dir_, 'lh.sulc') 47 | 48 | medial = '/tmp/surf_medial_%s.png' % hemi 49 | lateral = '/tmp/surf_lateral_%s.png' % hemi 50 | # threshold = fdr_threshold(func, .05) 51 | plotting.plot_surf_stat_map(mesh, func, hemi=hemi, vmax=vmax, 52 | threshold=threshold, bg_map=bg_map, 53 | view='lateral', output_file=lateral) 54 | plotting.plot_surf_stat_map(mesh, func, hemi=hemi, vmax=vmax, 55 | threshold=threshold, bg_map=bg_map, 56 | view='medial', output_file=medial) 57 | return medial, lateral 58 | 59 | 60 | def make_atlas_surface(label, hemi, name='', output_dir='/tmp'): 61 | if os.path.exists('/neurospin/ibc'): 62 | dir_ = '/neurospin/ibc/derivatives/sub-01/ses-00/anat/fsaverage/surf' 63 | else: 64 | dir_ = '/storage/store/data/ibc/derivatives/sub-01/ses-00/anat/' + \ 65 | 'fsaverage/surf' 66 | if hemi == 'right': 67 | mesh = os.path.join(dir_, 'rh.inflated') 68 | bg_map = os.path.join(dir_, 'rh.sulc') 69 | else: 70 | mesh = os.path.join(dir_, 'lh.inflated') 71 | bg_map = os.path.join(dir_, 'lh.sulc') 72 | 73 | medial = os.path.join(output_dir, '%s_medial_%s.png' % (name, hemi)) 74 | lateral = os.path.join(output_dir, '%s_lateral_%s.png' % (name, hemi)) 75 | plotting.plot_surf_roi(mesh, label, hemi=hemi, bg_map=bg_map, 76 | view='lateral', output_file=lateral, alpha=.9) 77 | plotting.plot_surf_roi(mesh, label, hemi=hemi, bg_map=bg_map, 78 | view='medial', output_file=medial, alpha=.9) 79 | 80 | 81 | def faces_2_connectivity(faces): 82 | from scipy.sparse import coo_matrix 83 | n_features = len(np.unique(faces)) 84 | edges = np.vstack((faces.T[:2].T, faces.T[1:].T, faces.T[0:3:2].T)) 85 | weight = np.ones(edges.shape[0]) 86 | connectivity = coo_matrix((weight, (edges.T[0], edges.T[1])), 87 | (n_features, n_features)) # .tocsr() 88 | # Making it symmetrical 89 | connectivity = (connectivity + connectivity.T) / 2 90 | return connectivity 91 | 92 | 93 | def connected_components_cleaning(connectivity, _map, cluster_size=10): 94 | from scipy.sparse import csgraph, coo_matrix 95 | n_features = connectivity.shape[0] 96 | weight = connectivity.data.copy() 97 | edges = connectivity.nonzero() 98 | i_idx, j_idx = edges 99 | weight[_map[i_idx] == 0] = 0 100 | weight[_map[j_idx] == 0] = 0 101 | mask = weight != 0 102 | reduced_connectivity = coo_matrix( 103 | (weight[mask], (i_idx[mask], j_idx[mask])), (n_features, n_features)) 104 | # Clustering step: getting the connected components of the nn matrix 105 | n_components, labels = csgraph.connected_components(reduced_connectivity) 106 | label, count = np.unique(labels, 
return_counts=True) 107 | good_labels = label[count >= cluster_size] 108 | map_ = np.zeros_like(_map) 109 | for gl in good_labels: 110 | map_[labels == gl] = _map[labels == gl] 111 | return map_ 112 | 113 | 114 | def clean_surface_map(maps, hemi, cluster_size): 115 | """Clean surface maps by removing small connected components""" 116 | from nilearn.surface import load_surf_mesh 117 | if os.path.exists('/neurospin/ibc'): 118 | dir_ = '/neurospin/ibc/derivatives/sub-01/ses-00/anat/fsaverage/surf' 119 | else: 120 | dir_ = '/storage/store/data/ibc/derivatives/sub-01/ses-00/anat/' + \ 121 | 'fsaverage/surf' 122 | if hemi == 'right': 123 | mesh = os.path.join(dir_, 'rh.inflated') 124 | else: 125 | mesh = os.path.join(dir_, 'lh.inflated') 126 | 127 | _, faces = load_surf_mesh(mesh) 128 | connectivity = faces_2_connectivity(faces) 129 | for i in range(maps.shape[1]): 130 | maps[:, i] = connected_components_cleaning( 131 | connectivity, maps[:, i], cluster_size=cluster_size) 132 | return maps 133 | -------------------------------------------------------------------------------- /papers_scripts/neuroimage2021/README.md: -------------------------------------------------------------------------------- 1 | # Note 2 | 3 | This folder contains the scripts featuring the analysis pipeline undertaken for [Dohmatob, E., Richard, H., Pinho, A. L., Thirion, B. (2021) Brain topography beyond parcellations: Local gradients of functional maps. *Neuroimage* **229**:117706](https://doi.org/10.1016/j.neuroimage.2020.117706). 4 | -------------------------------------------------------------------------------- /papers_scripts/neuroimage2021/gm_mask_2mm.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/individual-brain-charting/public_analysis_code/d0bbddb8e44580da4d4eb26fa8e86053a997a51c/papers_scripts/neuroimage2021/gm_mask_2mm.nii.gz -------------------------------------------------------------------------------- /papers_scripts/neuroimage2021/utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Utilities for script_ibc.py, for generalization to upcoming script_hcp.py 3 | 4 | Author: Bertrand Thirion, 2020 5 | """ 6 | from nilearn.decomposition import DictLearning 7 | import numpy as np 8 | from joblib import Parallel, delayed 9 | 10 | def make_dictionary(rs_fmri, n_components, cache, mask, n_jobs=1): 11 | dict_learning = DictLearning(n_components=n_components, 12 | memory=cache, memory_level=2, 13 | verbose=1, random_state=0, n_epochs=1, 14 | mask=mask, n_jobs=n_jobs) 15 | dict_learning.fit(rs_fmri) 16 | return dict_learning.components_img_, dict_learning.components_ 17 | 18 | 19 | def adapt_components(Y, subject, rs_fmri_db, masker, n_dim): 20 | rs_scans = rs_fmri_db[rs_fmri_db.subject == subject].path 21 | X_ = np.zeros_like(Y) 22 | for scan in rs_scans.values: 23 | X = masker.transform(scan) 24 | U, S, V = np.linalg.svd(X, 0) 25 | Vk = V[:n_dim] 26 | X_ += Y.dot(Vk.T).dot(Vk) 27 | return X_ 28 | 29 | 30 | def make_parcellation(ward, rs_fmri): 31 | indexes = np.random.randint(len(rs_fmri), size=5) 32 | ward.fit([rs_fmri[j] for j in indexes]) 33 | return ward.labels_img_ 34 | 35 | 36 | def make_parcellations(ward, rs_fmri, n_parcellations, n_jobs): 37 | parcellations = Parallel(n_jobs=n_jobs)(delayed(make_parcellation)( 38 | ward, rs_fmri) for b in range(n_parcellations)) 39 | return parcellations 40 | 41 | 42 | def predict_Y_oneparcel(parcellations, dummy_masker, train_index, 43 | n_parcels, 
X, models, b): 44 | labels = np.ravel(dummy_masker.transform(parcellations[b])) 45 | Y_pred = np.zeros((models[0][0].shape[0], labels.size)) 46 | for q in range(n_parcels): 47 | parcel = labels == q + 1 48 | for i in train_index: 49 | Y_pred[:, parcel] += np.dot( 50 | X.T[parcel], models[i][b * n_parcels + q].T).T 51 | return Y_pred 52 | 53 | 54 | def predict_Y_multiparcel(parcellations, dummy_masker, train_index, 55 | n_parcels, Y, X, models, n_jobs): 56 | n_parcellations = len(parcellations) 57 | #Y_preds = Parallel(n_jobs=n_jobs)(delayed(predict_Y_oneparcel)( 58 | # parcellations, dummy_masker, train_index, 59 | # n_parcels, X, models, b) for b in range(n_parcellations)) 60 | Y_preds = [] 61 | for b in range(n_parcellations): 62 | Y_pred_ = predict_Y_oneparcel( 63 | parcellations, dummy_masker, train_index, 64 | n_parcels, X, models, b) 65 | Y_preds.append(Y_pred_) 66 | # 67 | Y_preds = np.array(Y_preds) 68 | Y_pred = np.sum(Y_preds, 0) / (n_parcellations * len(train_index)) 69 | return Y_pred 70 | 71 | 72 | def permuted_score(Y, Y_pred, Y_baseline, n_permutations, seed=1): 73 | rng = np.random.RandomState(seed) 74 | n_contrasts = Y.shape[1] 75 | permuted_con_score = [] 76 | permuted_vox_score = [] 77 | for b in range(n_permutations): 78 | permutation = rng.permutation(n_contrasts) 79 | Y_ = Y[:, permutation] 80 | vox_score = 1 - np.sum((Y_ - Y_pred) ** 2, 0) / np.sum(( 81 | Y_ - Y_baseline.mean(0)) ** 2, 0) 82 | con_score = 1 - np.sum((Y_.T - Y_pred.T) ** 2, 0) / np.sum( 83 | (Y_.T - Y_baseline.T.mean(0)) ** 2, 0) 84 | permuted_con_score.append(con_score) 85 | #permuted_vox_score.append(vox_score) 86 | return permuted_con_score#, permuted_vox_score 87 | 88 | 89 | def fit_regressions(individual_components, data, parcellations, 90 | dummy_masker, clf, i): 91 | n_parcellations = len(parcellations) 92 | X = individual_components[i] 93 | Y = data[i] 94 | model = [] 95 | n_parcellations = len(parcellations) 96 | for b in range(n_parcellations): 97 | labels = np.ravel( 98 | dummy_masker.transform(parcellations[b]).astype(np.int)) 99 | n_parcels = len(np.unique(labels)) 100 | for q in range(n_parcels): 101 | parcel = labels == q + 1 102 | model_ = clf.fit(X.T[parcel], Y.T[parcel]).coef_ 103 | model.append(model_) 104 | return model 105 | 106 | 107 | def predict_Y(parcellations, dummy_masker, n_parcels, X, average_models): 108 | n_parcellations = len(parcellations) 109 | Y_preds = [] 110 | for b in range(n_parcellations): 111 | labels = np.ravel(dummy_masker.transform(parcellations[b])) 112 | Y_pred = np.zeros((average_models[0].shape[0], labels.size)) 113 | for q in range(n_parcels): 114 | parcel = labels == q + 1 115 | Y_pred[:, parcel] = np.dot( 116 | X.T[parcel], average_models[b * n_parcels + q].T).T 117 | Y_preds.append(Y_pred) 118 | Y_preds = np.array(Y_preds) 119 | Y_pred = np.sum(Y_preds, 0) / (n_parcellations) 120 | return Y_pred 121 | -------------------------------------------------------------------------------- /papers_scripts/scidata2018/README.md: -------------------------------------------------------------------------------- 1 | # Note 2 | 3 | This folder contains the scripts featuring the analysis pipeline undertaken for [Pinho, A. L. *et al.* (2018). Individual Brain Charting, a high-resolution fMRI dataset for cognitive mapping. *Sci Data* **5** , 180105](https://www.nature.com/articles/sdata2018105). 
4 | -------------------------------------------------------------------------------- /papers_scripts/scidata2018/brain_coverage.py: -------------------------------------------------------------------------------- 1 | """ 2 | Analysis of the coverage of brain maps. 3 | 4 | Authors: Bertrand Thirion, 2017 5 | """ 6 | import numpy as np 7 | import os 8 | import glob 9 | import pandas as pd 10 | from joblib import Memory, Parallel, delayed 11 | import nibabel as nib 12 | from nilearn.input_data import NiftiMasker 13 | from nilearn import plotting 14 | 15 | DERIVATIVES = '/neurospin/ibc/derivatives' 16 | SMOOTH_DERIVATIVES = '/neurospin/ibc/smooth_derivatives' 17 | SUBJECTS = [os.path.basename(full_path) for full_path in 18 | sorted(glob.glob(os.path.join(DERIVATIVES, 'sub-*')))] 19 | CONDITIONS = pd.DataFrame().from_csv('../processing/conditions.tsv', sep='\t') 20 | cache = '/neurospin/tmp/bthirion' 21 | mem = Memory(cachedir=cache, verbose=0) 22 | 23 | def stouffer(x): 24 | return x.mean(0) * np.sqrt(x.shape[0]) 25 | 26 | def eoi_parser(derivatives=DERIVATIVES): 27 | """Generate a dataframe that contains all the data corresponding 28 | to the archi, hcp and rsvp_language acquisitions""" 29 | paths = [] 30 | subjects = [] 31 | sessions = [] 32 | modalities = [] 33 | contrasts = [] 34 | tasks = [] 35 | acquisitions = [] 36 | 37 | # T1 images 38 | imgs_ = sorted(glob.glob(os.path.join( 39 | derivatives, 'sub-*/ses-*/anat/wsub*_T1w_nonan.nii.gz'))) 40 | for img in imgs_: 41 | session = img.split('/')[-3] 42 | subject = img.split('/')[-4] 43 | paths.append(img) 44 | sessions.append(session) 45 | subjects.append(subject) 46 | modalities.append('T1') 47 | contrasts.append('t1') 48 | tasks.append('') 49 | acquisitions.append('') 50 | 51 | # fixed-effects activation images 52 | task_list = CONDITIONS.task.unique() 53 | for acq in ['ap', 'pa', 'ffx']: 54 | for task in task_list: 55 | for subject in SUBJECTS: 56 | imgs_ = glob.glob(os.path.join( 57 | derivatives, '%s/*/res_stats_%s_%s/z_score_maps/effects_interest.nii.gz' % 58 | (subject, task, acq))) 59 | imgs_.sort() 60 | 61 | task_ = task 62 | if task_ == 'language_': 63 | task_ = 'rsvp_language' 64 | 65 | for img in imgs_: 66 | session = img.split('/')[5] 67 | paths.append(img) 68 | sessions.append(session) 69 | subjects.append(img.split('/')[4]) 70 | modalities.append('bold') 71 | contrasts.append('effects_interest') 72 | tasks.append(task_) 73 | acquisitions.append(acq) 74 | 75 | # create a dictionary with all the information 76 | db_dict = dict( 77 | path=paths, 78 | subject=subjects, 79 | modality=modalities, 80 | contrast=contrasts, 81 | session=sessions, 82 | task=tasks, 83 | acquisition=acquisitions, 84 | ) 85 | # create a FataFrame out of the dictionary and write it to disk 86 | db = pd.DataFrame().from_dict(db_dict) 87 | return db 88 | 89 | if __name__ == '__main__': 90 | db = eoi_parser(derivatives=SMOOTH_DERIVATIVES) 91 | mask_gm = nib.load(os.path.join(DERIVATIVES, 'group', 'anat', 'gm_mask.nii.gz')) 92 | masker = NiftiMasker(mask_img=mask_gm).fit() 93 | df = db[db.modality == 'bold'] 94 | X = masker.transform(df.path.values) 95 | 96 | # per-subject EoI 97 | for subject in SUBJECTS: 98 | anat = db[db.modality == 'T1'][db.subject == subject].path.values[0] 99 | z = stouffer(X[df.subject.values == subject]) 100 | plotting.plot_stat_map(masker.inverse_transform(z), bg_img=anat, threshold=5.) 
101 | 102 | z = stouffer(X) 103 | plotting.plot_stat_map(masker.inverse_transform(z), threshold=5., 104 | display_mode='x', cut_coords=5, 105 | output_file=os.path.join('output/coverage.pdf')) 106 | plotting.show() 107 | -------------------------------------------------------------------------------- /papers_scripts/scidata2018/cognitive_atlas.csv: -------------------------------------------------------------------------------- 1 | Tasks Contrasts visual form recognition feature comparison response selection response execution emotional face recognition animacy perception animacy decision motion detection working memory left finger response execution left toe response execution tongue response execution right finger response execution right toe response execution updating visual tool recognition tool maintenance body maintenance visual body recognition place maintenance visual place recognition face maintenance visual face recognition auditory sentence recognition story comprehension auditory arithmetic processing visual arithmetic processing reward processing punishment processing relational comparison visual pattern recognition visual pseudo word recognition visual word recognition word maintenance sentence processing syntactic parsing string maintenance visual string recognition vertical checkerboard horizontal checkerboard Visual tracking grasping Hand chirality recognition Hand side recognition sounds perception voice perception theory of mind emotion expression identification gender discrimination facial trustworthiness recognition 2 | HCP emotion shape 1 1 1 1 3 | HCP emotion face 1 1 1 1 4 | HCP social random 1 1 1 5 | HCP social mental 1 1 1 1 1 6 | HCP motor cue 1 1 7 | HCP motor tongue 1 1 8 | HCP motor left_foot 1 1 9 | HCP motor right_foot 1 1 10 | HCP motor left_hand 1 1 11 | HCP motor right_hand 1 1 12 | HCP WM 2back_body 1 1 1 1 1 13 | HCP WM 2back_face 1 1 1 1 1 14 | HCP WM 2back_place 1 1 1 1 1 15 | HCP WM 2back_tools 1 1 1 1 1 16 | HCP WM 0back_body 1 1 1 1 17 | HCP WM 0back_face 1 1 1 1 18 | HCP WM 0back_place 1 1 1 1 19 | HCP WM 0back_tools 1 1 1 1 20 | HCP language story 1 1 1 1 21 | HCP language math 1 1 1 22 | HCP Gambling punishment 1 1 1 23 | HCP Gambling reward 1 1 1 24 | HCP relational match 1 1 1 1 1 25 | HCP relational relational 1 1 1 1 1 1 26 | RSVP language complex 1 1 1 1 1 27 | RSVP language simple 1 1 1 1 28 | RSVP language word_list 1 1 1 29 | RSVP language jabberwocky 1 1 1 30 | RSVP language pseudoword_list 1 1 31 | RSVP language consonant_string 1 1 1 32 | RSVP language language_probe 1 1 33 | Archi standard horizontal_checkerboard 1 34 | Archi standard vertical_checkerboard 1 35 | Archi standard audio_left_button_press 1 1 1 1 36 | Archi standard video_left_button_press 1 1 1 37 | Archi standard audio_right_button_press 1 1 1 1 38 | Archi standard video_right_button_press 1 1 1 39 | Archi standard audio_computation 1 40 | Archi standard video_computation 1 1 41 | Archi standard audio_sentence 1 42 | Archi standard video_sentence 1 1 43 | Archi spatial saccades 1 44 | Archi spatial object_orientation 1 1 1 1 45 | Archi spatial object_grasp 1 1 1 1 1 46 | Archi spatial rotation_hand 1 1 1 47 | Archi spatial rotation_side 1 1 1 48 | Archi social triangle_mental 1 1 1 49 | Archi social triangle_random 1 50 | Archi social speech_sound 1 51 | Archi social non_speech_sound 1 52 | Archi social mechanistic_audio 1 1 53 | Archi social mechanistic_video 1 1 54 | Archi social false_belief_audio 1 1 1 55 | Archi social false_belief_video 1 1 1 56 | Archi 
emotional expression_gender 1 1 57 | Archi emotional expression_intention 1 1 58 | Archi emotional expression_control 1 59 | Archi emotional face_gender 1 1 60 | Archi emotional face_trusty 1 1 61 | Archi emotional face_control 1 62 | -------------------------------------------------------------------------------- /papers_scripts/scidata2018/snapshots.py: -------------------------------------------------------------------------------- 1 | """ 2 | This script generate snapshots of brain activity for the different tasks across subjects 3 | """ 4 | import os 5 | import matplotlib.image as mpimg 6 | from nilearn import plotting 7 | import matplotlib.pyplot as plt 8 | import pandas as pd 9 | from nistats.second_level_model import SecondLevelModel 10 | import numpy as np 11 | 12 | 13 | db = data_parser(derivatives=SMOOTH_DERIVATIVES) 14 | mask_gm = nib.load(os.path.join(DERIVATIVES, 'group', 'anat', 'gm_mask.nii.gz')) 15 | glm = SecondLevelModel(mask=mask_gm) 16 | BETTER_NAMES = '' 17 | 18 | 19 | 20 | write_dir = '' 21 | sorted_contrasts = '' 22 | 23 | for task in sorted_contrasts.keys(): 24 | task_dir = os.path.join(write_dir, task) 25 | if not os.path.exists(task_dir): 26 | os.mkdir(task_dir) 27 | contrasts = sorted_contrasts[task] 28 | n_contrasts = len(contrasts) 29 | # First do the random effects glass brain figure 30 | for i, contrast in enumerate(contrasts): 31 | contrast_mask = (db.contrast.values == contrast) 32 | dmtx = pd.DataFrame(np.ones(np.sum(contrast_mask))) 33 | glm.fit(list(db.path[contrast_mask].values), design_matrix=dmtx) 34 | grp_stat = glm.compute_contrast([1], stat_type='t', output_type='z_score') 35 | plotting.plot_glass_brain( 36 | grp_stat, display_mode='z', title=BETTER_NAMES[contrast], 37 | threshold=3., vmax=8, plot_abs=False, black_bg=True, 38 | output_file='/tmp/rfx_%s.png' % contrast) 39 | plt.figure(figsize=(7, 2 * n_contrasts + 1), facecolor='k', edgecolor='k') 40 | delta = (4 * n_contrasts - 1.) / (4 * n_contrasts ** 2) 41 | for i, contrast in enumerate(contrasts): 42 | ax = plt.axes([0., 1 - (i + 1) * delta, 1., delta], axisbg='k') 43 | ax.imshow(mpimg.imread('/tmp/rfx_%s.png' % contrast)) 44 | plt.axis('off') 45 | ax = plt.axes([0.02, 0.0, .8, 1./ (8 * n_contrasts)], 46 | axisbg='k') 47 | _draw_colorbar(ax, vmax=8, offset=3., orientation='horizontal', fontsize=14) 48 | ax = plt.axes([0.84, .01, .15, 1./ (8 * n_contrasts)], axisbg='k') 49 | ax.text(0, 0, 'z-scale', color='w', fontsize=14) 50 | ax.axis('off') 51 | plt.savefig(os.path.join(task_dir, 'glass_brain_rfx_colorbar_%s.pdf' % task), 52 | facecolor='k', edgecolor='k', transparent=True, frameon=False, 53 | pad_inches=0.) 54 | plt.close() 55 | -------------------------------------------------------------------------------- /papers_scripts/scidata2020/README.md: -------------------------------------------------------------------------------- 1 | # Note 2 | 3 | This folder contains the scripts featuring the analysis pipeline undertaken for [Pinho, A. L. *et al.* (2020). Individual Brain Charting dataset extension, second release of high-resolution fMRI data for cognitive mapping. Sci Data **7**, 353](https://doi.org/10.1038/s41597-020-00670-4). 
4 | -------------------------------------------------------------------------------- /papers_scripts/scidata2020/behavioral_data/README.md: -------------------------------------------------------------------------------- 1 | ## Notes about scripts for behavioral-data analysis 2 | 3 | Author: Ana Luisa Pinho 4 | e-mail: ana.pinho@inria.fr 5 | 6 | These scripts parse and analyse individual behavioral data extracted from `.xpd` files generated by the protocols _mtt_ and _self_, `.mat` files generated by the protocol _tom_ and `.dat` files generated by the protocols _vstm_ and _enumeration_. 7 | 8 | These protocols can be found in the repository on GitHub dedicated to the IBC behavioral protocols: https://github.com/hbp-brain-charting/public_protocols 9 | -------------------------------------------------------------------------------- /papers_scripts/scidata2020/behavioral_data/behav_utils.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import csv 4 | import numpy as np 5 | 6 | 7 | def calc_score(answer_list, pt_fdbk_list): 8 | """ 9 | Calculate the final score of the participant based on the number of 10 | correct answers. 11 | """ 12 | answer = np.array(answer_list) 13 | pt_fdbk = np.array(pt_fdbk_list) 14 | score_list = np.where(answer == pt_fdbk, 1, 0) 15 | score_list = [float(i) for i in score_list] 16 | score = np.sum(score_list) 17 | total_score = (score / answer.size) * 100 18 | return total_score 19 | 20 | 21 | def generate_csv(pt_scores, runs_number, header, output, numerosity=None, 22 | n_trials=None): 23 | """ 24 | Generate csv file to be imported by data_paper2.tex 25 | """ 26 | # Prepare some labeling arrays for table 27 | runs_number = [r for r in runs_number] 28 | runs_number.insert(len(runs_number), 'Mean') 29 | if numerosity is not None: 30 | if len(runs_number) % 2 == 0: 31 | row = len(runs_number) // 2 - 1 32 | else: 33 | row = len(runs_number) // 2 34 | num_column = [''] * len(runs_number) 35 | num_column[row] = numerosity 36 | # Stack all arrays in a table 37 | table = np.vstack((header, np.vstack((num_column, runs_number, 38 | pt_scores)).T)) 39 | elif n_trials is not None: 40 | # Stack all arrays in a table 41 | n_trials.insert(len(runs_number), '-') 42 | table = np.vstack((header, np.vstack((runs_number, n_trials, 43 | pt_scores)).T)) 44 | else: 45 | # Stack all arrays in a table 46 | table = np.vstack((header, np.vstack((runs_number, pt_scores)).T)) 47 | # Save table in the output file 48 | with open(output, 'w') as fp: 49 | a = csv.writer(fp, delimiter=',') 50 | a.writerows(table) 51 | -------------------------------------------------------------------------------- /papers_scripts/scidata2020/behavioral_data/success_rate_enumeration.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Compute success rates for the performances of the IBC participants 4 | for the Enumeration task 5 | 6 | author: Ana Luisa Pinho 7 | e-mail: ana.pinho@inria.fr 8 | 9 | Compatibility: Python 3.5 10 | 11 | Date: September 2019 12 | """ 13 | 14 | import os 15 | # import sys 16 | import glob 17 | import csv 18 | import numpy as np 19 | 20 | # Add momentarily the parent dir to the path in order to call 'scores' module 21 | # new_path = os.path.abspath(os.pardir) 22 | # if new_path not in sys.path: 23 | # sys.path.append(new_path) 24 | 25 | from behav_utils import calc_score, generate_csv 26 | 27 | 28 | def enumeration_scores_extractor(participants, 
dir_path, numerosity = None): 29 | all_pt_scores = [] 30 | all_pt_means = [] 31 | # For each participant... 32 | for participant in participants: 33 | log_path = os.path.abspath(os.path.join(dir_path, 34 | 'sub-' + '%02d' % participant, 35 | 'knops/enumeration')) 36 | # Load the files 37 | log_files = glob.glob(os.path.join(log_path, '*.dat')) 38 | log_files.sort() 39 | # For every log file: 40 | runs = [] 41 | all_scores = [] 42 | count = 0 43 | for log_fname in log_files: 44 | print(log_fname) 45 | log_file = [line for line in csv.reader(open(log_fname), 46 | delimiter='\t')] 47 | # Discard rows pertaining to trials from a run that didn't finish 48 | discarded_trials = len(log_file[1:]) % 96 49 | if discarded_trials == 0: 50 | data_list = log_file[1:] 51 | else: 52 | data_list = log_file[1:][:-discarded_trials] 53 | # Start reading the log files row-by-row 54 | correct_answers = [] 55 | answers = [] 56 | for dt, data in enumerate(data_list): 57 | if numerosity is not None: 58 | if numerosity == int(data[6]): 59 | correct_answers.append(data[6]) 60 | answers.append(data[11]) 61 | else: 62 | correct_answers.append(data[6]) 63 | answers.append(data[11]) 64 | # Compute scores for each run 65 | if dt in [95, 191]: 66 | score = round(calc_score(correct_answers, answers), 0) 67 | all_scores.append(score) 68 | runs.append(str(count)) 69 | count = count + 1 70 | # Clean arrays 71 | correct_answers = [] 72 | answers = [] 73 | # Compute mean of scores in all runs for each participant 74 | score_mean = np.rint(np.mean(all_scores)) 75 | all_scores.append(score_mean) 76 | all_pt_means.append(score_mean) 77 | # Append total average per participant 78 | all_scores = ["%d" % s for s in all_scores] 79 | all_pt_scores.append(all_scores) 80 | # Compute mean and standard deviation for all participants 81 | group_mean = np.rint(np.mean(all_pt_means)) 82 | group_mean = "%d" % group_mean 83 | group_std = np.rint(np.std(all_pt_means)) 84 | group_std = "%d" % group_std 85 | return all_pt_scores, runs, group_mean, group_std 86 | 87 | 88 | # # %% 89 | # # ========================== GENERAL PARAMETERS ============================= 90 | 91 | # Inputs 92 | pt_list = [1, 4, 5, 6, 7, 9, 11, 12, 13, 14, 15] 93 | 94 | HERE = os.path.dirname(__file__) 95 | intermediate_folder = 'neurospin_data/info/' 96 | parent_dir = os.path.abspath(os.path.join(HERE, os.pardir, 97 | intermediate_folder)) 98 | 99 | task_name = 'enumeration' 100 | 101 | # Outputs 102 | pt_full = ['one', 'four', 'five', 'six', 'seven', 'nine', 'eleven', 'twelve', 103 | 'thirteen', 'fourteen', 'fifteen'] 104 | 105 | HEADER = ['sub' + sub for sub in pt_full] 106 | HEADER.insert(0, 'run') 107 | HEADER.insert(0, 'numerosity') 108 | 109 | main_dir = '/home/analu/mygit/ibc_ghub/' + \ 110 | 'admin/papers/data_paper2/behavioral_results' 111 | 112 | # # %% 113 | # # ================= COMPUTE SCORES AND GENERATE CSV FILES =================== 114 | 115 | # All numerosities 116 | print('Numerosities all together') 117 | participants_scores, runs_id, gmean_all, gstd_all = \ 118 | enumeration_scores_extractor(pt_list, parent_dir) 119 | 120 | # Create csv file with individual rates for numerosities all together 121 | csv_file = 'success_rate_' + task_name + '_' + 'all' + '.csv' 122 | output_path = os.path.join(main_dir, csv_file) 123 | generate_csv(participants_scores, runs_id, HEADER, output_path, 124 | numerosity='All numerosities') 125 | 126 | # Start table with group means and stds 127 | group_table = [['All numerosities', gmean_all, gstd_all]] 128 | 129 | # Every 
numerosity 130 | num = [1, 2, 3, 4, 5, 6, 7, 8] 131 | for n in num: 132 | print(n) 133 | participants_scores, runs_id, gmean, gstd = \ 134 | enumeration_scores_extractor(pt_list, parent_dir, numerosity = n) 135 | csv_file = 'success_rate_' + task_name + '_' + str(n) + '.csv' 136 | output_path = os.path.join(main_dir, csv_file) 137 | generate_csv(participants_scores, runs_id, HEADER, output_path, 138 | numerosity=str(n)) 139 | group_table.append([str(n), gmean, gstd]) 140 | 141 | # Create csv file with group rates for all numerosities 142 | group_table = np.vstack((['numerosity', 'groupmean', 'groupstd'], 143 | group_table)) 144 | group_csv = 'group_values_' + task_name + '.csv' 145 | group_path = os.path.join(main_dir, group_csv) 146 | # Save table in the output file 147 | with open(group_path, 'w') as fp: 148 | a = csv.writer(fp, delimiter=',') 149 | a.writerows(group_table) 150 | -------------------------------------------------------------------------------- /papers_scripts/scidata2020/behavioral_data/success_rate_self.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Compute success rates for the performances of the IBC participants 4 | during the recognition phase of the self task 5 | 6 | author: Ana Luisa Pinho 7 | e-mail: ana.pinho@inria.fr 8 | 9 | Compatibility: Python 3.5 10 | 11 | Date: September 2019 12 | """ 13 | 14 | import os 15 | # import sys 16 | import glob 17 | import csv 18 | import numpy as np 19 | 20 | # Add momentarily the parent dir to the path in order to call 'scores' module 21 | # new_path = os.path.abspath(os.pardir) 22 | # if new_path not in sys.path: 23 | # sys.path.append(new_path) 24 | 25 | from behav_utils import calc_score, generate_csv 26 | 27 | 28 | def self_scores_extractor(participants, dir_path): 29 | all_pt_scores = [] 30 | # For each participant... 31 | for participant in participants: 32 | log_path = os.path.abspath(os.path.join(dir_path, 33 | 'sub-' + '%02d' % participant, 34 | 'self')) 35 | # Load the files 36 | log_files = glob.glob(os.path.join(log_path, '*.xpd')) 37 | log_files.sort() 38 | # For every log file: 39 | runs = [] 40 | all_scores = [] 41 | trials_number = [] 42 | for log_fname in log_files: 43 | print(log_fname) 44 | log_file = [line for line in csv.reader(open( 45 | log_fname, encoding='iso-8859-1'), delimiter=',')] 46 | # Retrieve table from log_file 47 | for r, row in enumerate(log_file): 48 | if row[0] == str(participant): 49 | break 50 | # Discard last trials from log files of acq. 
when interrupted 51 | if len(log_file[r:]) < 533: 52 | discarded_trials = len(log_file[r:]) % 123 53 | else: 54 | discarded_trials = len(log_file[r:]) % 533 55 | if discarded_trials == 0: 56 | data_list = log_file[r:] 57 | else: 58 | data_list = log_file[r:][:-discarded_trials] 59 | # Extract data 60 | runn = [] 61 | trial_type = [] 62 | answers = [] 63 | right_answers = [] 64 | for d, data in enumerate(data_list): 65 | # Retrieve answers from "recognition" trials 66 | if data[5] == 'recognition': 67 | runn.append(data[1]) 68 | trial_type.append(data[6]) 69 | answers.append(data[10]) 70 | # Compute the scores for every run 71 | if (len(runn) > 0 and data[1] != runn[-1]) or \ 72 | d == len(data_list) - 1: 73 | right_answers = np.where(np.array(trial_type) == '0', 74 | 'g', 'y') 75 | score = round(calc_score(right_answers, answers), 2) 76 | trials_number.append(str(len(answers))) 77 | all_scores.append(score) 78 | runs.append(runn[-1]) 79 | runn = [] 80 | trial_type = [] 81 | answers = [] 82 | right_answers = [] 83 | # Compute mean of scores in all runs for each session 84 | # of each participant 85 | score_mean = np.rint(np.mean(all_scores)) 86 | all_scores.append(score_mean) 87 | # Append total average per participant 88 | all_scores = ["%d" % s for s in all_scores] 89 | all_pt_scores.append(all_scores) 90 | return all_pt_scores, runs, trials_number 91 | 92 | 93 | # # %% 94 | # # ========================== GENERAL PARAMETERS ============================= 95 | 96 | # Inputs 97 | pt_list = [1, 4, 5, 6, 7, 9, 11, 12, 13, 14, 15] 98 | 99 | HERE = os.path.dirname(__file__) 100 | intermediate_folder = 'neurospin_data/info/' 101 | parent_dir = os.path.abspath(os.path.join(HERE, os.pardir, 102 | intermediate_folder)) 103 | 104 | task_name = 'self' 105 | 106 | # Outputs 107 | pt_full = ['one', 'four', 'five', 'six', 'seven', 'nine', 'eleven', 'twelve', 108 | 'thirteen', 'fourteen', 'fifteen'] 109 | 110 | HEADER = ['sub' + sub for sub in pt_full] 111 | HEADER.insert(0, 'trials') 112 | HEADER.insert(0, 'run') 113 | 114 | main_dir = '/home/analu/mygit/ibc_ghub/' + \ 115 | 'admin/papers/data_paper2/behavioral_results' 116 | 117 | # # %% 118 | # # =========================== COMPUTE SCORES ================================ 119 | 120 | participants_scores, runs_id, trials = self_scores_extractor(pt_list, 121 | parent_dir) 122 | csv_file = 'success_rate_' + task_name + '.csv' 123 | output_path = os.path.join(main_dir, csv_file) 124 | generate_csv(participants_scores, runs_id, HEADER, output_path, 125 | n_trials = trials) 126 | -------------------------------------------------------------------------------- /papers_scripts/scidata2020/behavioral_data/success_rate_tom.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Compute success rates for the performances of the IBC participants 4 | for the theory-of-mind task 5 | 6 | author: Ana Luisa Pinho 7 | e-mail: ana.pinho@inria.fr 8 | 9 | Compatibility: Python 3.5 10 | 11 | Date: September 2019 12 | """ 13 | 14 | import os 15 | # import sys 16 | import glob 17 | import csv 18 | import numpy as np 19 | 20 | # Add momentarily the parent dir to the path in order to call 'scores' module 21 | # new_path = os.path.abspath(os.pardir) 22 | # if new_path not in sys.path: 23 | # sys.path.append(new_path) 24 | 25 | from behav_utils import calc_score, generate_csv 26 | 27 | 28 | def tom_scores_extractor(participants, dir_path, correct_answers_1, 29 | correct_answers_2): 30 | all_pt_scores = [] 31 | # 
For each participant... 32 | for participant in participants: 33 | log_path = os.path.abspath(os.path.join(dir_path, 34 | 'sub-' + '%02d' % participant, 35 | 'tom/tom')) 36 | # Load the files 37 | log_files = glob.glob(os.path.join(log_path, '*.mat')) 38 | log_files.sort() 39 | # For every log file, i.e. for every run: 40 | runs = [] 41 | all_scores = [] 42 | for ll, log_fname in enumerate(log_files): 43 | print(log_fname) 44 | data_list = [line for line in csv.reader(open(log_fname), 45 | delimiter=',')] 46 | answers = [] 47 | counter_onset = 0 48 | # Read it line by line... 49 | for dt, data in enumerate(data_list): 50 | # ...and retrieve answers 51 | if data_list[dt - 4 - counter_onset] == ["# name: key"]: 52 | if len(data) == 0: 53 | break 54 | else: 55 | counter_onset = counter_onset + 1 56 | answer = data[0].strip() 57 | answers.append(answer) 58 | # Estimate score for the present run 59 | if participant == 9: 60 | score = round(calc_score(correct_answers_2[ll], answers), 2) 61 | else: 62 | score = round(calc_score(correct_answers_1[ll], answers), 2) 63 | all_scores.append(score) 64 | runs.append(str(ll)) 65 | # Compute mean of scores in all runs for each participant 66 | score_mean = np.rint(np.mean(np.trim_zeros(all_scores))) 67 | all_scores.append(score_mean) 68 | # Append total average per participant 69 | all_scores = ["%d" % s for s in all_scores] 70 | all_pt_scores.append(all_scores) 71 | return all_pt_scores, runs 72 | 73 | 74 | # # %% 75 | # # ========================== GENERAL PARAMETERS ============================= 76 | 77 | # Inputs 78 | pt_list = [1, 4, 5, 6, 7, 9, 11, 12, 13, 14, 15] 79 | 80 | HERE = os.path.dirname(__file__) 81 | intermediate_folder = 'neurospin_data/info/' 82 | parent_dir = os.path.abspath(os.path.join(HERE, os.pardir, 83 | intermediate_folder)) 84 | 85 | all_right_answers = [['False', 'True', 'True', 'True', 'True', 'True', 'False', 86 | 'False', 'True', 'False'], 87 | ['False', 'False', 'False', 'True', 'False', 'True', 88 | 'False', 'True', 'False', 'True']] 89 | 90 | converted_right_answers_vs1 = [] 91 | converted_right_answers_vs2 = [] 92 | for right_answ in all_right_answers: 93 | cright_answ_1 = [] 94 | cright_answ_2 = [] 95 | cright_answ_1 = np.where(np.array(right_answ) == 'True', '89', '71') 96 | cright_answ_2 = np.where(np.array(right_answ) == 'True', '30', '43') 97 | converted_right_answers_vs1.append(cright_answ_1) 98 | converted_right_answers_vs2.append(cright_answ_2) 99 | 100 | task_name = 'tom' 101 | 102 | # Outputs 103 | pt_full = ['one', 'four', 'five', 'six', 'seven', 'nine', 'eleven', 'twelve', 104 | 'thirteen', 'fourteen', 'fifteen'] 105 | 106 | HEADER = ['sub' + sub for sub in pt_full] 107 | HEADER.insert(0, 'run') 108 | 109 | main_dir = '/home/analu/mygit/ibc_ghub/' + \ 110 | 'admin/papers/data_paper2/behavioral_results' 111 | 112 | # # %% 113 | # # =========================== COMPUTE SCORES ================================ 114 | 115 | participants_scores, runs_id = tom_scores_extractor(pt_list, parent_dir, 116 | converted_right_answers_vs1, 117 | converted_right_answers_vs2) 118 | 119 | # Replace '0's by 'n/a' in scores of participants 120 | participants_scores = [np.where(np.array(s) == '0', 'n/a', s) 121 | for s in participants_scores] 122 | 123 | csv_file = 'success_rate_' + task_name + '.csv' 124 | output_path = os.path.join(main_dir, csv_file) 125 | generate_csv(participants_scores, runs_id, HEADER, output_path) 126 | -------------------------------------------------------------------------------- 
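
For reference, here is a minimal usage sketch of the two helpers from `behav_utils.py` that the success-rate scripts above share; the responses and scores below are made-up values:

```python
from behav_utils import calc_score, generate_csv

# calc_score compares the expected answers against the participant's
# responses and returns the percentage of matches
correct = ['g', 'y', 'g', 'g']
given = ['g', 'y', 'y', 'g']
print(calc_score(correct, given))  # 75.0 (3 of 4 answers match)

# generate_csv stacks one row of per-run scores per participant (the last
# entry of each row being that participant's mean) and appends a 'Mean'
# label to the run ids; the header needs one 'run' column plus one column
# per participant
pt_scores = [['75', '80', '78'], ['60', '70', '65']]
runs = ['0', '1']
header = ['run', 'subone', 'subtwo']
generate_csv(pt_scores, runs, header, '/tmp/success_rate_demo.csv')
```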
/papers_scripts/scidata2020/neuroimaging_data/bids_postprocessed.json: -------------------------------------------------------------------------------- 1 | {"ArchiEmotional": ["ArchiEmotional"], "HcpLanguage": ["HcpLanguage"], "HcpSocial": ["HcpSocial"], "ArchiSpatial": ["ArchiSpatial"], "HcpMotor": ["HcpMotor"], "theory_of_mind": ["theory_of_mind"], "HcpWm": ["HcpWm"], "RSVPLanguage": ["RSVPLanguage"], "self": ["self"], "Enumeration": ["Enumeration"], "VSTM": ["VSTM"], "ArchiSocial": ["ArchiSocial"], "HcpGambling": ["HcpGambling"], "EmotionalPain": ["EmotionalPain"], "Bang": ["Bang"], "preference": ["PreferenceFood", "PreferencePaintings", "PreferenceFaces", "PreferenceHouses"], "MTTNS": ["MTTNS"], "MTTWE": ["MTTWE"], "HcpEmotion": ["HcpEmotion"], "Hcprelational": ["HcpRelational"], "PainMovie": ["PainMovie"], "ArchiStandard": ["ArchiStandard"], "PreferenceFood": ["PreferenceFood"], "PreferencePaintings": ["PreferencePaintings"], "PreferenceFaces": ["PreferenceFaces"], "PreferenceHouses": ["PreferenceHouses"]} 2 | -------------------------------------------------------------------------------- /papers_scripts/scidata2020/neuroimaging_data/bids_preprocessed.json: -------------------------------------------------------------------------------- 1 | {"ArchiEmotional": ["ArchiEmotional"], "HcpLanguage": ["HcpLanguage"], "HcpSocial": ["HcpSocial"], "ArchiSpatial": ["ArchiSpatial"], "HcpMotor": ["HcpMotor"], "TheoryOfMind": ["TheoryOfMind"], "Hcpwm": ["HcpWm"], "RSVPLanguage": ["RSVPLanguage"], "self": ["Self"], "Enumeration": ["Enumeration"], "VSTM": ["VSTM"], "ArchiSocial": ["ArchiSocial"], "HcpGambling": ["HcpGambling"], "EmotionalPain": ["EmotionalPain"], "Bang": ["Bang"], "preference": ["PreferenceFood", "PreferencePaintings", "PreferenceFaces", "PreferenceHouses"], "MTTNS": ["MTTNS"], "MTTWE": ["MTTWE"], "HcpEmotion": ["HcpEmotion"], "HcpRelational": ["HcpRelational"], "PainMovie": ["PainMovie"], "ArchiStandard": ["ArchiStandard"]} 2 | -------------------------------------------------------------------------------- /papers_scripts/scidata2023/ibc_fastsrm_utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Utilities to run FastSRM in the IBC dataset 3 | 4 | Author: Ana Luisa Pinho 5 | 6 | Created: October 2020 7 | Last revision: May 2021 8 | 9 | Compatibility: Python 3.9.1 10 | """ 11 | 12 | import os 13 | import glob 14 | import re 15 | import numpy as np 16 | 17 | from ibc_public.utils_data import DERIVATIVES 18 | 19 | 20 | def flatten(li): 21 | return sum(([x] if not isinstance(x, list) else flatten(x) 22 | for x in li), []) 23 | 24 | 25 | def reshape_preprocdata(participants_list, tasks, preprocdata, 26 | input_type='vol'): 27 | """ 28 | Return list of lists of lists: (n_subjects, n_tasks, n_runs) 29 | """ 30 | files = [] 31 | for p in participants_list: 32 | pt_files = [] 33 | for t in tasks: 34 | task_files = [] 35 | if input_type == 'vol': 36 | fname = 'wrdcsub-%02d_ses-*_task-%s' % (p, t) + \ 37 | '_dir-*_run-*_bold_masked.npy' 38 | fpath = os.path.join(preprocdata, fname) 39 | match_expression = '.*run-(..)_bold_masked.npy' 40 | else: 41 | assert input_type == 'surf' 42 | if preprocdata == DERIVATIVES: 43 | fname = 'rdcsub-%02d_ses-*_task-%s' % (p, t) + \ 44 | '_dir-*_run-*_bold_fsaverage_*' 45 | fpath = os.path.join(preprocdata, 'sub-*/ses-*/freesurfer', 46 | fname) 47 | else: 48 | fname = 'rdcsub-%02d_ses-*_task-%s' % (p, t) + \ 49 | '_dir-*_run-*_bold_fsaverage*' 50 | fpath = os.path.join(preprocdata, fname) 51 | 
match_expression = '.*run-(..)_bold_fsaverage*' 52 | # List paths 53 | task_files = glob.glob(fpath) 54 | # Sort in ascending order by run number 55 | run_numbers = [int(re.match(match_expression, 56 | task_file).groups()[0]) 57 | for task_file in task_files] 58 | indices_order = flatten([ 59 | [r for r, run_number in enumerate(run_numbers, 1) 60 | if run_number == j] 61 | for j in np.arange(1, len(run_numbers) + 1)]) 62 | task_files_sorted = [task_files[k-1] for k in indices_order] 63 | # Append list of runs for one task 64 | pt_files.append(task_files_sorted) 65 | files.append(pt_files) 66 | return files 67 | 68 | 69 | def stacker(subjects_set, tasks_set, data_paths): 70 | """ 71 | Inputs a list of lists of lists w/ shape (n_subjects, n_tasks, n_runs) 72 | Outputs numpy array of paths w/ shape (n_subjects, n_runs) 73 | Note: runs follow order of tasks and run number within tasks 74 | """ 75 | output_array = [] 76 | for ss, subject_set in enumerate(subjects_set): 77 | task_array = [] 78 | for task_set in tasks_set: 79 | task_array.extend(data_paths[subject_set][task_set]) 80 | if ss < len(subjects_set) - 1: 81 | output_array.append(task_array) 82 | elif ss == len(subjects_set) - 1 and ss != 0: 83 | output_array = np.vstack((output_array, task_array)) 84 | else: 85 | assert len(subjects_set) - 1 == 0 86 | output_array = np.array(task_array) 87 | 88 | return output_array 89 | -------------------------------------------------------------------------------- /papers_scripts/scidata2023/script_retinotopic_maps.py: -------------------------------------------------------------------------------- 1 | """ 2 | This script gathers individual retinotopic maps into one single image 3 | """ 4 | import glob 5 | import os 6 | from os.path import join as pjoin 7 | from ibc_public.utils_data import DERIVATIVES 8 | import matplotlib.pyplot as plt 9 | import numpy as np 10 | 11 | wedge_maps = sorted(glob.glob(pjoin( 12 | DERIVATIVES, '*', '*', 'res_fsaverage7_retinotopy_ffx', 'stat_maps', 13 | 'phase_wedge.png'))) 14 | ring_maps = sorted(glob.glob(pjoin( 15 | DERIVATIVES, '*', '*', 'res_fsaverage7_retinotopy_ffx', 'stat_maps', 16 | 'phase_ring.png'))) 17 | 18 | n_subjects = len(wedge_maps) 19 | 20 | """ 21 | x, y = np.linspace(-1, 1, 101)[np.newaxis], np.linspace(-1, 1, 101)[np.newaxis].T 22 | r, t = np.sqrt(x ** 2 + y ** 2), np.arctan2(x, y) 23 | r, t = r[r < 1], t[r < 1] 24 | plt.figure(figsize=(1, 1), facecolor='w', edgecolor='w') 25 | plt.plot(r * np.cos(t), r * np.sin(t), r, '.') 26 | """ 27 | 28 | import matplotlib.tri as tri 29 | import math 30 | n_angles = 100 31 | n_radii = 20 32 | min_radius = 0.0 33 | radii = np.linspace(min_radius, 1., n_radii) 34 | 35 | angles = np.linspace(0, 2 * math.pi, n_angles, endpoint=False) 36 | angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1) 37 | angles[:, 1::2] += math.pi / n_angles 38 | 39 | x = (radii * np.sin(angles)).flatten() 40 | y = (-radii * np.cos(angles)).flatten() 41 | z1 = ((radii > 0) * angles).flatten() 42 | z2 = (radii * (angles > -1)).flatten() 43 | 44 | # Create the Triangulation; no triangles so Delaunay triangulation created. 45 | triang = tri.Triangulation(x, y) 46 | 47 | # Mask off unwanted triangles. 48 | xmid = x[triang.triangles].mean(axis=1) 49 | ymid = y[triang.triangles].mean(axis=1) 50 | mask = np.where(xmid * xmid + ymid * ymid < min_radius * min_radius, 1, 0) 51 | triang.set_mask(mask) 52 | 53 | # Illustrate Gouraud shading. 
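# The two small figures below are the colour legends for the montage:
# tripcolor with shading='gouraud' interpolates values across triangle
# vertices, so z1 (the polar angle) renders as a smooth colour wheel for
# the wedge maps and z2 (the radius) as a smooth eccentricity disc for
# the ring maps; both are saved to /tmp and re-read as insets further down.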
54 | wedge = plt.figure(figsize=(1, 1)) 55 | plt.gca().set_aspect('equal') 56 | plt.tripcolor(triang, z1, shading='gouraud', cmap='RdBu_r') 57 | plt.axis('off') 58 | plt.savefig('/tmp/wedge.png', dpi=300) 59 | 60 | ring = plt.figure(figsize=(1, 1)) 61 | plt.gca().set_aspect('equal') 62 | plt.tripcolor(triang, z2, shading='gouraud', cmap='RdBu_r') 63 | plt.axis('off') 64 | plt.savefig('/tmp/ring.png', dpi=300) 65 | 66 | plt.figure(figsize=(8, 6), facecolor='w', edgecolor='w') 67 | 68 | delta = 2./ n_subjects 69 | for i, img in enumerate(wedge_maps): 70 | ax = plt.axes([np.mod(i, 2) * .25, (i // 2) * delta, .25, delta]) 71 | img = plt.imread(img) 72 | ax.imshow(img) 73 | plt.axis('off') 74 | 75 | for i, img in enumerate(ring_maps): 76 | ax = plt.axes([0.5 + np.mod(i, 2) * .25, (i // 2) * delta, .25, delta]) 77 | img = plt.imread(img) 78 | ax.imshow(img) 79 | plt.axis('off') 80 | 81 | ax = plt.axes([.2, .45, .1, .1]) 82 | ax.imshow(plt.imread('/tmp/wedge.png')) 83 | plt.axis('off') 84 | ax = plt.axes([.7, .45, .1, .1]) 85 | ax.imshow(plt.imread('/tmp/ring.png')) 86 | plt.axis('off') 87 | plt.plot([.5, .5], [-1, 2], linewidth=2, color='k') 88 | plt.axis('off') 89 | 90 | 91 | write_dir = '/neurospin/tmp/bthirion' 92 | plt.savefig(os.path.join(write_dir, 'retino_montage.pdf'), 93 | facecolor='w', dpi=300) 94 | 95 | plt.show(block=False) 96 | -------------------------------------------------------------------------------- /papers_scripts/scidata2023/surfimg_visualization.py: -------------------------------------------------------------------------------- 1 | """ 2 | Interactive display of surface FastSRM results and Glasser atlas in the browser 3 | 4 | Authors: Ana Luisa Pinho 5 | 6 | Created: May 2021 7 | 8 | Compatibility: Python 3.7 9 | """ 10 | 11 | import os 12 | 13 | import numpy as np 14 | 15 | from nilearn.surface import load_surf_data 16 | from nilearn.datasets import fetch_surf_fsaverage 17 | from nilearn.plotting import plot_surf_stat_map, view_surf 18 | 19 | 20 | # Some paths 21 | this_dir = os.path.dirname(os.path.abspath(__file__)) 22 | atlas_path = os.path.join(this_dir, 'data_paper3_results', 'glasser_atlas') 23 | results_path = os.path.join( 24 | this_dir, 'data_paper3_results', 'second_level_surface_encoding') 25 | 26 | # Load the high-resolution fsaverage mesh (163842 nodes) 27 | fsaverage = fetch_surf_fsaverage(mesh='fsaverage') 28 | 29 | # Load data 30 | surf_lh = load_surf_data(os.path.join(atlas_path, 'lh.HCPMMP1.annot')) 31 | surf_rh = load_surf_data(os.path.join(atlas_path, 'rh.HCPMMP1.annot')) 32 | zvals_lh_array = np.load(os.path.join(results_path, 'zvals_lh.npy')) 33 | zvals_rh_array = np.load(os.path.join(results_path, 'zvals_rh.npy')) 34 | zvals_lh = load_surf_data(zvals_lh_array) 35 | zvals_rh = load_surf_data(zvals_rh_array) 36 | 37 | # Plot atlas 38 | output_file_glh = os.path.join(atlas_path, 'fig_lh.png') 39 | output_file_grh = os.path.join(atlas_path, 'fig_rh.png') 40 | plot_surf_stat_map(fsaverage.infl_left, surf_lh, hemi='left', view='lateral', 41 | colorbar=False, bg_map=fsaverage.sulc_left, 42 | output_file=output_file_glh) 43 | 44 | plot_surf_stat_map(fsaverage.infl_right, surf_rh, hemi='right', view='lateral', 45 | colorbar=False, bg_map=fsaverage.sulc_right, 46 | output_file=output_file_grh) 47 | 48 | # Interactive display of atlas and results (w/o threshold) in the browser 49 | glasser_lh = view_surf(fsaverage.infl_left, surf_lh) 50 | glasser_rh = view_surf(fsaverage.infl_right, surf_rh) 51 | 52 | map_lh = view_surf(fsaverage.infl_left, zvals_lh) 53 | map_rh
= view_surf(fsaverage.infl_right, zvals_rh) 54 | 55 | glasser_lh.open_in_browser() 56 | glasser_rh.open_in_browser() 57 | map_lh.open_in_browser() 58 | map_rh.open_in_browser() 59 | -------------------------------------------------------------------------------- /papers_scripts/scidata2023/volimg_visualization.py: -------------------------------------------------------------------------------- 1 | """ 2 | Interactive display of volume FastSRM results in the browser 3 | 4 | Authors: Ana Luisa Pinho 5 | 6 | Created: April 2021 7 | Last update: May 2021 8 | 9 | Compatibility: Python 3.7 10 | """ 11 | 12 | import os 13 | import numpy as np 14 | 15 | import ibc_public.utils_data 16 | 17 | from nilearn.input_data import NiftiMasker 18 | from nilearn import plotting 19 | 20 | 21 | # Mask of the grey matter of the IBC participants 22 | _package_directory = os.path.dirname( 23 | os.path.abspath(ibc_public.utils_data.__file__)) 24 | mask_gm = os.path.join(_package_directory, 25 | '../ibc_data', 'gm_mask_1_5mm.nii.gz') 26 | 27 | # Masker 28 | masker = NiftiMasker(mask_img=mask_gm).fit() 29 | 30 | # Data paths 31 | this_dir = os.path.dirname(os.path.abspath(__file__)) 32 | clips_path = os.path.join(this_dir, 'data_paper3_results', 'volume_encoding', 33 | 'pearson_correlations_clips.npy') 34 | raiders_path = os.path.join(this_dir, 'data_paper3_results', 'volume_encoding', 35 | 'pearson_correlations_raiders.npy') 36 | 37 | # Load the 2D-array map 38 | clips_array = np.load(clips_path) 39 | raiders_array = np.load(raiders_path) 40 | 41 | # Generate niimg files 42 | clips_map = masker.inverse_transform(clips_array) 43 | raiders_map = masker.inverse_transform(raiders_array) 44 | 45 | # Save NIfTI files 46 | clips_outpath = os.path.join(this_dir, 'data_paper3_results', 47 | 'volume_encoding', 48 | 'pearson_correlations_clips_map.nii.gz') 49 | raiders_outpath = os.path.join(this_dir, 'data_paper3_results', 50 | 'volume_encoding', 51 | 'pearson_correlations_raiders_map.nii.gz') 52 | clips_map.to_filename(clips_outpath) 53 | raiders_map.to_filename(raiders_outpath) 54 | 55 | # Create visualization object 56 | clips_view = plotting.view_img(clips_map) 57 | raiders_view = plotting.view_img(raiders_map) 58 | 59 | # Open the interactive panel in the browser 60 | clips_view.open_in_browser() 61 | raiders_view.open_in_browser() 62 | -------------------------------------------------------------------------------- /scripts/b02b0.cnf: -------------------------------------------------------------------------------- 1 | # Resolution (knot-spacing) of warps in mm 2 | --warpres=20,16,14,12,10,6,4,4,4 3 | # Subsampling level (a value of 2 indicates that a 2x2x2 neighbourhood is collapsed to 1 voxel) 4 | --subsamp=2,2,2,2,2,1,1,1,1 5 | # FWHM of gaussian smoothing 6 | --fwhm=8,6,4,3,3,2,1,0,0 7 | # Maximum number of iterations 8 | --miter=5,5,5,5,5,10,10,20,20 9 | # Relative weight of regularisation 10 | --lambda=0.005,0.001,0.0001,0.000015,0.000005,0.0000005,0.00000005,0.0000000005,0.00000000001 11 | # If set to 1 lambda is multiplied by the current average squared difference 12 | --ssqlambda=1 13 | # Regularisation model 14 | --regmod=bending_energy 15 | # If set to 1 movements are estimated along with the field 16 | --estmov=1,1,1,1,1,0,0,0,0 17 | # 0=Levenberg-Marquardt, 1=Scaled Conjugate Gradient 18 | --minmet=0,0,0,0,0,1,1,1,1 19 | # Quadratic or cubic splines 20 | --splineorder=3 21 | # Precision for calculation and storage of Hessian 22 | --numprec=double 23 | # Linear or spline interpolation 24 | --interp=spline 25 
| # If set to 1 the images are individually scaled to a common mean intensity 26 | --scale=1 -------------------------------------------------------------------------------- /scripts/b0_acquisition_params_AP.txt: -------------------------------------------------------------------------------- 1 | 0.000000 -1.000000 0.000000 0.086000 2 | 0.000000 1.000000 0.000000 0.086000 -------------------------------------------------------------------------------- /scripts/cluster_bundles.py: -------------------------------------------------------------------------------- 1 | """ 2 | This algorithm 3 | * removes singleton fibers 4 | * outputs a colormap for remaining fibers 5 | """ 6 | import os 7 | import matplotlib.pyplot as plt 8 | import numpy as np 9 | from dipy.io.streamline import load_tck, save_tck, load_trk 10 | from dipy.segment.clustering import QuickBundles 11 | 12 | workdir = '/neurospin/ibc/derivatives/sub-04/ses-08/dwi' 13 | #f = os.path.join(workdir, 14 | # 'reduced-tracks-100k_sub-04_ses-08.tck') 15 | f = os.path.join(workdir, 16 | 'tracks_sub-04_ses-08_t1.tck') 17 | 18 | ref = os.path.join(workdir, 19 | 'sub-04_ses-08_desc-denoise-eddy-correct_dwi.nii.gz') 20 | tract = load_tck(f, ref) 21 | 22 | """ 23 | qb = QuickBundles(threshold=20.) 24 | 25 | # want to get symmetric clusters 26 | streamlines = tract.streamlines.copy() 27 | for streamline in streamlines: 28 | streamline[:, 0] = np.abs(streamline[:, 0]) 29 | 30 | clusters = qb.cluster(streamlines) 31 | sizes = clusters.clusters_sizes() 32 | print(sizes) 33 | size_threshold = 100 34 | labels = np.zeros(len(tract.streamlines), dtype=int) 35 | 36 | q = 1 37 | for i, cluster in enumerate(clusters): 38 | if sizes[i] > size_threshold: 39 | labels[cluster.indices] = q 40 | q += 1 41 | 42 | # proportion = .1 43 | # labels *= (np.random.rand(len(labels)) < proportion) 44 | tract.streamlines = tract.streamlines[labels > 0] 45 | labels = labels[labels > 0] 46 | labels -= 1 47 | unique_labels = np.unique(labels) 48 | np.random.seed(1) 49 | np.random.shuffle(unique_labels) 50 | labels = unique_labels[labels] 51 | n_valid_labels = len(unique_labels) 52 | np.savetxt(os.path.join(workdir, 'palette.txt'), labels) 53 | 54 | print(save_tck( 55 | tract, 56 | os.path.join(workdir, 'cleaned-tracks-100k_sub-04_ses-08.tck'), 57 | bbox_valid_check=True)) 58 | """ 59 | 60 | 61 | from dipy.align.streamlinear import whole_brain_slr 62 | from dipy.segment.bundles import RecoBundles 63 | from dipy.data import fetch_bundle_atlas_hcp842, get_bundle_atlas_hcp842 64 | from dipy.io.utils import create_tractogram_header 65 | 66 | atlas_file, atlas_folder = fetch_bundle_atlas_hcp842() 67 | atlas_file, all_bundles_files = get_bundle_atlas_hcp842() 68 | sft_atlas = load_trk(atlas_file, "same", bbox_valid_check=False) 69 | atlas = sft_atlas.streamlines 70 | atlas_header = create_tractogram_header(atlas_file, 71 | *sft_atlas.space_attributes) 72 | 73 | moved, transform, qb_centroids1, qb_centroids2 = whole_brain_slr( 74 | atlas, load_tck(f, ref).streamlines, 75 | x0='affine', verbose=True, progressive=True, 76 | rng=np.random.RandomState(1984)) 77 | 78 | rb = RecoBundles(moved, verbose=True, rng=np.random.RandomState(2001)) 79 | 80 | import glob 81 | bundle_files = sorted(glob.glob(all_bundles_files)) 82 | 83 | clusters = [] 84 | for bf in bundle_files: 85 | model = load_trk(bf, "same", bbox_valid_check=False).streamlines 86 | recognized, label = rb.recognize(model_bundle=model, 87 | model_clust_thr=0.05, 88 | reduction_thr=10, 89 | pruning_thr=5, 90 | 
reduction_distance='mdf', 91 | pruning_distance='mdf', 92 | slr=True) 93 | clusters.append(label) 94 | 95 | n_fibers= len(moved) 96 | labels = np.zeros(n_fibers, dtype=int) 97 | for i, cluster in enumerate(clusters): 98 | labels[cluster] = i + 1 99 | 100 | tract.streamlines = tract.streamlines[labels > 0] 101 | labels_ = labels[labels > 0] 102 | labels_ -= 1 103 | 104 | unique_bundles = np.arange(len(bundle_files)) 105 | np.random.seed(1) 106 | np.random.shuffle(unique_bundles) 107 | labels_ = unique_bundles[labels_] 108 | np.savetxt(os.path.join(workdir, 'palette.txt'), labels_) 109 | 110 | print(save_tck( 111 | tract, 112 | os.path.join(workdir, 'bundle-tracks-all_sub-04_ses-08.tck'), 113 | bbox_valid_check=True)) 114 | -------------------------------------------------------------------------------- /scripts/connectivity/estimate_fc_calculate_similarity.py: -------------------------------------------------------------------------------- 1 | """This script estimates functional connectivity (if needed) and then 2 | calculates the similarity between functional connectivity 3 | matrices from different tasks and structural connectivity""" 4 | 5 | import os 6 | import time 7 | import pandas as pd 8 | from nilearn import datasets 9 | from joblib import Parallel, delayed 10 | from ibc_public.connectivity.utils_similarity import ( 11 | mean_connectivity, 12 | get_similarity, 13 | ) 14 | from ibc_public.connectivity.utils_fc_estimation import ( 15 | get_connectomes, 16 | get_time_series, 17 | ) 18 | 19 | cache = DATA_ROOT = "/storage/store2/work/haggarwa/" 20 | output_dir = f"fc_similarity_{time.strftime('%Y%m%d-%H%M%S')}" 21 | output_dir = os.path.join(DATA_ROOT, output_dir) 22 | os.makedirs(output_dir, exist_ok=True) 23 | calculate_connectivity = False 24 | n_parcels = 400 25 | if n_parcels == 400: 26 | fc_data_path = os.path.join(cache, "connectomes_400_comprcorr") 27 | sc_data_path = os.path.join(cache, "sc_data_native_new") 28 | elif n_parcels == 200: 29 | fc_data_path = os.path.join(cache, "connectomes_200_comprcorr") 30 | sc_data_path = os.path.join(cache, "sc_data_native_200") 31 | # number of jobs to run in parallel 32 | n_jobs = 50 33 | # tasks 34 | tasks = [ 35 | "RestingState", 36 | "Raiders", 37 | "GoodBadUgly", 38 | "MonkeyKingdom", 39 | "Mario", 40 | ] 41 | # cov estimators 42 | cov_estimators = ["Graphical-Lasso", "Ledoit-Wolf", "Unregularized"] 43 | # connectivity measures for each cov estimator 44 | measures = ["correlation", "partial correlation"] 45 | 46 | task_pairs = [ 47 | ("RestingState", "Raiders"), 48 | ("RestingState", "GoodBadUgly"), 49 | ("RestingState", "MonkeyKingdom"), 50 | ("RestingState", "Mario"), 51 | ("Raiders", "GoodBadUgly"), 52 | ("Raiders", "MonkeyKingdom"), 53 | ("GoodBadUgly", "MonkeyKingdom"), 54 | ("Raiders", "Mario"), 55 | ("GoodBadUgly", "Mario"), 56 | ("MonkeyKingdom", "Mario"), 57 | ("RestingState", "SC"), 58 | ("Raiders", "SC"), 59 | ("GoodBadUgly", "SC"), 60 | ("MonkeyKingdom", "SC"), 61 | ("Mario", "SC"), 62 | ] 63 | 64 | 65 | def all_combinations(task_pairs, cov_estimators, measures): 66 | """generator to yield all combinations of task pairs, cov estimators, to 67 | parallelize the similarity calculation for each combination""" 68 | for task_pair in task_pairs: 69 | for cov in cov_estimators: 70 | for measure in measures: 71 | yield task_pair, cov, measure 72 | 73 | 74 | if calculate_connectivity: 75 | # get the atlas 76 | atlas = datasets.fetch_atlas_schaefer_2018( 77 | data_dir=cache, resolution_mm=2, n_rois=n_parcels 78 | ) 79 | # use the 
atlas to extract time series for each task in parallel 80 | # get_time_series returns a dataframe with 81 | # the time series for each task, consisting of runs x subjects 82 | print("Time series extraction...") 83 | data = Parallel(n_jobs=n_jobs, verbose=0)( 84 | delayed(get_time_series)(task, atlas, cache) for task in tasks 85 | ) 86 | # concatenate all the dataframes so we have a single dataframe 87 | # with the time series from all tasks 88 | data = pd.concat(data) 89 | # estimate the connectivity matrices for each cov estimator in parallel 90 | # get_connectomes returns a dataframe with two columns each corresponding 91 | # to the partial correlation and correlation connectome from each cov 92 | # estimator 93 | print("Connectivity estimation...") 94 | data = Parallel(n_jobs=n_jobs, verbose=0)( 95 | delayed(get_connectomes)(cov, data, n_jobs) for cov in cov_estimators 96 | ) 97 | # concatenate the dataframes so we have a single dataframe 98 | # with the connectomes from all cov estimators 99 | cols_to_use = data[0].columns.difference(data[1].columns) 100 | data = pd.concat([data[1], data[0][cols_to_use]], axis=1) 101 | data.reset_index(inplace=True, drop=True) 102 | else: 103 | data = pd.read_pickle(fc_data_path) 104 | sc_data = pd.read_pickle(sc_data_path) 105 | all_connectivity = mean_connectivity(data, tasks, cov_estimators, measures) 106 | all_connectivity = pd.concat([all_connectivity, sc_data], axis=0) 107 | 108 | results = Parallel(n_jobs=n_jobs, verbose=2, backend="loky")( 109 | delayed(get_similarity)(all_connectivity, task_pair, cov, measure) 110 | for task_pair, cov, measure in all_combinations( 111 | task_pairs, cov_estimators, measures 112 | ) 113 | ) 114 | 115 | results = [item for sublist in results for item in sublist] 116 | results = pd.DataFrame(results) 117 | results.to_pickle(os.path.join(output_dir, "results.pkl")) 118 | -------------------------------------------------------------------------------- /scripts/connectivity/estimate_sc_README.md: -------------------------------------------------------------------------------- 1 | # DWI preprocessing 2 | Preprocessing is done using MRtrix's `dwidenoise` and FSL's `topup` and `eddy` in the script `dmri_preprocessing_tractography.py` 3 | 4 | ## Steps 5 | 0. The 4 runs of dwimages are concatenated; the bvecs and bvals likewise 6 | 1. Denoising: done with `dwidenoise`, applied to the dwimages only 7 | 2. FSL Topup: 8 | * collate the B0 volumes; these are the volumes indexed 0, 60, 122 and 183 in the dwimages 9 | * make the acquisition parameter matrix required by the FSL `topup` function; the second column of the matrix is the phase-encoding direction, which should be 1 for AP and -1 for PA ([FSL topup doc](https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/topup/TopupUsersGuide#A--datain)) 10 | * calculate the distortion from the collated b0 images using FSL's `topup` and the set of parameters given in the `b02b0.cnf` file 11 | 3. Masking (to be used with eddy correction), created using `fslmaths` 12 | 4. FSL Eddy correction using `eddy_cuda9.1`: the input index file has 1s and 3s in it, which is correct **just update the comment in the script saying it is 1s and 2s** 13 | 5. No bias-field correction is done 14 | 15 | # DWI tractography 16 | Tractography is done using MRtrix in the script `dmri_preprocessing_tractography.py` 17 | 18 | ## Steps 19 | 0. The output of the eddy correction above is converted to the .mif format, which is compatible with MRtrix 20 | 1. Generate response functions: using `dwi2response` on the eddy-corrected images 21 | 2.
Calculate fiber orientation densities (estimates of the amount of diffusion along different fiber orientations): using `dwi2fod` with multi-shell multi-tissue constrained spherical deconvolution 22 | 3. Get the grey-matter/white-matter boundary: using `5tt2gmwmi` 23 | 4. Generate streamlines: using `tckgen` 24 | 25 | # DWI connectome 26 | 27 | ## Steps 28 | 1. The streamlines obtained from tractography were first warped into MNI152 space using ANTs' image registration `antsRegistration` and MRtrix's `tcktransform` in the script `estimate_sc.py`. 29 | 2. In addition, the script `estimate_sc.py` also transforms the given atlas to the native individual space. This way we can calculate two kinds of structural connectivity matrices: one in MNI space and the other in the native individual space. 30 | 3. Finally, the two connectomes are calculated using MRtrix's `tck2connectome` function in the same script `estimate_sc.py`. 31 | -------------------------------------------------------------------------------- /scripts/connectivity/plotting/plot_all_accuracy_table.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pandas as pd 3 | import seaborn as sns 4 | 5 | sns.set_theme() 6 | sns.set_style("whitegrid") 7 | sns.set_context("talk") 8 | 9 | 10 | ### table of all accuracies ### 11 | cache = DATA_ROOT = "/storage/store/work/haggarwa/" 12 | DATA_ROOT2 = "/storage/store2/work/haggarwa/" 13 | n_parcels = 200 14 | 15 | if n_parcels == 400: 16 | acrosstask_results_dir = "fc_acrosstask_classification_400_20240118-143947" 17 | withintask_results_dir = "fc_withintask_classification_400_20240120-154926" 18 | output_dir = os.path.join(DATA_ROOT2, "fc_accuracy_tables_compcorr") 19 | elif n_parcels == 200: 20 | acrosstask_results_dir = "fc_acrosstask_classification_200_20240117-185001" 21 | withintask_results_dir = "fc_withintask_classification_200_20240118-143124" 22 | output_dir = os.path.join(DATA_ROOT2, "fc_accuracy_tables_200_compcorr") 23 | os.makedirs(output_dir, exist_ok=True) 24 | acrosstask_results_pkl = os.path.join( 25 | DATA_ROOT2, acrosstask_results_dir, "all_results.pkl" 26 | ) 27 | withintask_results_pkl = os.path.join( 28 | DATA_ROOT2, withintask_results_dir, "all_results.pkl" 29 | ) 30 | 31 | acrosstask_df = pd.read_pickle(acrosstask_results_pkl).reset_index(drop=True) 32 | withintask_df = pd.read_pickle(withintask_results_pkl).reset_index(drop=True) 33 | 34 | # cov estimators 35 | cov_estimators = ["Graphical-Lasso", "Ledoit-Wolf", "Unregularized"] 36 | # connectivity measures for each cov estimator 37 | measures = ["correlation", "partial correlation"] 38 | # what to classify 39 | classify = ["Tasks", "Subjects", "Runs"] 40 | # tasks 41 | tasks = ["RestingState", "Raiders", "GoodBadUgly", "MonkeyKingdom", "Mario"] 42 | 43 | # get accuracies for each classification scenario 44 | for clas in classify: 45 | print(clas) 46 | classifying_df = pd.concat( 47 | [ 48 | acrosstask_df[acrosstask_df["classes"] == clas], 49 | withintask_df[withintask_df["classes"] == clas], 50 | ] 51 | ) 52 | classifying_df.reset_index(inplace=True, drop=True) 53 | for metric in [ 54 | "balanced_accuracy", 55 | "dummy_balanced_accuracy", 56 | "LinearSVC_auc", 57 | "Dummy_auc", 58 | ]: 59 | mean_acc = ( 60 | classifying_df.groupby(["task_label", "connectivity"])[metric] 61 | .mean() 62 | .round(2) 63 | ) 64 | mean_acc = mean_acc.unstack(level=1) 65 | mean_acc["mean"] = mean_acc.mean(axis=1).round(2) 66 | mean_acc = mean_acc[ 67 | [ 68 | "Unregularized correlation", 69 |
"Unregularized partial correlation", 70 | "Ledoit-Wolf correlation", 71 | "Ledoit-Wolf partial correlation", 72 | "Graphical-Lasso correlation", 73 | "Graphical-Lasso partial correlation", 74 | "mean", 75 | ] 76 | ] 77 | mean_acc.to_csv( 78 | os.path.join(DATA_ROOT2, output_dir, f"{clas}_mean_{metric}.csv") 79 | ) 80 | -------------------------------------------------------------------------------- /scripts/connectivity/plotting/plot_classifier_coefficients.py: -------------------------------------------------------------------------------- 1 | """This script fits classifiers to full data and 2 | plots the classifier coefficients""" 3 | 4 | import os 5 | import pandas as pd 6 | import seaborn as sns 7 | from nilearn import datasets 8 | from joblib import Parallel, delayed 9 | from sklearn import preprocessing 10 | import numpy as np 11 | from scipy.stats import mannwhitneyu 12 | from ibc_public.connectivity.utils_plot import ( 13 | fit_classifier, 14 | get_clas_cov_measure, 15 | plot_full_weight_matrix, 16 | plot_network_weight_matrix, 17 | get_network_labels, 18 | _average_over_networks, 19 | ) 20 | 21 | sns.set_theme() 22 | sns.set_style("whitegrid") 23 | sns.set_context("talk") 24 | 25 | ### fit classifiers to get weights 26 | DATA_ROOT = cache = "/storage/store2/work/haggarwa/" 27 | n_parcels = 400 28 | if n_parcels == 400: 29 | func_data_path = os.path.join(cache, "connectomes_400_comprcorr") 30 | output_dir = os.path.join(DATA_ROOT, "weights_compcorr") 31 | elif n_parcels == 200: 32 | func_data_path = os.path.join(cache, "connectomes_200_comprcorr") 33 | output_dir = os.path.join(DATA_ROOT, "weights_200_compcorr") 34 | os.makedirs(output_dir, exist_ok=True) 35 | func_data = pd.read_pickle(func_data_path) 36 | cov_estimators = ["Graphical-Lasso", "Ledoit-Wolf", "Unregularized"] 37 | measures = ["correlation", "partial correlation"] 38 | classify = ["Tasks", "Subjects", "Runs"] 39 | x = Parallel(n_jobs=20, verbose=11)( 40 | delayed(fit_classifier)( 41 | clas, cov, measure, func_data, output_dir=output_dir 42 | ) 43 | for clas, cov, measure in get_clas_cov_measure( 44 | classify, cov_estimators, measures 45 | ) 46 | ) 47 | 48 | ### network pair SVC weight matrices 49 | DATA_ROOT = cache = "/storage/store2/work/haggarwa/" 50 | n_parcels = 400 51 | if n_parcels == 400: 52 | weight_dir = os.path.join(DATA_ROOT, "weights_compcorr") 53 | output_dir = os.path.join(DATA_ROOT, "weight_plots_compcorr") 54 | elif n_parcels == 200: 55 | weight_dir = os.path.join(DATA_ROOT, "weights_200_compcorr") 56 | output_dir = os.path.join(DATA_ROOT, "weight_plots_200_compcorr") 57 | os.makedirs(output_dir, exist_ok=True) 58 | # get atlas for yeo network labels 59 | atlas = datasets.fetch_atlas_schaefer_2018( 60 | data_dir=cache, resolution_mm=2, n_rois=n_parcels 61 | ) 62 | # cov estimators 63 | cov_estimators = ["Graphical-Lasso", "Ledoit-Wolf", "Unregularized"] 64 | # connectivity measures for each cov estimator 65 | measures = ["correlation", "partial correlation"] 66 | # what to classify 67 | classify = ["Tasks", "Subjects", "Runs"] 68 | 69 | x = Parallel(n_jobs=20, verbose=11)( 70 | delayed(plot_full_weight_matrix)( 71 | clas, 72 | cov, 73 | measure, 74 | atlas, 75 | transform="l2", 76 | output_dir=output_dir, 77 | fontsize=15, 78 | weight_dir=weight_dir, 79 | n_parcels=n_parcels, 80 | ) 81 | for clas, cov, measure in get_clas_cov_measure( 82 | classify, cov_estimators, measures 83 | ) 84 | ) 85 | x = Parallel(n_jobs=20, verbose=11)( 86 | delayed(plot_network_weight_matrix)( 87 | clas, 88 | cov, 89 | 
measure, 90 | atlas, 91 | labels_fmt="network", 92 | transform="l2", 93 | output_dir=output_dir, 94 | fontsize=15, 95 | weight_dir=weight_dir, 96 | n_parcels=n_parcels, 97 | ) 98 | for clas, cov, measure in get_clas_cov_measure( 99 | classify, cov_estimators, measures 100 | ) 101 | ) 102 | 103 | ### compare within network and between network weights 104 | DATA_ROOT = cache = "/storage/store2/work/haggarwa/" 105 | n_parcels = 400 106 | if n_parcels == 400: 107 | weight_dir = os.path.join(DATA_ROOT, "weights_compcorr") 108 | elif n_parcels == 200: 109 | weight_dir = os.path.join(DATA_ROOT, "weights_200_compcorr") 110 | # get atlas for yeo network labels 111 | atlas = datasets.fetch_atlas_schaefer_2018( 112 | data_dir=cache, resolution_mm=2, n_rois=n_parcels 113 | ) 114 | # cov estimators 115 | cov_estimators = ["Graphical-Lasso", "Ledoit-Wolf", "Unregularized"] 116 | # connectivity measures for each cov estimator 117 | measures = ["correlation", "partial correlation"] 118 | # what to classify 119 | classify = ["Tasks", "Subjects", "Runs"] 120 | 121 | labels = get_network_labels(atlas)[1] 122 | 123 | 124 | le = preprocessing.LabelEncoder() 125 | encoded_labels = le.fit_transform(labels) 126 | transform = "l2" 127 | for clas, cov, measure in get_clas_cov_measure( 128 | classify, cov_estimators, measures 129 | ): 130 | unique_labels = np.unique(encoded_labels) 131 | network_weights = _average_over_networks( 132 | encoded_labels, 133 | unique_labels, 134 | clas, 135 | cov, 136 | measure, 137 | transform, 138 | weight_dir, 139 | n_parcels, 140 | ) 141 | 142 | within_network_weights = np.diag(network_weights) 143 | # keep only lower triangle 144 | between_network_weights = network_weights[ 145 | np.tril_indices_from(network_weights, k=-1) 146 | ] 147 | _, within_greater = mannwhitneyu( 148 | within_network_weights, between_network_weights, alternative="greater" 149 | ) 150 | _, between_greater = mannwhitneyu( 151 | between_network_weights, within_network_weights, alternative="greater" 152 | ) 153 | _, two_sided = mannwhitneyu( 154 | within_network_weights, between_network_weights 155 | ) 156 | 157 | print( 158 | "*** Network weights ***\n", 159 | f"Classifying {clas} with {cov} {measure}:\n Within network > ", 160 | f"between network: {within_greater:.2e}\n Between network > ", 161 | f"within network: {between_greater:.2e}\n", 162 | f"Two-sided: {two_sided:.2e}\n", 163 | ) 164 | -------------------------------------------------------------------------------- /scripts/connectivity/plotting/plot_generalize_connectomes.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pandas as pd 3 | import seaborn as sns 4 | import numpy as np 5 | from nilearn.connectome import vec_to_sym_matrix 6 | import matplotlib.pyplot as plt 7 | from nilearn import datasets 8 | from tqdm import tqdm 9 | from ibc_public.connectivity.utils_plot import get_lower_tri_heatmap 10 | 11 | sns.set_theme() 12 | sns.set_style("whitegrid") 13 | sns.set_context("talk") 14 | 15 | ### transfer IBC -> external connectivity matrices ### 16 | # get atlas for yeo network labels 17 | cache = "/storage/store2/work/haggarwa/" 18 | DATA_ROOT = "/storage/store2/work/haggarwa/" 19 | IBC_ROOT = os.path.join( 20 | DATA_ROOT, "ibc_sync_external_connectivity_20231206-110710" 21 | ) 22 | external_ROOT = os.path.join( 23 | DATA_ROOT, "external_connectivity_20231205-142311" 24 | ) 25 | 26 | atlas = datasets.fetch_atlas_schaefer_2018( 27 | data_dir=cache, resolution_mm=2, n_rois=200 28 | ) 29 | 
networks = atlas["labels"].astype("U") 30 | hemi_network_labels = [] 31 | for network in networks: 32 | components = network.split("_") 33 | hemi_network = "_".join(components[1:3]) 34 | hemi_network_labels.append(hemi_network) 35 | ticks = [] 36 | unique_labels = [] 37 | for i, label in enumerate(hemi_network_labels): 38 | if label != hemi_network_labels[i - 1]: 39 | ticks.append(i) 40 | unique_labels.append(label) 41 | # cov estimators 42 | cov_estimators = ["Graphical-Lasso", "Ledoit-Wolf", "Unregularized"] 43 | # connectivity measures for each cov estimator 44 | measures = ["correlation", "partial correlation"] 45 | for dataset in ["ibc", "external"]: 46 | if dataset == "ibc": 47 | # load the data 48 | fc_data = pd.read_pickle(os.path.join(IBC_ROOT, "connectomes_200.pkl")) 49 | mats_dir = os.path.join(IBC_ROOT, "connectivity_matrices") 50 | elif dataset == "external": 51 | # load the data 52 | fc_data = pd.read_pickle( 53 | os.path.join(external_ROOT, "connectomes_200.pkl") 54 | ) 55 | mats_dir = os.path.join(external_ROOT, "connectivity_matrices") 56 | _, uniq_idx = np.unique(hemi_network_labels, return_index=True) 57 | hemi_network_labels = np.array(hemi_network_labels)[np.sort(uniq_idx)] 58 | sns.set_context("notebook", font_scale=1.05) 59 | os.makedirs(mats_dir, exist_ok=True) 60 | for sub in tqdm(np.unique(fc_data["subject_ids"]), desc=dataset): 61 | for cov in cov_estimators: 62 | for measure in measures: 63 | try: 64 | vector = np.mean( 65 | np.vstack( 66 | list( 67 | fc_data[(fc_data["subject_ids"] == sub)][ 68 | f"{cov} {measure}" 69 | ] 70 | ) 71 | ), 72 | axis=0, 73 | ) 74 | matrix = vec_to_sym_matrix(vector, diagonal=np.ones(200)) 75 | except ValueError: 76 | print(f"{sub} {cov} {measure} does not exist") 77 | continue 78 | get_lower_tri_heatmap( 79 | matrix, 80 | title=f"{sub} {dataset}", 81 | output=os.path.join(mats_dir, f"{sub}_{cov}_{measure}"), 82 | ticks=ticks, 83 | labels=hemi_network_labels, 84 | grid=True, 85 | diag=True, 86 | triu=True, 87 | ) 88 | plt.close("all") 89 | -------------------------------------------------------------------------------- /scripts/connectivity/plotting/plot_generalize_distributions.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pandas as pd 3 | import seaborn as sns 4 | import numpy as np 5 | from nilearn.connectome import vec_to_sym_matrix 6 | import matplotlib.pyplot as plt 7 | from nilearn import datasets 8 | from ibc_public.connectivity.utils_plot import get_lower_tri_heatmap 9 | 10 | sns.set_theme() 11 | sns.set_style("whitegrid") 12 | sns.set_context("talk") 13 | 14 | ### transfer IBC -> external connectivity distribution plots ### 15 | cache = "/storage/store2/work/haggarwa/" 16 | DATA_ROOT = "/storage/store2/work/haggarwa/" 17 | IBC_ROOT = os.path.join( 18 | DATA_ROOT, "ibc_sync_external_connectivity_20231206-110710" 19 | ) 20 | external_ROOT = os.path.join( 21 | DATA_ROOT, "external_connectivity_20231205-142311" 22 | ) 23 | atlas = datasets.fetch_atlas_schaefer_2018( 24 | data_dir=cache, resolution_mm=2, n_rois=200 25 | ) 26 | networks = atlas["labels"].astype("U") 27 | hemi_network_labels = [] 28 | for network in networks: 29 | components = network.split("_") 30 | hemi_network = "_".join(components[1:3]) 31 | hemi_network_labels.append(hemi_network) 32 | ticks = [] 33 | unique_labels = [] 34 | for i, label in enumerate(hemi_network_labels): 35 | if label != hemi_network_labels[i - 1]: 36 | ticks.append(i) 37 | unique_labels.append(label) 38 | # cov estimators 39 | 
cov_estimators = ["Graphical-Lasso", "Ledoit-Wolf", "Unregularized"] 40 | # connectivity measures for each cov estimator 41 | measures = ["correlation", "partial correlation"] 42 | 43 | dist_dir = os.path.join( 44 | DATA_ROOT, "transfer_ibc_external_connectivity_distributions" 45 | ) 46 | os.makedirs(dist_dir, exist_ok=True) 47 | for cov in cov_estimators: 48 | for measure in measures: 49 | fig, ax = plt.subplots() 50 | for dataset in ["ibc", "external"]: 51 | if dataset == "ibc": 52 | # load the data 53 | fc_data = pd.read_pickle( 54 | os.path.join(IBC_ROOT, "connectomes_200.pkl") 55 | ) 56 | color = "blue" 57 | elif dataset == "external": 58 | # load the data 59 | fc_data = pd.read_pickle( 60 | os.path.join(external_ROOT, "connectomes_200.pkl") 61 | ) 62 | color = "red" 63 | try: 64 | vector = np.mean( 65 | np.vstack(list(fc_data[f"{cov} {measure}"])), 66 | axis=0, 67 | ) 68 | matrix = vec_to_sym_matrix(vector, diagonal=np.ones(200)) 69 | except ValueError: 70 | print(f"{cov} {measure} does not exist") 71 | continue 72 | get_lower_tri_heatmap( 73 | matrix, 74 | title=f"{dataset} {cov} {measure}", 75 | output=os.path.join( 76 | dist_dir, f"mat_{dataset}_{cov}_{measure}" 77 | ), 78 | ticks=ticks, 79 | labels=unique_labels, 80 | grid=True, 81 | diag=True, 82 | triu=True, 83 | ) 84 | ax.hist( 85 | vector, 86 | bins=500, 87 | density=True, 88 | log=True, 89 | label=dataset, 90 | color=color, 91 | alpha=0.5, 92 | ) 93 | ax.axvline( 94 | np.mean(vector), linestyle="dashed", linewidth=1, color=color 95 | ) 96 | ax.legend() 97 | ax.set_title(f"{cov} {measure}") 98 | plt.savefig( 99 | os.path.join(dist_dir, f"dist_{cov}_{measure}.png"), 100 | bbox_inches="tight", 101 | ) 102 | plt.close(fig) 103 | -------------------------------------------------------------------------------- /scripts/connectivity/plotting/plot_methods_fmri_surf.py: -------------------------------------------------------------------------------- 1 | import os 2 | import seaborn as sns 3 | from nilearn.plotting import view_img_on_surf 4 | from nilearn import image 5 | 6 | sns.set_theme() 7 | sns.set_style("whitegrid") 8 | sns.set_context("talk") 9 | 10 | ### plot fmri image for methods 11 | cache = DATA_ROOT = "/storage/store/work/haggarwa/" 12 | DATA_ROOT2 = "/storage/store2/work/haggarwa/" 13 | output_dir = os.path.join(DATA_ROOT2, "fmri_methods") 14 | os.makedirs(output_dir, exist_ok=True) 15 | 16 | fmri_file = "/storage/store2/data/ibc/derivatives/sub-04/ses-12/func/wrdcsub-04_ses-12_task-MTTNS_dir-pa_run-01_bold.nii.gz" 17 | 18 | mean_fmri = image.mean_img(fmri_file) 19 | 20 | view_img_on_surf(mean_fmri, surf_mesh="fsaverage").save_as_html( 21 | os.path.join(output_dir, "fmri.html") 22 | ) 23 | -------------------------------------------------------------------------------- /scripts/connectivity/plotting/plot_methods_rbg_regions.py: -------------------------------------------------------------------------------- 1 | from nilearn.plotting import plot_glass_brain 2 | from nilearn import datasets 3 | import numpy as np 4 | import matplotlib.pyplot as plt 5 | 6 | cache = "/storage/store2/work/haggarwa/" 7 | 8 | atlas = datasets.fetch_atlas_schaefer_2018( 9 | data_dir=cache, n_rois=100, resolution_mm=2 10 | ) 11 | display = plot_glass_brain(None, display_mode="r") 12 | colors = ["tab:red", "tab:blue", "tab:green", "tab:purple"] 13 | parcels = [[51, 61], [95, 98], [98, 100]] 14 | 15 | for parcel, color in zip(parcels, colors): 16 | display.add_contours(atlas.maps, filled=True, levels=parcel, colors=color) 17 | 18 |
display.savefig("glass_brain.png", dpi=600) 19 | display.close() 20 | 21 | 22 | fig, ax = plt.subplots() 23 | n = 100 24 | ax.plot(np.arange(n), np.random.random_sample(n), color="tab:red") 25 | ax.plot( 26 | np.arange(n), 27 | (np.random.random_sample(n) + 1), 28 | color="tab:green", 29 | ) 30 | ax.plot(np.arange(n), (np.random.random_sample(n) + 2), color="tab:blue") 31 | ax.grid(False) 32 | ax.set_xticks([]) 33 | ax.set_yticks([]) 34 | plt.savefig("line.png", dpi=600, transparent=True, bbox_inches="tight") 35 | plt.close() 36 | -------------------------------------------------------------------------------- /scripts/connectivity/plotting/plot_multi_task_classification_accuracy.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pandas as pd 3 | import seaborn as sns 4 | import matplotlib.pyplot as plt 5 | from ibc_public.connectivity.utils_plot import wrap_labels 6 | 7 | sns.set_theme() 8 | sns.set_style("whitegrid") 9 | sns.set_context("talk") 10 | 11 | ### pooled or multi task classification accuracies ### 12 | cache = DATA_ROOT = "/storage/store/work/haggarwa/" 13 | DATA_ROOT2 = "/storage/store2/work/haggarwa/" 14 | 15 | n_parcels = 400 16 | if n_parcels == 400: 17 | # with compcorr and fixed resting state confounds 18 | results_dir = "fc_acrosstask_classification_400_20240118-143947" 19 | results_pkl = os.path.join(DATA_ROOT2, results_dir, "all_results.pkl") 20 | output_dir = "classification_plots_compcorr" 21 | elif n_parcels == 200: 22 | # with compcorr and fixed resting state confounds 23 | results_dir = "fc_acrosstask_classification_200_20240117-185001" 24 | results_pkl = os.path.join(DATA_ROOT2, results_dir, "all_results.pkl") 25 | output_dir = "classification_plots_200_compcorr" 26 | output_dir = os.path.join(DATA_ROOT2, output_dir) 27 | os.makedirs(output_dir, exist_ok=True) 28 | df = pd.read_pickle(results_pkl) 29 | 30 | # cov estimators 31 | cov_estimators = ["Graphical-Lasso", "Ledoit-Wolf", "Unregularized"] 32 | # connectivity measures for each cov estimator 33 | measures = ["correlation", "partial correlation"] 34 | # what to classify 35 | classify = ["Tasks", "Subjects", "Runs"] 36 | 37 | df[df.select_dtypes(include=["number"]).columns] *= 100 38 | 39 | for clas in classify: 40 | df_ = df[df["classes"] == clas] 41 | df_.reset_index(inplace=True, drop=True) 42 | 43 | for how_many in ["all", "three"]: 44 | if how_many == "all": 45 | order = [ 46 | "Unregularized correlation", 47 | "Unregularized partial correlation", 48 | "Ledoit-Wolf correlation", 49 | "Ledoit-Wolf partial correlation", 50 | "Graphical-Lasso correlation", 51 | "Graphical-Lasso partial correlation", 52 | ] 53 | elif how_many == "three": 54 | order = [ 55 | "Unregularized correlation", 56 | "Ledoit-Wolf correlation", 57 | "Graphical-Lasso partial correlation", 58 | ] 59 | ax_score = sns.barplot( 60 | y="connectivity", 61 | x="balanced_accuracy", 62 | data=df_, 63 | orient="h", 64 | palette=sns.color_palette()[0:1], 65 | order=order, 66 | facecolor=(0.4, 0.4, 0.4, 1), 67 | ) 68 | for i in ax_score.containers: 69 | plt.bar_label( 70 | i, 71 | fmt="%.1f", 72 | label_type="edge", 73 | fontsize="x-small", 74 | padding=-45, 75 | weight="bold", 76 | color="white", 77 | ) 78 | wrap_labels(ax_score, 20) 79 | sns.barplot( 80 | y="connectivity", 81 | x="dummy_balanced_accuracy", 82 | data=df_, 83 | orient="h", 84 | palette=sns.color_palette("pastel")[0:1], 85 | order=order, 86 | ci=None, 87 | facecolor=(0.8, 0.8, 0.8, 1), 88 | ) 89 | plt.xlabel("Accuracy") 90 
| plt.ylabel("FC measure") 91 | fig = plt.gcf() 92 | if how_many == "three": 93 | fig.set_size_inches(6, 2.5) 94 | plt.savefig( 95 | os.path.join(output_dir, f"{clas}_classification_{how_many}.png"), 96 | bbox_inches="tight", 97 | ) 98 | plt.savefig( 99 | os.path.join(output_dir, f"{clas}_classification_{how_many}.svg"), 100 | bbox_inches="tight", 101 | ) 102 | plt.close("all") 103 | -------------------------------------------------------------------------------- /scripts/connectivity/plotting/plot_reliability.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pandas as pd 3 | import seaborn as sns 4 | import matplotlib.pyplot as plt 5 | from ibc_public.connectivity.utils_plot import ( 6 | wrap_labels, 7 | insert_stats_reliability, 8 | ) 9 | 10 | sns.set_theme() 11 | sns.set_style("whitegrid") 12 | sns.set_context("talk") 13 | 14 | ### plot reliability 15 | 16 | # cov estimators 17 | cov_estimators = ["Graphical-Lasso", "Ledoit-Wolf", "Unregularized"] 18 | # connectivity measures for each cov estimator 19 | measures = ["correlation", "partial correlation"] 20 | # tasks 21 | tasks = [ 22 | "RestingState", 23 | "Raiders", 24 | "GoodBadUgly", 25 | ] 26 | # load the data 27 | DATA_ROOT = "/storage/store2/work/haggarwa/" 28 | n_parcels = 200 29 | results_dir = f"reliability_{n_parcels}" 30 | reliability_data = pd.read_pickle( 31 | os.path.join(DATA_ROOT, results_dir, f"corrs_full_mat_{n_parcels}") 32 | ) 33 | p_values = pd.read_pickle( 34 | os.path.join(DATA_ROOT, results_dir, f"p_vals_{n_parcels}") 35 | ) 36 | keep_only = [ 37 | "Unregularized correlation", 38 | "Ledoit-Wolf correlation", 39 | "Graphical-Lasso partial correlation", 40 | "time_series", 41 | ] 42 | hue_order = [ 43 | "RestingState", 44 | "Raiders", 45 | "GoodBadUgly", 46 | ] 47 | rest_colors = [sns.color_palette("tab20c")[0]] 48 | movie_colors = sns.color_palette("tab20c")[4:7] 49 | color_palette = rest_colors + movie_colors 50 | fig = plt.figure() 51 | ax1 = plt.subplot2grid((1, 20), (0, 0), colspan=15) 52 | ax2 = plt.subplot2grid((1, 16), (0, -4)) 53 | ax3 = plt.subplot2grid((1, 18), (0, -2)) 54 | sns.boxplot( 55 | x="correlation", 56 | y="measure", 57 | hue="task", 58 | data=reliability_data, 59 | palette=color_palette, 60 | orient="h", 61 | order=keep_only, 62 | hue_order=hue_order, 63 | ax=ax1, 64 | ) 65 | wrap_labels(ax1, 20) 66 | legend = ax1.legend(framealpha=0, loc="center left", bbox_to_anchor=(1.4, 0.5)) 67 | for i, task in enumerate(keep_only): 68 | p_val = p_values[p_values["measure"] == task].reset_index(drop=True) 69 | index = abs((i - len(p_values)) - 1) 70 | for j in range(2): 71 | if p_val.loc[j]["comp"].split(" ")[2] == "Raiders": 72 | axis = ax2 73 | xoff_1 = 0.2 74 | xoff_2 = 0.1 75 | else: 76 | axis = ax3 77 | xoff_1 = 0.2 78 | xoff_2 = 0.3 79 | p = p_val.loc[j]["p_val"] 80 | 81 | insert_stats_reliability( 82 | axis, 83 | p, 84 | 1.2, 85 | loc=[index - xoff_1, index + xoff_2], 86 | x_n=len(keep_only), 87 | ) 88 | 89 | ax1.set_xlabel("Reliability") 90 | ax1.set_ylabel("Measure") 91 | plot_file = os.path.join( 92 | results_dir, 93 | "reliability.svg", 94 | ) 95 | plot_file2 = os.path.join( 96 | results_dir, 97 | "reliability.png", 98 | ) 99 | plt.savefig(plot_file, bbox_inches="tight", transparent=True) 100 | plt.savefig(plot_file2, bbox_inches="tight", transparent=False) 101 | plt.close() 102 | -------------------------------------------------------------------------------- /scripts/connectivity/supplementary/compile_sc_in_dataframe.py: 
-------------------------------------------------------------------------------- 1 | import os 2 | from glob import glob 3 | import pandas as pd 4 | from ibc_public.connectivity.utils_fc_estimation import get_ses_modality 5 | from nilearn.connectome import sym_matrix_to_vec 6 | 7 | ### create sc_data dataframe for native space 8 | cache = DATA_ROOT = "/storage/store2/work/haggarwa/" 9 | n_parcels = 200 10 | sub_ses, _ = get_ses_modality("DWI") 11 | sc_data_native = [] 12 | for sub, session in sub_ses.items(): 13 | data = {"subject": sub, "measure": "SC", "task": "SC"} 14 | path = os.path.join(DATA_ROOT, sub, session, "dwi") 15 | csv = glob( 16 | os.path.join( 17 | path, 18 | f"*connectome_schaefer{n_parcels}_individual_siftweighted.csv", 19 | ) 20 | ) 21 | matrix = pd.read_csv(csv[0], header=None).to_numpy() 22 | print(matrix.shape) 23 | matrix = sym_matrix_to_vec(matrix, discard_diagonal=True) 24 | data["connectivity"] = matrix 25 | sc_data_native.append(data) 26 | 27 | sc_data_native = pd.DataFrame(sc_data_native) 28 | sc_data_native.to_pickle( 29 | os.path.join(DATA_ROOT, f"sc_data_native_{n_parcels}") 30 | ) 31 | 32 | ### create sc_data dataframe for MNI space 33 | sub_ses, _ = get_ses_modality("DWI") 34 | sc_data_mni = [] 35 | for sub, session in sub_ses.items(): 36 | data = {"subject": sub, "measure": "SC", "task": "SC"} 37 | path = os.path.join(DATA_ROOT, sub, session, "dwi") 38 | csv = glob( 39 | os.path.join(path, "*connectome_schaefer400_MNI152_siftweighted.csv") 40 | ) 41 | matrix = pd.read_csv(csv[0], header=None).to_numpy() 42 | print(matrix.shape) 43 | matrix = sym_matrix_to_vec(matrix, discard_diagonal=True) 44 | data["connectivity"] = matrix 45 | sc_data_mni.append(data) 46 | 47 | sc_data_mni = pd.DataFrame(sc_data_mni) 48 | sc_data_mni.to_pickle(os.path.join(DATA_ROOT, "sc_data_mni_new")) 49 | -------------------------------------------------------------------------------- /scripts/connectivity/supplementary/umap_ibc_external_gbu.py: -------------------------------------------------------------------------------- 1 | """This script creates 2D UMAP representations of IBC and external GBU data, to 2 | assess the covariate shift between the two datasets. 
Also tries different 3 | scaling methods to reduce the covariate shift""" 4 | import umap 5 | import pandas as pd 6 | import numpy as np 7 | import os 8 | from sklearn.preprocessing import StandardScaler 9 | from sklearn.preprocessing import RobustScaler 10 | from skimage import exposure 11 | from matplotlib import pyplot as plt 12 | import seaborn as sns 13 | 14 | 15 | cache = DATA_ROOT = "/storage/store2/work/haggarwa/" 16 | # load connectomes for external GBU 17 | external_connectomes = pd.read_pickle( 18 | os.path.join( 19 | DATA_ROOT, 20 | "external_connectivity_20240125-104121", 21 | "connectomes_200_compcorr.pkl", 22 | ) 23 | ) 24 | 25 | # load connectomes for IBC GBU 26 | IBC_connectomes = pd.read_pickle( 27 | os.path.join( 28 | DATA_ROOT, 29 | "connectomes_200_comprcorr", 30 | ) 31 | ) 32 | 33 | IBC_connectomes = IBC_connectomes[IBC_connectomes["tasks"] == "GoodBadUgly"] 34 | IBC_connectomes = IBC_connectomes[ 35 | IBC_connectomes["run_labels"].isin(["run-03", "run-04", "run-05"]) 36 | ] 37 | IBC_connectomes.reset_index(inplace=True, drop=True) 38 | # rename run labels to match across datasets 39 | IBC_connectomes["run_labels"].replace("run-03", "1", inplace=True) 40 | IBC_connectomes["run_labels"].replace("run-04", "2", inplace=True) 41 | IBC_connectomes["run_labels"].replace("run-05", "3", inplace=True) 42 | 43 | external_connectomes["run_labels"].replace("run-01", "1", inplace=True) 44 | external_connectomes["run_labels"].replace("run-02", "2", inplace=True) 45 | external_connectomes["run_labels"].replace("run-03", "3", inplace=True) 46 | 47 | external_connectomes["tasks"].replace( 48 | {"externalGoodBadUgly": "Mantini et al."}, inplace=True 49 | ) 50 | IBC_connectomes["tasks"].replace({"GoodBadUgly": "IBC"}, inplace=True) 51 | 52 | connectomes = pd.concat( 53 | [external_connectomes, IBC_connectomes], ignore_index=True 54 | ) 55 | connectomes.reset_index(inplace=True, drop=True) 56 | connectomes["Dataset, run"] = ( 57 | connectomes["tasks"] + ", run " + connectomes["run_labels"] 58 | ) 59 | 60 | # cov estimators 61 | cov_estimators = ["Unregularized", "Ledoit-Wolf", "Graphical-Lasso"] 62 | # connectivity measures for each cov estimator 63 | measures = ["correlation", "partial correlation"] 64 | 65 | output_dir = os.path.join(DATA_ROOT, "umap_ibc_external_gbu_robust") 66 | os.makedirs(output_dir, exist_ok=True) 67 | 68 | for cov_estimator in cov_estimators: 69 | for measure in measures: 70 | umap_reducer = umap.UMAP(random_state=42, n_components=2) 71 | 72 | # ibc connectomes 73 | ibc_fc = np.array( 74 | IBC_connectomes[f"{cov_estimator} {measure}"].tolist() 75 | ) 76 | ibc_fc = RobustScaler(unit_variance=True).fit_transform(ibc_fc) 77 | print(ibc_fc.shape) 78 | # external connectomes 79 | external_fc = np.array( 80 | external_connectomes[f"{cov_estimator} {measure}"].tolist() 81 | ) 82 | external_fc = RobustScaler(unit_variance=True).fit_transform( 83 | external_fc 84 | ) 85 | print(external_fc.shape) 86 | 87 | # connectomes[f"{cov_estimator} {measure}"] = connectomes[ 88 | # f"{cov_estimator} {measure}" 89 | # ].apply(lambda x: exposure.equalize_hist(np.array(x))) 90 | 91 | fc = np.concatenate([ibc_fc, external_fc], axis=0) 92 | print(fc.shape) 93 | 94 | fc_umap = umap_reducer.fit_transform(fc) 95 | fig, ax = plt.subplots() 96 | connectomes[f"{cov_estimator} {measure} umap 1"] = fc_umap[:, 0] 97 | connectomes[f"{cov_estimator} {measure} umap 2"] = fc_umap[:, 1] 98 | sns.scatterplot( 99 | data=connectomes, 100 | x=f"{cov_estimator} {measure} umap 1", 101 | y=f"{cov_estimator} 
{measure} umap 2", 102 | hue="Dataset, run", 103 | palette="Paired", 104 | ax=ax, 105 | hue_order=[ 106 | "IBC, run 1", 107 | "Mantini et al., run 1", 108 | "IBC, run 2", 109 | "Mantini et al., run 2", 110 | "IBC, run 3", 111 | "Mantini et al., run 3", 112 | ], 113 | s=100, 114 | ) 115 | sns.move_legend( 116 | ax, 117 | "upper center", 118 | ncol=1, 119 | # frameon=True, 120 | # shadow=True, 121 | bbox_to_anchor=(1.2, 1), 122 | ) 123 | plt.title(f"{cov_estimator} {measure}") 124 | 125 | plt.savefig( 126 | os.path.join( 127 | output_dir, 128 | f"umap_{cov_estimator}_{measure}.svg", 129 | ), 130 | bbox_inches="tight", 131 | ) 132 | plt.savefig( 133 | os.path.join( 134 | output_dir, 135 | f"umap_{cov_estimator}_{measure}.png", 136 | ), 137 | bbox_inches="tight", 138 | ) 139 | plt.close() 140 | -------------------------------------------------------------------------------- /scripts/expert.opts: -------------------------------------------------------------------------------- 1 | mri_em_register -p .5 2 | mris_fix_topology -s 100 -------------------------------------------------------------------------------- /scripts/ini_files/IBC_preproc_anat1.ini: -------------------------------------------------------------------------------- 1 | ###################################################################################### 2 | # 3 | # pypreprocess configuration. 4 | # 5 | # Copy this file to the acquisition directory containing the data you wish to 6 | # preprocess. Then, manually edit the values to customize the pipeline to suite your 7 | # needs. 8 | # 9 | # Disable a preprocessing step by setting 'disable = True' under the corresponding 10 | # section, or simply comment the section altogether. 11 | # 12 | # IMPORTANT NOTES 13 | # =============== 14 | # - indexing begins from 1 (matlab style) 15 | # - you can explicitly specifiy the software to be used for a specific stage of the 16 | # preprocessing by accordingly setting the 'software' field under the 17 | # corresponding section (e.g like so: software = spm) 18 | # - A value of 'auto', 'unspecified', 'none', etc. for a parameter means it should 19 | # be specified or inferred at run-time 20 | # 21 | # Authored by DOHMATOB Elvis Dopgima 22 | # 23 | ###################################################################################### 24 | 25 | [config] # DON'T TOUCH THIS LINE ! 26 | 27 | ########## 28 | # INPUT 29 | ########## 30 | 31 | protocol = anat1 32 | 33 | # Path (relative or full) of directory containing data (if different from directory 34 | # containing this configuration file). 35 | dataset_dir = /neurospin/ibc/derivatives/sub-01 36 | 37 | # Brief description of dataset (you can use html formatting) 38 | dataset_description = """ IBC dataset """ 39 | 40 | # The name of the dataset as will be shown in the report pages. Must be an integer 41 | # or auto 42 | dataset_id = auto 43 | 44 | # The number of subjects to include; by default all subjects are included. 45 | nsubjects = auto 46 | 47 | # List of (or wildcard for) subject id's to be ignored / excluded; must be space- 48 | # separated list of subject ids. 49 | exclude_these_subject_ids = 50 | 51 | # List of (or wildcard for) the only subjects to be included; must be space 52 | # separated list of subject ids. 53 | include_only_these_subject_ids = auto 54 | 55 | # Wildcard for, or space-separated list of, subject directories relative to the 56 | # acquisition directory 57 | subject_dirs = ses-* 58 | 59 | # Path of session-wise functional images, relative to the subject data dir. 
60 | # Wildcards are allowed. Each session must be specified in the form 61 | 62 | # Path of T1 (anat) image relative to the subject data dir 63 | anat = anat/sub-*_acq-highres_T1w.nii 64 | 65 | # Should caching (nipype, joblib, etc.) be used to safe ages of hard-earned computation ? 66 | caching = True 67 | 68 | # Number of jobs to be spawn altogether. 69 | n_jobs = 1 70 | 71 | # Should orientation meta-date be stripped-off image headers ? 72 | deleteorient = False 73 | 74 | # distortion correction ? 75 | disable_distortion_correction = False 76 | 77 | ############################ 78 | # Slice-Timing Correction 79 | ############################ 80 | 81 | # Don't you want us to do Slice-Timing Correction (STC) ? 82 | disable_slice_timing = True 83 | 84 | # Repetition Time 85 | TR = 2.0 86 | 87 | # Formula for Acquisition Time for single brain volume. 88 | TA = TR * (1 - 1 / nslices) 89 | 90 | # Can be ascending, descending, or an explicitly specified sequence. 91 | slice_order = ascending 92 | 93 | # Were the EPI slices interleaved ? 94 | interleaved = True 95 | 96 | # Reference slice (indexing begins from 1) 97 | refslice = 1 98 | 99 | # software to use for Slice-Timing Correction 100 | slice_timing_software = spm 101 | 102 | 103 | #################################### 104 | # Realignment (Motion Correction) 105 | #################################### 106 | 107 | # Don't do realignment / motion correction ? 108 | disable_realign = False 109 | 110 | # Register all volumes to the mean thereof ? 111 | register_to_mean = True 112 | 113 | # Reslice volumes ? 114 | realign_reslice = False 115 | 116 | # Software to use realignment / motion correction. Can be spm or fsl 117 | realign_software = spm 118 | 119 | 120 | ################### 121 | # Coregistration 122 | ################### 123 | 124 | # Don't you want us to do coregistration of T1 (anat) and fMRI (func) ? 125 | disable_coregister = False 126 | 127 | # During coregistration, do you want us to register func -> anat or anat -> func ? 128 | coreg_func_to_anat = True 129 | 130 | # Should we reslice files during coregistration ? 131 | coregister_reslice = False 132 | 133 | # Software to use for coregistration 134 | coregister_software = spm 135 | 136 | 137 | ######################## 138 | # Tissue Segmentation 139 | ######################## 140 | 141 | # Don't you want us to segment the brain (into gray-matter, white matter, csf, etc.) ? 142 | disable_segment = False 143 | 144 | # Software to use for tissue segmentation. 145 | segment_software = spm 146 | 147 | # Use spm's NewSegment ? 148 | newsegment = True 149 | 150 | ################## 151 | # Normalization 152 | ################## 153 | 154 | # Don't you want want us to normalize each subject's brain unto a template (MNI 155 | # for example) ? 156 | disable_normalize = False 157 | 158 | # Path to your template image. 159 | template = "MNI" 160 | 161 | # Voxel sizes of final func images 162 | func_write_voxel_sizes = [1.5, 1.5, 1.5] 163 | 164 | # Voxel sizes of final anat images 165 | anat_write_voxel_size = [.7, .7, .7] 166 | 167 | # Use dartel for normalization ? 168 | dartel = False 169 | 170 | # Software to use for normalization. 171 | normalize_software = spm 172 | 173 | 174 | ############## 175 | # Smoothing 176 | ############## 177 | 178 | # FWHM (in mm) of smoothing kernel. 
179 | fwhm = [0, 0, 0] 180 | 181 | 182 | ########### 183 | # Output 184 | ########### 185 | 186 | # Root directory (full path or relative to the directory containing this file) for 187 | # all output files and reports 188 | output_dir = /neurospin/ibc/derivatives/sub-01 189 | 190 | # Generate html reports ? 191 | report = True 192 | 193 | # Plot coefficient of variation post-preprocessing ? 194 | plot_tsdiffana = True 195 | 196 | scratch = /neurospin/tmp/ibc 197 | -------------------------------------------------------------------------------- /scripts/make_t1_template.py: -------------------------------------------------------------------------------- 1 | """ 2 | This script is meant to create a nice(r) 3 | t1 image template for better rendering 4 | """ 5 | import os 6 | import glob 7 | from pypreprocess.nipype_preproc_spm_utils import (do_subjects_preproc, 8 | SubjectData) 9 | from nilearn.image import mean_img 10 | import nibabel as nib 11 | 12 | # Set firectories first 13 | data_dir = '/neurospin/ibc/derivatives' 14 | scratch = '/neurospin/tmp/ibc' 15 | ref_img = os.path.join(data_dir, 'sub-01/ses-00/mask.nii.gz') 16 | output_dir = os.path.join(data_dir, 'group', 'anat') 17 | if not os.path.exists(output_dir): 18 | os.mkdir(output_dir) 19 | 20 | # glob for subject ids 21 | subject_id_wildcard = 'sub-*' 22 | subject_ids = [os.path.basename(x) 23 | for x in glob.glob(os.path.join(data_dir, subject_id_wildcard))] 24 | 25 | anats = glob.glob( 26 | os.path.join( 27 | data_dir, 'sub*', 'ses-*', 'anat', 'sub-*_ses-*_acq-highres_T1w.nii')) 28 | for anat in anats: 29 | img = nib.load(anat) 30 | nib.Nifti1Image(img.get_data().astype('int32'), img.affine).to_filename(anat) 31 | 32 | # producer subject data 33 | def subject_factory(): 34 | anats = glob.glob( 35 | os.path.join( 36 | data_dir, 'sub*', 'ses-*', 'anat', 'sub-*_ses-*_acq-highres_T1w.nii')) 37 | subject_sessions = [(anat.split('/')[-4], anat.split('/')[-3]) for anat in anats] 38 | subject_sessions = [('sub-01', 'ses-12')] 39 | for subject_session in subject_sessions: 40 | subject, session = subject_session 41 | subject_data = SubjectData(isdicom=False, scratch=scratch, session_output_dirs=[], n_sessions=0) 42 | subject_data.subject_id = subject 43 | subject_data.anat = os.path.join(data_dir, subject, session, 'anat', 44 | '%s_%s_acq-highres_T1w.nii' % (subject, session)) 45 | subject_data.func = [] 46 | subject_data.output_dir = os.path.join( 47 | data_dir, subject, session, 'anat', 'dartel') 48 | # yield data for this subject 49 | yield subject_data 50 | 51 | # do preprocessing proper 52 | report_filename = os.path.join(output_dir, '_report.html') 53 | 54 | do_subjects_preproc( 55 | subject_factory(), 56 | dataset_id='ibc', 57 | output_dir=output_dir, 58 | do_report=True, 59 | do_dartel=True, 60 | dataset_description="ibc", 61 | report_filename=report_filename, 62 | do_shutdown_reloaders=True,) 63 | 64 | 65 | # Create mean images for masking and display 66 | wanats = sorted(glob.glob(os.path.join(data_dir, 'sub-*', 'ses-*', 'anat', 'dartel', 67 | 'w*_ses-*_acq-highres_T1w.nii.gz'))) 68 | template = mean_img(wanats) 69 | template.to_filename(os.path.join(output_dir, 'highres_T1avg.nii.gz')) 70 | 71 | mgms = sorted(glob.glob(os.path.join(data_dir, 'sub-*', 'ses-*', 'anat', 'dartel', 72 | 'mwc1*_ses-*_acq-highres_T1w.nii.gz'))) 73 | 74 | # take a reference functional image 75 | ref_image = nib.load(ref_img) 76 | ref_affine = ref_image.affine 77 | ref_shape = ref_image.shape 78 | mean_gm = mean_img( 79 | mgms, target_affine=ref_affine, 
target_shape=ref_shape) 80 | gm_mask = nib.Nifti1Image((mean_gm.get_data() > .25).astype('uint8'), 81 | ref_affine) 82 | mean_gm.to_filename(os.path.join(output_dir, 'mean_highres_gm.nii.gz')) 83 | gm_mask.to_filename(os.path.join(output_dir, 'highres_gm_mask.nii.gz')) 84 | -------------------------------------------------------------------------------- /scripts/qmri_README.md: -------------------------------------------------------------------------------- 1 | ## Get it working: 2 | 3 | * Install some of the dependencies as follows: 4 | 5 | pip3 install nilearn nibabel matplotlib numpy joblib dicom progressbar Cython 6 | 7 | * Install `qmri` from bioproj (requires CEA login) as follows: 8 | 9 | git clone -b ibc_changes https://bioproj.extra.cea.fr/git/qmri 10 | cd qmri 11 | python3 setup.py build_ext 12 | python3 setup.py install 13 | 14 | * Install `pypreprocess` by following the instructions given [here](https://github.com/neurospin/pypreprocess) 15 | 16 | * After installing all the dependencies, to run, simply do: 17 | 18 | python3 qmri_run_estimation.py 19 | 20 | ## qMRI preprocessing and estimation pipeline: 21 | 22 | * `qmri_run_estimation.py` runs 4 variations of preprocessing and qmri-estimation pipelines on all 12 IBC subjects for which the data is available: 23 | 24 | * T2 estimation on qmri images in subject-space 25 | * T2 estimation on qmri images in MNI-space 26 | * T1 estimation on qmri images in subject-space 27 | * T1 estimation on qmri images in MNI-space 28 | 29 | * `qmri_run_estimation.py` imports from `ibc_public/util_relaxo.py` containing preprocessing steps for qmri images that happen before estimation, run the estimation itself (explained in next point), and plot the final estimated maps: 30 | 31 | * all preprocessing is done using `pypreprocess` 32 | 33 | * function `t1_pipeline`: 34 | 35 | 1. checks whether sourcedata is defaced (by looking for "Deface: True" field in .json sidecars) - if not, defaces it using `pydeface` 36 | 2. copies and extracts t1 images from sourcedata in a tmp directory 37 | 3. checks whether to return maps in MNI or subject-space: 38 | * if `do_normalise_before` is set to True, segments highest flip angle image and uses the segments to run spatial normalisation and transforms the raw images to MNI space and then computes mask in MNI space 39 | * if `do_normalise_before` is set to False, segments highest flip angle image and uses the segments to compute a mask in subject-space to remove the skull 40 | 4. extracts relevant acquisition parameters from .json sidecar (by running `scripts/qmri_t1_map_b1_params.py`) 41 | 5. runs qmri t1 estimation pipeline (`scripts/qmri_t1_map_b1.py`) 42 | 6. thresholds voxel intensity at 99 percentile and plots the estimated maps. 43 | 44 | * function `t2_pipeline`: 45 | 46 | 1. checks whether sourcedata is defaced (by looking for "Deface: True" field in .json sidecars) - if not, defaces it using `pydeface` 47 | 2. copies and extracts t2 images from sourcedata in a tmp directory 48 | 3. since t2 maps are low resolution, also copies highest flip-angle t1 image for better masking 49 | 3. 
checks whether to return maps in MNI or subject-space: 50 | * if `do_normalise_before` is set to True, segments the t1 image and uses the segments to run spatial normalisation to transform the raw t1 and t2 images to MNI space and then use the high res t1 image to compute mask in MNI-space 51 | * if `do_normalise_before` is set to False, corregisters the t1 image to t2 image and uses the high res corregistered t1 image to compute a mask in subject-space 52 | 4. runs qmri t2 estimation pipeline (`scripts/t2_map.py`) 53 | 6. thresholds voxel intensity at 95 percentile and plots the estimated maps. 54 | 55 | * Package [`qmri`](https://bioproj.extra.cea.fr/git/qmri), hosted on bioproj, is used for T1 and T2 map estimation. 56 | 57 | * `scripts/qmri_t1_map_b1.py` (for T1 estimation) and `scripts/qmri_t2_map.py` (for T2 estimation) are the scripts containing estimation pipelines using `qmri`. 58 | 59 | ## Author: 60 | 61 | Himanshu Aggarwal 62 | himanshu.aggarwal@inria.fr 63 | 2021-22 -------------------------------------------------------------------------------- /scripts/qmri_T2star_echo-times.json: -------------------------------------------------------------------------------- 1 | {"TEs": [2.03, 5.84, 9.65, 13.46, 17.27, 21.08, 24.89, 28.7, 32.51, 36.32, 40.13, 43.94]} -------------------------------------------------------------------------------- /scripts/qmri_run_estimation.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: Himanshu Aggarwal (himanshu.aggarwal@inria.fr), 2021-22 3 | """ 4 | 5 | from ibc_public.utils_relaxo import t1_pipeline, t2_pipeline, t2star_pipeline 6 | from joblib import Parallel, delayed 7 | 8 | def run_all_estimation(data_root_path, sub, sess): 9 | """ Run t2. t1 and t2star preproc and estimation pipelines, that return qmri 10 | maps in subject-space and in MNI-152 space 11 | 12 | Parameters 13 | ---------- 14 | data_root_path : str 15 | root directory path consisting of the subject sub-directories that then 16 | consist of all sessions 17 | sub : str 18 | subject name, also the name of the directory consisting of MRI data for 19 | all sessions 20 | sess : str 21 | session number, also the name of the directory consisting of qMRI data 22 | for the specific subject 23 | """ 24 | 25 | # T2 estimation in subject space 26 | print('Running t2-est without spat. norm. for {}'.format(sub)) 27 | t2_pipeline(sub_name=sub, sess_num=sess, do_plot=False, keep_tmp=True, 28 | root_path=data_root_path) 29 | 30 | # T2 estimation in MNI space 31 | print('Running t2-est with spat. norm. for {}'.format(sub)) 32 | t2_pipeline(do_normalise_before=True, sub_name=sub, sess_num=sess, 33 | do_plot=False, keep_tmp=False, root_path=data_root_path) 34 | 35 | # T1 estimation in subject space 36 | print('Running t1-est without spat. norm. for {}'.format(sub)) 37 | t1_pipeline(sub_name=sub, sess_num=sess, do_plot=False, keep_tmp=True, 38 | root_path=data_root_path) 39 | 40 | # T1 estimation in MNI space 41 | print('Running t1-est with spat. norm. for {}'.format(sub)) 42 | t1_pipeline(do_normalise_before=True, sub_name=sub, sess_num=sess, 43 | do_plot=False, keep_tmp=False, root_path=data_root_path) 44 | 45 | # T2-star estimation in subject space 46 | print('Running t2star-est without spat. norm. for {}'.format(sub)) 47 | t2star_pipeline(do_normalise_before=False, sub_name=sub, sess_num=sess, 48 | do_plot=False, keep_tmp=True, root_path=data_root_path) 49 | 50 | # T2-star estimation in MNI space 51 | print('Running t2star-est with spat. norm. 
for {}'.format(sub)) 52 | t2star_pipeline(do_normalise_before=True, sub_name=sub, sess_num=sess, 53 | do_plot=False, keep_tmp=True, root_path=data_root_path) 54 | 55 | if __name__ == "__main__": 56 | 57 | # root location of sourcedata and derivatives 58 | DATA_ROOT = '/neurospin/ibc/' 59 | 60 | # relaxometry session numbers for each subject 61 | sub_sess = {'sub-01': 'ses-21', 'sub-04': 'ses-20', 'sub-05': 'ses-22', 62 | 'sub-06': 'ses-20', 'sub-07': 'ses-20', 'sub-08': 'ses-35', 63 | 'sub-09': 'ses-19', 'sub-11': 'ses-17', 'sub-12': 'ses-17', 64 | 'sub-13': 'ses-20', 'sub-14': 'ses-20', 'sub-15': 'ses-18'} 65 | 66 | Parallel(n_jobs=1)(delayed(run_all_estimation)(DATA_ROOT, sub, sess) 67 | for sub, sess in sub_sess.items()) 68 | -------------------------------------------------------------------------------- /scripts/qmri_t1_map_b1_params.py: -------------------------------------------------------------------------------- 1 | ########################################################################## 2 | # NSAp - Copyright (C) CEA, 2016 - 2021 3 | # Distributed under the terms of the CeCILL-B license, as published by 4 | # the CEA-CNRS-INRIA. Refer to the LICENSE file or to 5 | # http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html 6 | # for details. 7 | ########################################################################## 8 | 9 | # Sytem import 10 | import argparse 11 | import json 12 | import os 13 | 14 | # Qmri import 15 | from qmri.t1.t1_io import load_b1_sequence_parameters 16 | from qmri.t1.t1_io import load_gre_sequence_parameters 17 | 18 | 19 | # Script documentation 20 | doc = """ 21 | T1 map reconstruction parameters 22 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 23 | 24 | Get the B1 and GRE sequences associated parameters. 25 | """ 26 | 27 | def is_file(filearg): 28 | """ Type for argparse - checks that file exists but does not open. 29 | """ 30 | if not os.path.isfile(filearg): 31 | raise argparse.ArgumentError( 32 | "The file '{0}' does not exist!".format(filearg)) 33 | return filearg 34 | 35 | 36 | def is_directory(dirarg): 37 | """ Type for argparse - checks that directory exists. 
38 | """ 39 | if not os.path.isdir(dirarg): 40 | raise argparse.ArgumentError( 41 | "The directory '{0}' does not exist!".format(dirarg)) 42 | return dirarg 43 | 44 | 45 | parser = argparse.ArgumentParser(description=doc) 46 | parser.add_argument( 47 | "-v", "--verbose", dest="verbose", type=int, choices=[0, 1], default=0, 48 | help="increase the verbosity level: 0 silent, 1 verbose.") 49 | parser.add_argument( 50 | "-s", "--subjectid", dest="subjectid", required=True, 51 | help="the subject code in study.") 52 | parser.add_argument( 53 | "-o", "--outdir", dest="outdir", required=True, metavar="PATH", 54 | help="the destination directory.", type=is_directory) 55 | parser.add_argument( 56 | "-g", "--jsongres", dest="jsongres", nargs="+", required=True, 57 | help="one json file of each GRE sequence.", type=is_file) 58 | parser.add_argument( 59 | "-b", "--jsonb1", dest="jsonb1", metavar="FILE", required=True, 60 | help="one json file of the b1 sequence.", type=is_file) 61 | args = parser.parse_args() 62 | 63 | 64 | """ 65 | Welcome message and checks 66 | """ 67 | verbose = args.verbose 68 | if verbose > 0: 69 | print("[INFO] Starting B1 and GRE sequences parameters extraction...") 70 | b1jsonfile = args.jsonb1 71 | grejsonfiles = args.jsongres 72 | working_dir = args.outdir 73 | subjectid = args.subjectid 74 | if verbose > 0: 75 | print("[INFO] b1jsonfile: '{0}'.".format(b1jsonfile)) 76 | print("[INFO] grejsonfiles: '{0}'.".format(grejsonfiles)) 77 | print("[INFO] working directory: '{0}'.".format(working_dir)) 78 | 79 | 80 | """ 81 | Load sequences and sequences parameters 82 | """ 83 | b1fa, b1tr = load_b1_sequence_parameters(b1jsonfile) 84 | gre_sequences = [] 85 | for fjson in grejsonfiles: 86 | gre_sequences.append(load_gre_sequence_parameters(fjson)) 87 | grefas = [x[0] for x in gre_sequences] 88 | gretrs = [x[1] for x in gre_sequences] 89 | if verbose > 0: 90 | print("[INFO] b1fa: {0}.".format(b1fa)) 91 | print("[INFO] b1tr: {0}.".format(b1tr)) 92 | print("[INFO] grefas: {0}.".format(grefas)) 93 | print("[INFO] gretrs: {0}.".format(gretrs)) 94 | 95 | 96 | """ 97 | Save the result following bids (http://bids.neuroimaging.io/) standard. 98 | """ 99 | record = { 100 | subjectid: { 101 | "B1": { 102 | "FlipAngle": b1fa, 103 | "RepetitionTime": b1tr 104 | }, 105 | "GRE": { 106 | "FlipAngle": grefas, 107 | "RepetitionTime": gretrs 108 | } 109 | } 110 | } 111 | record_file = os.path.join(working_dir, "{0}_t1_map_b1.json".format(subjectid)) 112 | with open(record_file, "wt") as open_file: 113 | json.dump(record, open_file, indent=4) 114 | if verbose > 0: 115 | print("[INFO] record: {0}.".format(record_file)) 116 | 117 | 118 | 119 | -------------------------------------------------------------------------------- /scripts/qmri_t2_map.py: -------------------------------------------------------------------------------- 1 | ########################################################################## 2 | # NSAp - Copyright (C) CEA, 2016 3 | # Alexandre Vignaud - Yann Leprince 4 | # Distributed under the terms of the CeCILL-B license, as published by 5 | # the CEA-CNRS-INRIA. Refer to the LICENSE file or to 6 | # http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html 7 | # for details. 
8 | ########################################################################## 9 | 10 | # Sytem import 11 | from __future__ import print_function 12 | import argparse 13 | import os 14 | import numpy 15 | 16 | # Qmri import 17 | from qmri.t2.t2_io import get_serie_echo_times 18 | from qmri.t2.t2_io import load_sequence 19 | from qmri.t2.t2_io import save_sequence 20 | from qmri.t2.decay_fit import decay_fit 21 | 22 | 23 | # Script documentation 24 | doc = """ 25 | T2 map reconstruction 26 | ~~~~~~~~~~~~~~~~~~~~~ 27 | 28 | Reconstruct the t2 map from a multi-constrast t2 relaxometry sequence. 29 | 30 | Command: 31 | 32 | python $HOME/git/qmri/qmri/scripts/t2_map \ 33 | -v 1 \ 34 | -s sub-11 35 | -o /neurospin/tmp/agrigis/qmri/processed \ 36 | -n /neurospin/tmp/agrigis/qmri/data/s19.nii.gz \ 37 | -d /neurospin/tmp/agrigis/qmri/data/000019_relaxometry-T2-tra-2mm-multise-12contrastes \ 38 | -c /etc/fsl/5.0/fsl.sh \ 39 | -t 0.3 \ 40 | -m 41 | """ 42 | 43 | def is_file(filearg): 44 | """ Type for argparse - checks that file exists but does not open. 45 | """ 46 | if not os.path.isfile(filearg): 47 | raise argparse.ArgumentError( 48 | "The file '{0}' does not exist!".format(filearg)) 49 | return filearg 50 | 51 | 52 | def is_directory(dirarg): 53 | """ Type for argparse - checks that directory exists. 54 | """ 55 | if not os.path.isdir(dirarg): 56 | raise argparse.ArgumentError( 57 | "The directory '{0}' does not exist!".format(dirarg)) 58 | return dirarg 59 | 60 | 61 | parser = argparse.ArgumentParser(description=doc) 62 | parser.add_argument( 63 | "-v", "--verbose", dest="verbose", type=int, choices=[0, 1], default=0, 64 | help="increase the verbosity level: 0 silent, 1 verbose.") 65 | parser.add_argument( 66 | "-s", "--subjectid", dest="subjectid", required=True, 67 | help="the subject code in study.") 68 | parser.add_argument( 69 | "-o", "--outdir", dest="outdir", required=True, metavar="PATH", 70 | help="the reconstruction directory.", type=is_directory) 71 | parser.add_argument( 72 | "-n", "--niirelaxo", dest="niirelaxo", metavar="FILE", required=True, 73 | help="the relaxometry nifti image.", type=is_file) 74 | parser.add_argument( 75 | "-m", "--mask", dest="mask", metavar="FILE", 76 | help=("the mask file.")) 77 | parser.add_argument( 78 | "-t", "--thresh", dest="thresh", default=0.5, type=float, 79 | help="fractional intensity threshold (0->1), smaller values give larger " 80 | "brain outline estimates.") 81 | args = parser.parse_args() 82 | 83 | 84 | # Welcome message and checks 85 | verbose = args.verbose 86 | if verbose > 0: 87 | print("[INFO] T2 map computation using the analytic method...") 88 | niirelaxo = args.niirelaxo 89 | subjectid = args.subjectid 90 | mask_file = args.mask 91 | if verbose > 0: 92 | print("[INFO] niirelaxo: '{0}'.".format(niirelaxo)) 93 | 94 | # Create the working directory 95 | working_dir = args.outdir 96 | if verbose > 0: 97 | print("[INFO] Working directory: '{0}'.".format(working_dir)) 98 | 99 | # Load sequence and sequence parameters 100 | echo_times = get_serie_echo_times(niirelaxo) 101 | relaxoarray, relaxoaffine = load_sequence(niirelaxo) 102 | if len(echo_times) != relaxoarray.shape[-1]: 103 | raise Exception("Wrong echo number in '{0}'.".format(echo_times)) 104 | teparms = [] 105 | for key in sorted(echo_times.keys()): 106 | teparms.append(echo_times[key]) 107 | teparms = numpy.asarray(teparms) 108 | 109 | # Estimate the t2 110 | t2array = decay_fit(relaxoarray, teparms, maskfile=mask_file) 111 | t2file = os.path.join(working_dir, 
"{0}_T2map.nii.gz".format(subjectid)) 112 | save_sequence(t2array, relaxoaffine, t2file) 113 | print("[INFO] t2file: '{0}'".format(t2file)) 114 | 115 | -------------------------------------------------------------------------------- /scripts/script_resample_normalized_data.py: -------------------------------------------------------------------------------- 1 | """ 2 | This script resampled normalized data to a reference shape: (105, 127, 105) 3 | """ 4 | import glob 5 | from nilearn.image import resample_to_img 6 | from joblib import Parallel, delayed 7 | import nibabel as nib 8 | import os 9 | 10 | SMOOTH_DERIVATIVES = '/neurospin/ibc/smooth_derivatives' 11 | DERIVATIVES = '/neurospin/ibc/derivatives' 12 | THREE_MM = '/neurospin/ibc/3mm' 13 | _package_directory = os.path.dirname(os.path.abspath(__file__)) 14 | 15 | 16 | def _resample(img, reference, target=None): 17 | rimg = resample_to_img(img, reference) 18 | if target is not None: 19 | rimg.to_filename(target) 20 | else: 21 | rimg.to_filename(img) 22 | print(img) 23 | 24 | 25 | def _prepare_targets(imgs): 26 | """Prepare target filenames""" 27 | targets = [] 28 | for img in imgs: 29 | parts = img.split('/') 30 | subject_dir = os.path.join(THREE_MM, parts[-4]) 31 | if not os.path.exists(subject_dir): 32 | print(subject_dir) 33 | os.mkdir(subject_dir) 34 | sess_dir = os.path.join(subject_dir, parts[-3]) 35 | if not os.path.exists(sess_dir): 36 | print(sess_dir) 37 | os.mkdir(sess_dir) 38 | func_dir = os.path.join(sess_dir, 'func') 39 | if not os.path.exists(func_dir): 40 | print(func_dir) 41 | os.mkdir(func_dir) 42 | target = os.path.join(func_dir, os.path.basename(img)) 43 | targets.append(target) 44 | return targets 45 | 46 | 47 | def resample_func_data(n_jobs=2): 48 | reference = os.path.join( 49 | _package_directory, '../ibc_data', 'gm_mask_1_5mm.nii.gz') 50 | imgs = glob.glob(os.path.join(DERIVATIVES, 51 | 'sub-*', 'ses-*', 'func', 'wrdcsub-*.nii.gz')) 52 | Parallel(n_jobs=n_jobs)( 53 | delayed(_resample)(img, reference) for img in imgs 54 | if (nib.load(img).shape[2] != 105) 55 | and ('RestingState' not in img)) 56 | 57 | 58 | def resample_anat_data(n_jobs=2): 59 | reference = os.path.join(DERIVATIVES, 'sub-01', 'ses-00', 'anat', 60 | 'wsub-01_ses-00_T1w.nii.gz') # FIXME 61 | imgs = glob.glob(os.path.join(DERIVATIVES, 'sub-*', 'ses-*', 'anat', 62 | 'mwc*sub-*_ses-*_T1w.nii.gz')) 63 | reference_shape = nib.load(reference).shape 64 | Parallel(n_jobs=n_jobs)( 65 | delayed(_resample)(img, reference) for img in imgs 66 | if nib.load(img).shape != reference_shape) 67 | 68 | 69 | def resample_3mm_func_data(n_jobs=2): 70 | reference = os.path.join( 71 | _package_directory, '../ibc_data', 'gm_mask_3mm.nii.gz') 72 | wc = os.path.join(DERIVATIVES, 'sub-*/ses-*/func/wrdcsub-*.nii.gz') 73 | imgs = glob.glob(wc) 74 | targets = _prepare_targets(imgs) 75 | Parallel(n_jobs=n_jobs)( 76 | delayed(_resample)(img, reference, target) for (img, target) in 77 | zip(imgs, targets) 78 | if not os.path.exists(target)) 79 | 80 | 81 | def resample_func_and_anat(n_jobs=4): 82 | resample_func_data(n_jobs=n_jobs) 83 | resample_anat_data(n_jobs=n_jobs) 84 | resample_3mm_func_data(n_jobs=n_jobs) 85 | 86 | 87 | if __name__ == '__main__': 88 | resample_func_and_anat() 89 | -------------------------------------------------------------------------------- /scripts/script_skull_stripping.py: -------------------------------------------------------------------------------- 1 | 2 | import glob 3 | import os 4 | from nilearn.image import math_img, resample_to_img 5 | from 
scipy.ndimage import grey_closing 6 | from nibabel import Nifti1Image 7 | 8 | derivatives = '/neurospin/ibc/derivatives' 9 | subjects = sorted(glob.glob(os.path.join(derivatives, 'sub-*'))) 10 | """ 11 | 12 | for subject in subjects: 13 | subject_id = os.path.basename(subject) 14 | src = os.path.join(subject, 'ses-00', 'anat', 15 | 'w%s_ses-00_T1w.nii.gz' % subject_id) 16 | dst = os.path.join(subject, 'ses-00', 'anat', 17 | 'w%s_ses-00_T1w_bet.nii.gz' % subject_id) 18 | wm = os.path.join(subject, 'ses-00', 'anat', 19 | 'mwc2%s_ses-00_T1w.nii.gz' % subject_id) 20 | gm = os.path.join(subject, 'ses-00', 'anat', 21 | 'mwc1%s_ses-00_T1w.nii.gz' % subject_id) 22 | brain = math_img('i1 + i2', i1=wm, i2=gm) 23 | brain.to_filename('/tmp/brain_%s.nii.gz' % subject_id) 24 | large_brain = resample_to_img(brain, src) 25 | large_brain_data = grey_closing(large_brain.get_data(), 5) 26 | large_brain = Nifti1Image(large_brain_data, large_brain.get_affine()) 27 | bet_img = math_img('i1 * i2', i1=large_brain, i2=src) 28 | bet_img.to_filename(dst) 29 | """ 30 | 31 | subject_sessions = [('sub-14', 'ses-05'), 32 | ('sub-04', 'ses-08'), 33 | ('sub-05', 'ses-08'), 34 | ('sub-06', 'ses-09'), 35 | ('sub-07', 'ses-09'), 36 | ('sub-08', 'ses-09'), 37 | ('sub-09', 'ses-09'), 38 | ('sub-11', 'ses-09'), 39 | ('sub-12', 'ses-09'), 40 | ('sub-13', 'ses-09'), 41 | ('sub-01', 'ses-10'), 42 | ('sub-01', 'ses-10')] 43 | 44 | for (subject, session) in subject_sessions: 45 | subject_id = subject 46 | src = os.path.join(derivatives, subject, session, 'anat', 47 | 'w%s_%s_acq-highres_T1w.nii.gz' % (subject_id, session)) 48 | dst = os.path.join(derivatives, subject, session, 'anat', 49 | 'w%s_%s_acq-highres_T1w_bet.nii.gz' % 50 | (subject_id, session)) 51 | wm = os.path.join(derivatives, subject, session, 'anat', 52 | 'mwc2%s_%s_acq-highres_T1w.nii.gz' % 53 | (subject_id, session)) 54 | gm = os.path.join(derivatives, subject, session, 'anat', 55 | 'mwc1%s_%s_acq-highres_T1w.nii.gz' % 56 | (subject_id, session)) 57 | brain = math_img('i1 + i2', i1=wm, i2=gm) 58 | brain.to_filename('/tmp/brain_%s.nii.gz' % subject_id) 59 | large_brain = resample_to_img(brain, src) 60 | large_brain_data = grey_closing(large_brain.get_data(), 5) 61 | large_brain = Nifti1Image(large_brain_data, large_brain.get_affine()) 62 | bet_img = math_img('i1 * i2', i1=large_brain, i2=src) 63 | bet_img.to_filename(dst) 64 | -------------------------------------------------------------------------------- /scripts/sin_cos_regressors.csv: -------------------------------------------------------------------------------- 1 | cos,sin 2 | 1,0 3 | 0.9238795325,0.3826834324 4 | 0.7071067812,0.7071067812 5 | 0.3826834324,0.9238795325 6 | 6.12323399573677E-17,1 7 | -0.3826834324,0.9238795325 8 | -0.7071067812,0.7071067812 9 | -0.9238795325,0.3826834324 10 | -1,1.22464679914735E-16 11 | -0.9238795325,-0.3826834324 12 | -0.7071067812,-0.7071067812 13 | -0.3826834324,-0.9238795325 14 | -1.83697019872103E-16,-1 15 | 0.3826834324,-0.9238795325 16 | 0.7071067812,-0.7071067812 17 | 0.9238795325,-0.3826834324 18 | 1,-2.44929359829471E-16 19 | 0.9238795325,0.3826834324 20 | 0.7071067812,0.7071067812 21 | 0.3826834324,0.9238795325 22 | 3.06161699786838E-16,1 23 | -0.3826834324,0.9238795325 24 | -0.7071067812,0.7071067812 25 | -0.9238795325,0.3826834324 26 | -1,3.67394039744206E-16 27 | -0.9238795325,-0.3826834324 28 | -0.7071067812,-0.7071067812 29 | -0.3826834324,-0.9238795325 30 | -4.28626379701574E-16,-1 31 | 0.3826834324,-0.9238795325 32 | 0.7071067812,-0.7071067812 33 | 
0.9238795325,-0.3826834324 34 | 1,-4.89858719658941E-16 35 | 0.9238795325,0.3826834324 36 | 0.7071067812,0.7071067812 37 | 0.3826834324,0.9238795325 38 | 5.51091059616309E-16,1 39 | -0.3826834324,0.9238795325 40 | -0.7071067812,0.7071067812 41 | -0.9238795325,0.3826834324 42 | -1,6.12323399573677E-16 43 | -0.9238795325,-0.3826834324 44 | -0.7071067812,-0.7071067812 45 | -0.3826834324,-0.9238795325 46 | -2.44991257893129E-15,-1 47 | 0.3826834324,-0.9238795325 48 | 0.7071067812,-0.7071067812 49 | 0.9238795325,-0.3826834324 50 | 1,-7.34788079488412E-16 51 | 0.9238795325,0.3826834324 52 | 0.7071067812,0.7071067812 53 | 0.3826834324,0.9238795325 54 | -9.80336419954471E-16,1 55 | -0.3826834324,0.9238795325 56 | -0.7071067812,0.7071067812 57 | -0.9238795325,0.3826834324 58 | -1,8.57252759403147E-16 59 | -0.9238795325,-0.3826834324 60 | -0.7071067812,-0.7071067812 61 | -0.3826834324,-0.9238795325 62 | -2.69484193876077E-15,-1 63 | 0.3826834324,-0.9238795325 64 | 0.7071067812,-0.7071067812 65 | 0.9238795325,-0.3826834324 66 | 1,-9.79717439317883E-16 67 | 0.9238795325,0.3826834324 68 | 0.7071067812,0.7071067812 69 | 0.3826834324,0.9238795325 70 | -7.35407060125E-16,1 71 | -0.3826834324,0.9238795325 72 | -0.7071067812,0.7071067812 73 | -0.9238795325,0.3826834324 74 | -1,1.10218211923262E-15 75 | -0.9238795325,-0.3826834324 76 | -0.7071067812,-0.7071067812 77 | -0.3826834324,-0.9238795325 78 | -2.93977129859024E-15,-1 79 | 0.3826834324,-0.9238795325 80 | 0.7071067812,-0.7071067812 81 | 0.9238795325,-0.3826834324 82 | 1,-1.22464679914735E-15 83 | 0.9238795325,0.3826834324 84 | 0.7071067812,0.7071067812 85 | 0.3826834324,0.9238795325 86 | -4.9047770029553E-16,1 87 | -0.3826834324,0.9238795325 88 | -0.7071067812,0.7071067812 89 | -0.9238795325,0.3826834324 90 | -1,4.89982515786259E-15 91 | -0.9238795325,-0.3826834324 92 | -0.7071067812,-0.7071067812 93 | -0.3826834324,-0.9238795325 94 | -3.18470065841971E-15,-1 95 | 0.3826834324,-0.9238795325 96 | 0.7071067812,-0.7071067812 97 | 0.9238795325,-0.3826834324 98 | 1,-1.46957615897682E-15 99 | 0.9238795325,0.3826834324 100 | 0.7071067812,0.7071067812 101 | 0.3826834324,0.9238795325 102 | -2.45548340466059E-16,1 103 | -0.3826834324,0.9238795325 104 | -0.7071067812,0.7071067812 105 | -0.9238795325,0.3826834324 106 | -1,-1.96067283990894E-15 107 | -0.9238795325,-0.3826834324 108 | -0.7071067812,-0.7071067812 109 | -0.3826834324,-0.9238795325 110 | -3.42963001824918E-15,-1 111 | 0.3826834324,-0.9238795325 112 | 0.7071067812,-0.7071067812 113 | 0.9238795325,-0.3826834324 114 | 1,-1.71450551880629E-15 115 | 0.9238795325,0.3826834324 116 | 0.7071067812,0.7071067812 117 | 0.3826834324,0.9238795325 118 | -6.18980636588358E-19,1 119 | -0.3826834324,0.9238795325 120 | -0.7071067812,0.7071067812 121 | -0.9238795325,0.3826834324 122 | -1,5.38968387752153E-15 123 | -0.9238795325,-0.3826834324 124 | -0.7071067812,-0.7071067812 125 | -0.3826834324,-0.9238795325 126 | -3.67455937807865E-15,-1 127 | 0.3826834324,-0.9238795325 128 | 0.7071067812,-0.7071067812 129 | 0.9238795325,-0.3826834324 130 | 1,-1.95943487863577E-15 131 | 0.9238795325,0.3826834324 132 | 0.7071067812,0.7071067812 133 | 0.3826834324,0.9238795325 134 | 2.44310379192882E-16,1 135 | -0.3826834324,0.9238795325 136 | -0.7071067812,0.7071067812 137 | -0.9238795325,0.3826834324 138 | -1,-1.47081412025E-15 139 | -0.9238795325,-0.3826834324 140 | -0.7071067812,-0.7071067812 141 | -0.3826834324,-0.9238795325 142 | -3.91948873790812E-15,-1 143 | 0.3826834324,-0.9238795325 144 | 0.7071067812,-0.7071067812 145 | 
0.9238795325,-0.3826834324 146 | 1,-2.20436423846524E-15 147 | 0.9238795325,0.3826834324 148 | 0.7071067812,0.7071067812 149 | 0.3826834324,0.9238795325 150 | 4.89239739022353E-16,1 151 | -0.3826834324,0.9238795325 152 | -0.7071067812,0.7071067812 153 | -0.9238795325,0.3826834324 154 | -1,5.87954259718047E-15 155 | -0.9238795325,-0.3826834324 156 | -0.7071067812,-0.7071067812 157 | -0.3826834324,-0.9238795325 158 | -4.16441809773759E-15,-1 159 | 0.3826834324,-0.9238795325 160 | 0.7071067812,-0.7071067812 161 | 0.9238795325,-0.3826834324 162 | 1,-2.44929359829471E-15 163 | 0.9238795325,0.3826834324 164 | 0.7071067812,0.7071067812 165 | 0.3826834324,0.9238795325 166 | 7.83959645645282E-15,1 167 | -------------------------------------------------------------------------------- /scripts/surface_glm_only.py: -------------------------------------------------------------------------------- 1 | """ 2 | :Synopsis: script for GLM and stats only on IBC datasets 3 | 4 | :Author: THIRION Bertrand 5 | 6 | """ 7 | import os 8 | from pypreprocess.conf_parser import _generate_preproc_pipeline 9 | from joblib import Parallel, delayed 10 | from ibc_public.utils_pipeline import fixed_effects_analysis, first_level 11 | 12 | from pipeline import (clean_subject, clean_anatomical_images, 13 | _adapt_jobfile, get_subject_session) 14 | 15 | 16 | SUBJECTS = [1, 2, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15] 17 | retino_sessions = ['task-WedgeAnti_dir-pa', 18 | 'task-WedgeClock_dir-ap', 19 | 'task-ContRing_dir-ap', 20 | 'task-WedgeAnti_dir-ap', 21 | 'task-ExpRing_dir-pa', 22 | 'task-WedgeClock_dir-pa'] 23 | RETINO_REG = dict([(session_id, 'sin_cos_regressors.csv') 24 | for session_id in retino_sessions]) 25 | IBC = 'neurospin/ibc' 26 | 27 | 28 | def generate_glm_input(jobfile, mesh=None): 29 | """ retrun a list of dictionaries that represent the data available 30 | for GLM analysis""" 31 | list_subjects, params = _generate_preproc_pipeline(jobfile) 32 | output = [] 33 | for subject in list_subjects: 34 | output_dir = subject.output_dir 35 | reports_output_dir = os.path.join(output_dir, 'reports') 36 | basenames = ['wr' + os.path.basename(func_)[:-3] 37 | for func_ in subject.func] 38 | gii_basenames = ['r' + os.path.basename(func_).split('.')[0] + 39 | '_{}_lh.gii'.format(mesh) for func_ in subject.func] 40 | gii_basenames += ['r' + os.path.basename(func_).split('.')[0] + 41 | '_{}_rh.gii'.format(mesh) for func_ in subject.func] 42 | func = [os.path.join(output_dir, 'freesurfer', basename) 43 | for basename in gii_basenames] 44 | realignment_parameters = [ 45 | os.path.join(session_output_dir, 'rp_' + basename[2:-4] + '.txt') 46 | for (session_output_dir, basename) in 47 | zip(subject.session_output_dirs, basenames)] * 2 48 | session_ids = [session_id for (session_id, onset) in 49 | zip(subject.session_id, subject.onset) 50 | ] # if onset is not None 51 | onsets = [onset for onset in subject.onset] # if onset is not None 52 | subject_ = { 53 | 'output_dir': output_dir, 54 | 'session_output_dirs': subject.session_output_dirs, 55 | 'subject_id': subject.subject_id, 56 | 'session_id': session_ids * 2, 57 | 'TR': subject.TR, 58 | 'drift_model': subject.drift_model, 59 | 'high_pass': 1. 
/ 128, 60 | 'time_units': subject.time_units, 61 | 'hrf_model': subject.hrf_model, 62 | 'onset': onsets * 2, 63 | 'report': True, 64 | 'reports_output_dir': reports_output_dir, 65 | 'basenames': gii_basenames, 66 | 'func': func, 67 | 'realignment_parameters': realignment_parameters, 68 | } 69 | output.append(subject_) 70 | return output 71 | 72 | 73 | def run_subject_surface_glm(jobfile, subject, session, protocol, mesh=None, compcorr=True): 74 | """ Create jobfile and run it """ 75 | output_name = os.path.join( 76 | '/tmp', os.path.basename(jobfile)[:-4] + '_%s.ini' % subject) 77 | _adapt_jobfile(jobfile, subject, output_name, session) 78 | list_subjects_update = generate_glm_input(output_name, mesh) 79 | clean_anatomical_images(IBC) 80 | if protocol == 'mathlang': 81 | compcorr = False 82 | for subject in list_subjects_update: 83 | clean_subject(subject) 84 | if len(subject['session_id']) > 0: 85 | print(len(subject['session_id'])) 86 | if len(subject['session_id']) > 0: 87 | if protocol == 'retino_': 88 | subject['onset'] = [''] * len(subject['onset']) 89 | first_level(subject, compcorr=compcorr, 90 | additional_regressors=RETINO_REG, 91 | smooth=None, mesh=mesh) 92 | else: 93 | first_level(subject, compcorr=compcorr, smooth=None, mesh=mesh) 94 | fixed_effects_analysis(subject, mesh=mesh) 95 | 96 | 97 | if __name__ == '__main__': 98 | """ 99 | protocols = ['stanford3'] 100 | protocols += ['screening', 'rsvp-language', 'hcp1', 'hcp2', 'archi'] 101 | protocols += ['preference', 'mtt1', 'mtt2', 'tom', 'self', 102 | 'retino'] 103 | protocols += ['mathlang', 'enumeration', 'lyon1', 'lyon2'] 104 | protocols = ['stanford1', 'stanford2, 'stanford3'] 105 | protocols = ['preference', 'audio1', 'audio2'] 106 | protocols = ['enumeration', 'biological_motion', 'reward', 'mathlang', 107 | 'navigation', 'search'] 108 | protocols = ['color', 'aomic'] 'abstraction' 'lyon1', 'aomic' 'optimism' 109 | """ 110 | protocols = ['retino'] # 'mario1', 'mdtb', 'color', 'mario2', 'leuven' 'scene' 111 | for protocol in protocols: 112 | jobfile = 'ini_files/IBC_preproc_%s.ini' % protocol 113 | acquisition = protocol 114 | if protocol == 'retino': 115 | acquisition = 'clips4' 116 | subject_session = sorted(get_subject_session(acquisition)) 117 | for mesh in ['fsaverage5', 'individual', 'fsaverage7']: 118 | Parallel(n_jobs=1)( 119 | delayed(run_subject_surface_glm)( 120 | jobfile, subject, session, protocol, mesh=mesh) 121 | for (subject, session) in subject_session) 122 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from setuptools import setup 4 | 5 | setup(name='ibc_public', 6 | version='0.1', 7 | description='Public code for IBC data analysis', 8 | url='https://github.com/hbp-brain-charting/public_analysis_code', 9 | author='Bertrand Thirion', 10 | author_email='bertrand.thirion@inria.fr', 11 | packages=['ibc_public'], 12 | #package_data={'ibc_pulic': ['ibc_data/*.tsv']}, 13 | #include_package_data=True, 14 | ) 15 | --------------------------------------------------------------------------------