├── LICENSE ├── README.md ├── requirements.txt └── src ├── data_processing ├── __main__.py ├── config │ ├── ColorPrompt.py │ ├── ColorPrompt.pyc │ ├── __init__.py │ ├── __init__.pyc │ ├── __pycache__ │ │ ├── ColorPrompt.cpython-36.pyc │ │ ├── __init__.cpython-36.pyc │ │ ├── config_init.cpython-36.pyc │ │ └── config_read.cpython-36.pyc │ ├── config_init.py │ ├── config_init.pyc │ ├── config_read.py │ └── config_read.pyc ├── interface │ ├── __init__.py │ ├── __init__.pyc │ ├── __pycache__ │ │ ├── __init__.cpython-36.pyc │ │ └── inline_print.cpython-36.pyc │ ├── inline_print.py │ └── inline_print.pyc ├── io_data │ ├── __init__.py │ ├── __init__.pyc │ ├── __pycache__ │ │ ├── __init__.cpython-36.pyc │ │ ├── data_acces_file.cpython-36.pyc │ │ └── xml_acces_file.cpython-36.pyc │ ├── data_acces_file.py │ ├── data_acces_file.pyc │ ├── xml_acces_file.py │ └── xml_acces_file.pyc ├── models │ ├── HippModel.py │ ├── HippModel.pyc │ ├── Subject.py │ ├── Subject.pyc │ ├── __init__.py │ ├── __init__.pyc │ └── __pycache__ │ │ ├── HippModel.cpython-36.pyc │ │ ├── Subject.cpython-36.pyc │ │ └── __init__.cpython-36.pyc ├── plot │ ├── __init__.py │ ├── __init__.pyc │ ├── __main__.py │ ├── __pycache__ │ │ ├── __init__.cpython-36.pyc │ │ ├── plot_3D_ROI.cpython-36.pyc │ │ └── plot_data.cpython-36.pyc │ ├── plot_3D_ROI.py │ ├── plot_data.py │ └── plot_data.pyc └── services │ ├── __init__.py │ ├── __init__.pyc │ ├── __pycache__ │ ├── __init__.cpython-36.pyc │ ├── generate_sample_sets.cpython-36.pyc │ ├── process.cpython-36.pyc │ └── tools.cpython-36.pyc │ ├── generate_sample_sets.py │ ├── generate_sample_sets.pyc │ ├── process.py │ ├── process.pyc │ ├── tools.py │ └── tools.pyc ├── pytorch-project ├── 3Dconv.tgz ├── config │ ├── __init__.py │ └── config_init.py ├── dataloader │ ├── __init__.py │ └── data_loader.py ├── datasets │ ├── __init__.py │ └── datasets.py ├── graphs │ └── __init__.py ├── logs │ └── __init__.py ├── main │ ├── __init__.py │ ├── __main2__.py │ └── __main__.py ├── models │ ├── __init__.py │ └── __pycache__ │ │ └── __init__.cpython-36.pyc ├── networks │ ├── 2DEpsilon.py │ ├── MyModel.py │ ├── SiameseNet.py │ ├── __init__.py │ ├── mnist.py │ └── network.py ├── note.txt ├── services │ └── __init__.py ├── siamese │ └── siamese_baseline.py ├── single │ ├── single3D.py │ └── single3D_backup.py ├── test │ ├── test.py │ └── test2.py └── tools │ ├── __init__.py │ ├── dataset.py │ └── maap.py └── pytorch-template ├── config ├── ColorPrompt.py ├── __init__.py └── __pycache__ │ ├── ColorPrompt.cpython-36.pyc │ └── __init__.cpython-36.pyc ├── data_loader ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-36.pyc │ └── data_loader.cpython-36.pyc └── data_loader.py ├── models ├── 28size │ ├── 2D_net │ │ └── 2D_epsilon.py │ ├── 3D_net │ │ ├── 3D_SE-Net_from_2D_epsilon.py │ │ ├── 3D_SENet.py │ │ ├── 3D_single_baseline.py │ │ └── 3D_single_v2.py │ └── 3D_two_stream │ │ └── 3D_baseline.py ├── 32size │ └── 3D_SE │ │ └── __3d_senet__.py └── 42size │ └── __2d_epsilon_baseline.py ├── old ├── __main__.py ├── baseline │ ├── Network_num_1.py │ └── Network_num_2.py ├── check_devices.py ├── models │ ├── __3d_senet__.py │ ├── __init__.py │ ├── baseline_3D_single.py │ └── senet │ │ ├── __init__.py │ │ └── senet_block.py ├── senet │ ├── 3D_SE_net_1.py │ └── squeeze_and_excitation.py └── test.py └── tools └── __init__.py /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Karim ADERGHAL 4 | 5 | Permission is hereby granted, free of charge, to 
any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | ## ADNI_Data_processing 3 | 4 | The Alzheimer's Disease Neuroimaging Initiative (ADNI) unites researchers with study data as they work to define the progression of Alzheimer's disease (AD). This repository includes the preprocessing needed to extract 2D and 3D data with a specific preparation, and a PyTorch-based project to build and feed neural networks for the Alzheimer's disease classification problem. 5 | 6 | ## Repository organization 7 | 8 | * dataset: external URL to download the dataset used in the project 9 | * sources: the src folder contains two subfolders: 10 | > - source code: Python scripts to prepare the datasets. 11 | > - pytorch project: architecture definitions and training parameters 12 | 13 | ## Requirements 14 | 15 | * Linux operating system (Ubuntu distribution: 18.04 LTS) 16 | * Python >= 3.6 17 | * Python libraries (listed below) 18 | 19 | ## Install python virtual environment (optional) 20 | 21 | ```bash 22 | wget https://bootstrap.pypa.io/get-pip.py 23 | sudo python get-pip.py 24 | sudo python3 get-pip.py 25 | sudo pip install virtualenv virtualenvwrapper 26 | sudo rm -rf ~/.cache/pip get-pip.py 27 | nano .bashrc 28 | ``` 29 | 30 | > add these lines to the .bashrc file [by karim for python env] 31 | 32 | ```bash 33 | export WORKON_HOME=$HOME/.virtualenvs 34 | export VIRTUALENVWRAPPER_PYTHON=/usr/bin/python3 35 | source /usr/local/bin/virtualenvwrapper.sh 36 | ``` 37 | 38 | ```bash 39 | source .bashrc 40 | mkvirtualenv ADNI_dl4cv -p python3 41 | workon ADNI_dl4cv 42 | pip3 list 43 | ``` 44 | 45 | ### Install python libs 46 | 47 | > 1. upgrade setuptools & pip 48 | ```bash 49 | pip3 install --upgrade setuptools pip 50 | ``` 51 | 52 | > 2. used to plot graphs and images 53 | ```bash 54 | pip3 install matplotlib 55 | ``` 56 | 57 | > 3. to check python syntax 58 | ```bash 59 | pip3 install pylint pyparsing six 60 | ``` 61 | 62 | > 4. nibabel (for NIfTI medical images) 63 | ```bash 64 | pip3 install nibabel 65 | ``` 66 | 67 | > 5. for scientific data structures (fast matrix & array operations) 68 | ```bash 69 | pip3 install numpy 70 | pip3 install scipy 71 | ``` 72 | 73 | > 6. to work on images 74 | ```bash 75 | pip3 install pillow 76 | ``` 77 |
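> Alternatively, the pinned versions this repository was developed against can be installed in one shot from `requirements.txt`. A note of caution: the pinned `torch==1.3.1+cpu` and `torchvision==0.4.2+cpu` wheels are served from the PyTorch wheel index rather than PyPI, so the index is passed with `-f`; adjust or drop that flag if you use another PyTorch build.

```bash
pip3 install -r requirements.txt -f https://download.pytorch.org/whl/torch_stable.html
```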
78 | ### Deep learning Framework 79 | * [PyTorch](http://pytorch.org/) 80 | ```bash 81 | pip3 install torch # CPU installation; note the PyPI package is named "torch", not "pytorch" (see the official website) 82 | ``` 83 | 84 | pytorch website : [https://pytorch.org/](https://pytorch.org/) 85 | 86 | 87 | 88 | 89 | ## List of Acronyms: 90 | 91 | This table shows the acronyms used in the project. 92 | 93 | | Abbr. | Meaning | 94 | |:---|:---:| 95 | | AD | Alzheimer's Disease | 96 | | MCI | Mild Cognitive Impairment | 97 | | NC | Normal Control | 98 | | MMSE | Mini-Mental State Examination | 99 | | sMRI | Structural Magnetic Resonance Imaging | 100 | | DTI | Diffusion Tensor Imaging | 101 | | HIPP | Hippocampus | 102 | | PPC | Posterior Parietal Cortex | 103 | 104 | ### Author Information 105 | 106 | > ADERGHAL KARIM 2020 107 | 108 | LaBRI - University of Bordeaux - Bordeaux/France 109 | 110 | LabSIV - University Ibn Zohr - Agadir/Morocco 111 | 112 | email: {aderghal}.{karim}@gmail.com 113 | 114 | email: {karim}.{aderghal}@labri.fr 115 | 116 | page: [http://www.labri.fr/perso/kadergha](http://www.labri.fr/perso/kadergha) 117 | 118 | ## Citation 119 | 120 | 121 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | astroid==2.3.3 2 | cycler==0.10.0 3 | isort==4.3.21 4 | kiwisolver==1.1.0 5 | lazy-object-proxy==1.4.3 6 | matplotlib==3.1.1 7 | mccabe==0.6.1 8 | nibabel==2.5.1 9 | numpy==1.17.4 10 | Pillow==6.2.1 11 | pylint==2.4.3 12 | pyparsing==2.4.5 13 | python-dateutil==2.8.1 14 | scipy==1.3.2 15 | six==1.13.0 16 | torch==1.3.1+cpu 17 | torchvision==0.4.2+cpu 18 | typed-ast==1.4.0 19 | wrapt==1.11.2 20 | -------------------------------------------------------------------------------- /src/data_processing/__main__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | import sys 4 | import interface.inline_print as iprint 5 | import services.tools as tls 6 | import services.generate_sample_sets as gss 7 | import io_data.data_acces_file as daf 8 | import config.config_read as rsd 9 | import config.config_init as cfg 10 | import config.ColorPrompt as CP 11 | import services.process as prc 12 | import time 13 | 14 | #------------------------------------------------------------------------------------------ 15 | # function::__main__ :: 16 | #------------------------------------------------------------------------------------------ 17 | def main(): 18 | print('\n\n' + CP.style.BRIGHT + CP.fg.BLUE +'==========================================================================================================') 19 | print('------ $ ADNI Data-set Preprocessing for Alzheimer\'s Disease. $ ------ ') 20 | print('==========================================================================================================\n' + CP.fg.WHITE + CP.style.BRIGHT) 21 | 22 | print(CP.style.BRIGHT + CP.fg.RED + "\n|------------------------------------------|") 23 | print('| >> Configuration details .. 
<< |') 24 | print("|------------------------------------------|\n" + CP.fg.WHITE + CP.style.RESET_ALL) 25 | 26 | # Display Data Parameters 27 | iprint.print_author_info() 28 | iprint.print_global_params() 29 | iprint.print_adni_datasets_path() 30 | iprint.print_augmentation_params() 31 | iprint.print_split_params() 32 | iprint.print_roi_params_hippocampus() 33 | iprint.print_roi_params_posterior_cc() 34 | iprint.print_label_binary_codes() 35 | data_params = rsd.get_all_data_params() 36 | 37 | # original dimensions for the nii file (full brain) 38 | iprint.print_roi_params_global() 39 | # compute dimensions 40 | HIPP_l, HIPP_r = tls.get_dimensions_cubes_HIPP(data_params) 41 | PPC_l, PPC_r = tls.get_dimensions_cubes_PPC(data_params) 42 | iprint.print_dimensions_cubes_HIPP(HIPP_l, HIPP_r) 43 | iprint.print_dimensions_cubes_PPC(PPC_l, PPC_r) 44 | 45 | exit_input = input('\n' + CP.fg.YELLOW + 'To change the parameters, exit and update the \"config.py\" file. \nTo continue, press yes (Y/n) ? : ' + CP.fg.RESET) 46 | exit_bool = False if str(exit_input).lower() == 'y' else True 47 | if exit_bool: 48 | print(CP.style.BRIGHT + CP.fg.RED + '\n Exiting ...! ;) \n' + CP.fg.RESET + CP.style.RESET_ALL) 49 | sys.exit(1) 50 | print('\n\n') 51 | 52 | #-------------------------------------------------------------------- 53 | # Start execution / start timing 54 | #-------------------------------------------------------------------- 55 | start_time = time.time() 56 | localtime = time.localtime(time.time()) 57 | print(CP.style.BRIGHT + CP.fg.BLUE + "==========================================================================================================") 58 | print('= The data-set will be split into Train, Validation & Test folders. ') 59 | print('= Start Time : {} '.format(time.strftime('%Y-%m-%d %H:%M:%S', localtime))) 60 | print("==========================================================================================================\n" + CP.fg.WHITE + CP.style.RESET_ALL) 61 | 62 | #-------------------------------------------------------------------- 63 | # [0] : Compute the demography description table 64 | # 65 | #-------------------------------------------------------------------- 66 | # print(CP.style.BRIGHT + CP.fg.RED + '>$ Computing the demography description table. 
\n' + CP.fg.RESET + CP.style.RESET_ALL) 67 | # time.sleep(1) 68 | # data_desc = prc.compute_demography_description(data_params) 69 | # iprint.print_datasetDescription(data_desc) 70 | # daf.save_desc_table(data_params, data_desc) 71 | # time.sleep(1) 72 | #-------------------------------------------------------------------- 73 | # [1] : save parameters from the config file so they can be reused later 74 | # 75 | #-------------------------------------------------------------------- 76 | daf.save_data_params(data_params) 77 | 78 | #-------------------------------------------------------------------- 79 | # [2] : generate lists 80 | # 81 | #-------------------------------------------------------------------- 82 | gss.generate_lists(data_params) 83 | 84 | #-------------------------------------------------------------------- 85 | # [3] : generate data 86 | # -> by using the lists generated in step [2] 87 | #-------------------------------------------------------------------- 88 | gss.generate_data_from_lists(data_params) 89 | 90 | #-------------------------------------------------------------------- 91 | # Execution finished 92 | #-------------------------------------------------------------------- 93 | total_time = round((time.time() - start_time)) 94 | print(CP.style.BRIGHT + CP.fg.BLUE + "==========================================================================================================") 95 | print('= Finished Time : {} '.format(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))) 96 | print('= Execution Time : {}s / [{}min] '.format(total_time, round(total_time/60, 2))) 97 | print("=========================================================================================================="+ CP.fg.WHITE + CP.style.RESET_ALL) 98 | 99 | #------------------------------------------------------------------------------------------ 100 | # Start ->>>->>> 101 | #------------------------------------------------------------------------------------------ 102 | if __name__ == '__main__': 103 | main() 104 | 105 | 106 | -------------------------------------------------------------------------------- /src/data_processing/config/ColorPrompt.py: -------------------------------------------------------------------------------- 1 | #------------------------------------------------------------------------------------------ 2 | # Color Config: for terminal output colors (ANSI escape codes) 3 | #------------------------------------------------------------------------------------------ 4 | 5 | 6 | class fg: 7 | BLACK = '\033[30m' 8 | RED = '\033[31m' 9 | GREEN = '\033[32m' 10 | YELLOW = '\033[33m' 11 | BLUE = '\033[34m' 12 | MAGENTA = '\033[35m' 13 | CYAN = '\033[36m' 14 | WHITE = '\033[37m' 15 | RESET = '\033[39m' 16 | 17 | class bg: 18 | BLACK = '\033[40m' 19 | RED = '\033[41m' 20 | GREEN = '\033[42m' 21 | YELLOW = '\033[43m' 22 | BLUE = '\033[44m' 23 | MAGENTA = '\033[45m' 24 | CYAN = '\033[46m' 25 | WHITE = '\033[47m' 26 | RESET = '\033[49m' 27 | 28 | class style: 29 | BRIGHT = '\033[1m' 30 | DIM = '\033[2m' 31 | NORMAL = '\033[22m' 32 | RESET_ALL = '\033[0m' -------------------------------------------------------------------------------- /src/data_processing/config/ColorPrompt.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/config/ColorPrompt.pyc -------------------------------------------------------------------------------- 
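A minimal usage sketch for the ColorPrompt helpers above, mirroring how `__main__.py` and `interface/inline_print.py` use them (concatenate the escape codes around the text, then reset), assuming `src/data_processing` is on `sys.path`:

```python
import config.ColorPrompt as CP  # as imported throughout src/data_processing

# Bright green message, then restore the default color and weight.
print(CP.style.BRIGHT + CP.fg.GREEN + "Done." + CP.fg.RESET + CP.style.RESET_ALL)
```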
/src/data_processing/config/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/config/__init__.py -------------------------------------------------------------------------------- /src/data_processing/config/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/config/__init__.pyc -------------------------------------------------------------------------------- /src/data_processing/config/__pycache__/ColorPrompt.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/config/__pycache__/ColorPrompt.cpython-36.pyc -------------------------------------------------------------------------------- /src/data_processing/config/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/config/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /src/data_processing/config/__pycache__/config_init.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/config/__pycache__/config_init.cpython-36.pyc -------------------------------------------------------------------------------- /src/data_processing/config/__pycache__/config_read.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/config/__pycache__/config_read.cpython-36.pyc -------------------------------------------------------------------------------- /src/data_processing/config/config_init.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | # ==================================================== 4 | # Author: Karim ADERGHAL 5 | # Year: 2019 6 | # Labs: LaBRI & LabSIV 7 | # for the ADNI dataset: ADNI-1 baseline sMRI 8 | # screening selected dataset 9 | # URL: http://adni.loni.usc.edu/ 10 | # ==================================================== 11 | 12 | 13 | #------------------------------------------------------------------------------------------ 14 | # Debugging & Time Zone 15 | #------------------------------------------------------------------------------------------ 16 | DEBUG = False 17 | TIMEZONE = 'France/Bordeaux' 18 | 19 | #------------------------------------------------------------------------------------------ 20 | # Author Information 21 | #------------------------------------------------------------------------------------------ 22 | AUTHOR_INFO = { 23 | 'author': 'Karim ADERGHAL', 24 | 'name': 'ALZ-ADNI PCS', 25 | 'version': '1.2', 26 | 'year': '2019', 27 | 'description': 'Data Extracting scripts for CNN Alzheimer\'s Disease Classification', 28 | 'url': 'http://github.com/kaderghal', 29 | 'email': 
'aderghal.karim@gmail.com', 30 | 'university': 'University of Bordeaux (Bordeaux) / University IBN Zohr (Agadir)', 31 | 'lab': 'LaBRI & LabSIV' 32 | } 33 | 34 | #------------------------------------------------------------------------------------------ 35 | # Root path to the local workspace (local machine) 36 | #------------------------------------------------------------------------------------------ 37 | ROOT_PATH_LOCAL_MACHINE = { 38 | # 'root_machine': '/home/karim/workspace/ADNI_workspace' # HP machine 39 | 'root_machine': '/home/kadergha/ADERGHAL/ADNI_workspace' # Aivcalc4 server 40 | 41 | } 42 | 43 | #------------------------------------------------------------------------------------------ 44 | # Global parameters: 45 | # -> Path to the used Deep learning Framework 46 | # -> Path to the output results 47 | #------------------------------------------------------------------------------------------ 48 | GLOBAL_PARAMS = { 49 | 'pytorch_root': ROOT_PATH_LOCAL_MACHINE['root_machine'] + '/path/to/pytorch/', 50 | 'adni_data_src': ROOT_PATH_LOCAL_MACHINE['root_machine'] + '/results/ADNI1_src/', 51 | 'adni_data_des': ROOT_PATH_LOCAL_MACHINE['root_machine'] + '/results/ADNI1_des/' 52 | } 53 | 54 | #------------------------------------------------------------------------------------------ 55 | # Dataset Folders : 56 | # -> brain-data: contains "nii" files (brain images) 57 | # -> target-data: contains a txt file that gives the class of each subject 58 | # -> meta-data: contains an XML file per subject (used to extract meta-data such as age, sex, mmse, etc.) 59 | #------------------------------------------------------------------------------------------ 60 | ADNI_DATASET = { 61 | 'adni_1_brain_data': ROOT_PATH_LOCAL_MACHINE['root_machine'] + '/ADNI1/brain-data', 62 | 'adni_1_target_data': ROOT_PATH_LOCAL_MACHINE['root_machine'] + '/ADNI1/target-data', 63 | 'adni_1_meta_data': ROOT_PATH_LOCAL_MACHINE['root_machine'] + '/ADNI1/meta-data', 64 | } 65 | 66 | #------------------------------------------------------------------------------------------ 67 | # Classes for Subjects 68 | #------------------------------------------------------------------------------------------ 69 | ADNI_CLASSES = { 70 | 'adni_1_classes': 'ADNI_1_classes.txt' 71 | } 72 | 73 | #------------------------------------------------------------------------------------------ 74 | # Coordinates of the ROIs (Hippocampus region, PPC ...), selected by using the AAL atlas. 75 | # -> HIPP: two cubes (3D volumes) that contain the left and right hippocampus 76 | # !: Note that the default dimension of the cubes is (28*28*28); 77 | # you can change it via the padding parameter, as follows: 28 + (x*2), where x is the pad to add. 78 | # For example, if padding_size x = 3, then the cube size becomes 34 = 28 + (3*2) 79 | # -> PPC: we will add it soon !!!! 
80 | #------------------------------------------------------------------------------------------ 81 | ROI_PARAMS_GLOBAL = { 82 | 'ROI_selection': 0, # HIPP: 0, PPC: 1, BOTH: 2 83 | 'ROI_list': {0 : 'HIPP', 1 : 'PPC', 2 : 'BOTH'}, 84 | '3D_or_2D': '3D', # extract 3D cubes or 2D slices 85 | 'padding_size': 0, # => 28 + (x*2) 86 | 'neighbors': 1, # number of neighbors of the median slice if 2D is selected 87 | 'brain_dims': [121, 145, 121] # full brain dimensions (x, y, z) 88 | } 89 | 90 | ROI_PARAMS_HIPP = { 91 | 'hipp_left': (30, 58, 58, 86, 31, 59), # min_x,max_x ; min_y,max_y ; min_z,max_z 92 | 'hipp_right': (64, 92, 58, 86, 31, 59), # calculation model : [coordinates - (index + shift, padding)] 93 | # 'hipp_left': (40, 82 , 82, 124, 40, 82), 94 | # 'hipp_right': (98, 140, 82, 124, 40, 82), 95 | 96 | } 97 | 98 | ROI_PARAMS_PPC = { # to calculate from the AAL atlas 99 | 'ppc_left': (30, 58, 58, 86, 31, 59), # min_x,max_x ; min_y,max_y ; min_z,max_z 100 | 'ppc_right': (64, 92, 58, 86, 31, 59), # calculation model : [coordinates - (index + shift, padding)] 101 | } 102 | 103 | #------------------------------------------------------------------------------------------ 104 | # Augmentation params: 105 | # -> factor F: a multiplier used to augment the data; 106 | # in our case we have 3 classes, so: 107 | # (AD, MCI, NC) ==>> after augmentation ==>> (card(AD) = x, card(MCI) = x, card(NC) = x // where x = F*max(card(AD), card(MCI), card(NC))) 108 | # -> shift (max shift): it means we generate numbers a, b, and c in [-s,s] 109 | # to apply a translation by (a, b, c) 110 | # -> sigma (max sigma): parameter for the Gaussian blur 111 | # -> (we can also use rotation and flip to augment the data) 112 | #------------------------------------------------------------------------------------------ 113 | AUGMENTATION_PARAMS = { 114 | 'augm_test': False, #False, #True, # augment the Test set 115 | 'shift': 2, # Max Shift 116 | 'sigma': 0.4, # Max Sigma for Gaussian Blur 117 | 'factor': 10, # Augmentation Factor 118 | 'flip': True #False, #True # execute the flip operation for cubes 119 | } 120 | 121 | #------------------------------------------------------------------------------------------ 122 | # Info to split the database into Train, Valid, and Test folders 123 | # -> "static_split" is True : we use the selected numbers to perform the split operation 124 | # -> "static_split" is False : we compute 20% for the Valid folder (NB: we keep the same numbers for Test) 125 | #------------------------------------------------------------------------------------------ 126 | SPLIT_SET_PARAMS = { 127 | 'static_split': False, # if False we compute the numbers from the % 128 | 'select_valid': {'AD': 30, 'MCI': 72, 'NC': 38}, # 20% 129 | 'select_test': {'AD': 40, 'MCI': 40, 'NC': 40} # almost 20% 130 | } 131 | 132 | #------------------------------------------------------------------------------------------ 133 | # Labels Naming system 134 | #------------------------------------------------------------------------------------------ 135 | LABELS_CODES = { 136 | # 2-way classification 137 | 'AD-NC': {'AD': 0, 'NC': 1}, 138 | 'AD-MCI': {'AD': 0, 'MCI': 1}, 139 | 'MCI-NC': {'MCI': 0, 'NC': 1}, 140 | # 3-way classification 141 | 'AD-MCI-NC': {'AD': 0, 'MCI': 1, 'NC': 2} 142 | } 143 | 144 | --------------------------------------------------------------------------------
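Two quick sanity checks implied by the config comments above: the cube side grows as 28 + 2x with padding_size x, and the augmentation factor F raises every class to F * max(class cardinalities). A minimal sketch (the factor and the formulas come from this file; the class counts are invented placeholders, not ADNI figures):

```python
# Cube side from the padding formula: 28 + (x*2).
padding_size = 3                 # example value; config_init.py ships with 0
side = 28 + 2 * padding_size
print(side)                      # -> 34

# Augmentation target: every class is brought to F * max(card of the classes).
factor = 10                      # AUGMENTATION_PARAMS['factor']
cards = {'AD': 180, 'MCI': 390, 'NC': 220}   # hypothetical class counts
target = factor * max(cards.values())
print(target)                    # -> 3900, for AD, MCI and NC alike
```

/src/data_processing/config/config_init.pyc: --------------------------------------------------------------------------------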
https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/config/config_init.pyc -------------------------------------------------------------------------------- /src/data_processing/config/config_read.py: -------------------------------------------------------------------------------- 1 | import config.config_init as cfg 2 | 3 | #------------------------------------------------------------------------------------------ 4 | # Config read: read data from config file to dict structure 5 | #------------------------------------------------------------------------------------------ 6 | 7 | def get_author_info(): 8 | tempo_dict = {} 9 | tempo_dict['name'] = str(cfg.AUTHOR_INFO['name']) 10 | tempo_dict['version'] = str(cfg.AUTHOR_INFO['version']) 11 | tempo_dict['year'] = str(cfg.AUTHOR_INFO['year']) 12 | tempo_dict['description'] = str(cfg.AUTHOR_INFO['description']) 13 | tempo_dict['url'] = str(cfg.AUTHOR_INFO['url']) 14 | tempo_dict['author'] = str(cfg.AUTHOR_INFO['author']) 15 | tempo_dict['email'] = str(cfg.AUTHOR_INFO['email']) 16 | tempo_dict['lab'] = str(cfg.AUTHOR_INFO['lab']) 17 | return tempo_dict 18 | 19 | def get_global_params(): 20 | tempo_dict = {} 21 | tempo_dict['pytorch_root'] = str(cfg.GLOBAL_PARAMS['pytorch_root']) 22 | tempo_dict['adni_data_src'] = str(cfg.GLOBAL_PARAMS['adni_data_src']) 23 | tempo_dict['adni_data_des'] = str(cfg.GLOBAL_PARAMS['adni_data_des']) 24 | return tempo_dict 25 | 26 | def get_adni_datasets(): 27 | tempo_dict = {} 28 | tempo_dict['adni_1_brain_data'] = str(cfg.ADNI_DATASET['adni_1_brain_data']) 29 | tempo_dict['adni_1_target_data'] = str(cfg.ADNI_DATASET['adni_1_target_data']) 30 | tempo_dict['adni_1_meta_data'] = str(cfg.ADNI_DATASET['adni_1_meta_data']) 31 | return tempo_dict 32 | 33 | def get_classes_datasets(): 34 | tempo_dict = {} 35 | tempo_dict['adni_1_classes'] = str(cfg.ADNI_DATASET['adni_1_target_data']) + '/' + str(cfg.ADNI_CLASSES['adni_1_classes']) 36 | return tempo_dict 37 | 38 | def get_roi_params_global(): 39 | tempo_dict = {} 40 | tempo_dict['ROI_selection'] = int(cfg.ROI_PARAMS_GLOBAL['ROI_selection']) 41 | tempo_dict['ROI_list'] = cfg.ROI_PARAMS_GLOBAL['ROI_list'] 42 | tempo_dict['3D_or_2D'] = cfg.ROI_PARAMS_GLOBAL['3D_or_2D'] 43 | tempo_dict['padding_size'] = int(cfg.ROI_PARAMS_GLOBAL['padding_size']) 44 | tempo_dict['neighbors'] = int(cfg.ROI_PARAMS_GLOBAL['neighbors']) 45 | tempo_dict['brain_dims'] = cfg.ROI_PARAMS_GLOBAL['brain_dims'] 46 | return tempo_dict 47 | 48 | def get_roi_params_hippocampus(): 49 | tempo_dict = {} 50 | tempo_dict['hipp_left'] = cfg.ROI_PARAMS_HIPP['hipp_left'] 51 | tempo_dict['hipp_right'] = cfg.ROI_PARAMS_HIPP['hipp_right'] 52 | return tempo_dict 53 | 54 | def get_roi_params_posterior_cc(): 55 | tempo_dict = {} 56 | tempo_dict['ppc_left'] = cfg.ROI_PARAMS_PPC['ppc_left'] 57 | tempo_dict['ppc_right'] = cfg.ROI_PARAMS_PPC['ppc_right'] 58 | return tempo_dict 59 | 60 | def get_augmentation_params(): 61 | tempo_dict = {} 62 | tempo_dict['augm_test'] = cfg.AUGMENTATION_PARAMS['augm_test'] 63 | tempo_dict['shift'] = cfg.AUGMENTATION_PARAMS['shift'] 64 | tempo_dict['sigma'] = cfg.AUGMENTATION_PARAMS['sigma'] 65 | tempo_dict['factor'] = cfg.AUGMENTATION_PARAMS['factor'] 66 | tempo_dict['flip'] = cfg.AUGMENTATION_PARAMS['flip'] 67 | return tempo_dict 68 | 69 | def get_split_params(): 70 | tempo_dict = {} 71 | tempo_dict['static_split'] = cfg.SPLIT_SET_PARAMS['static_split'] 72 | tempo_dict['select_valid'] = cfg.SPLIT_SET_PARAMS['select_valid'] 73 
| tempo_dict['select_test'] = cfg.SPLIT_SET_PARAMS['select_test'] 74 | return tempo_dict 75 | 76 | def get_label_binary_codes(): 77 | tempo_dict = {} 78 | tempo_dict['AD-NC'] = cfg.LABELS_CODES['AD-NC'] 79 | tempo_dict['AD-MCI'] = cfg.LABELS_CODES['AD-MCI'] 80 | tempo_dict['MCI-NC'] = cfg.LABELS_CODES['MCI-NC'] 81 | tempo_dict['AD-MCI-NC'] = cfg.LABELS_CODES['AD-MCI-NC'] 82 | return tempo_dict 83 | 84 | # get all data to dict 85 | def get_all_data_params(): 86 | lst_all = {} 87 | for item in get_global_params(): 88 | lst_all[item] = get_global_params()[item] 89 | 90 | for item in get_adni_datasets(): 91 | lst_all[item] = get_adni_datasets()[item] 92 | 93 | for item in get_classes_datasets(): 94 | lst_all[item] = get_classes_datasets()[item] 95 | 96 | for item in get_roi_params_global(): 97 | lst_all[item] = get_roi_params_global()[item] 98 | 99 | for item in get_roi_params_hippocampus(): 100 | lst_all[item] = get_roi_params_hippocampus()[item] 101 | 102 | for item in get_roi_params_posterior_cc(): 103 | lst_all[item] = get_roi_params_posterior_cc()[item] 104 | 105 | for item in get_augmentation_params(): 106 | lst_all[item] = get_augmentation_params()[item] 107 | 108 | for item in get_label_binary_codes(): 109 | lst_all[item] = get_label_binary_codes()[item] 110 | 111 | for item in get_split_params(): 112 | lst_all[item] = get_split_params()[item] 113 | 114 | return lst_all -------------------------------------------------------------------------------- /src/data_processing/config/config_read.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/config/config_read.pyc -------------------------------------------------------------------------------- /src/data_processing/interface/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/interface/__init__.py -------------------------------------------------------------------------------- /src/data_processing/interface/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/interface/__init__.pyc -------------------------------------------------------------------------------- /src/data_processing/interface/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/interface/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /src/data_processing/interface/__pycache__/inline_print.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/interface/__pycache__/inline_print.cpython-36.pyc -------------------------------------------------------------------------------- /src/data_processing/interface/inline_print.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | import config.config_read as rsd 4 | 
import config.ColorPrompt as CP 5 | 6 | #------------------------------------------------------------------------------------------ 7 | # Display Data: to print data (Terminal) 8 | #------------------------------------------------------------------------------------------ 9 | 10 | def print_author_info(): 11 | print(CP.style.BRIGHT + CP.fg.GREEN + "Author Information: " + CP.fg.RESET + CP.style.RESET_ALL) 12 | for k, v in rsd.get_author_info().items(): 13 | print('\t[' + k + ']: ' + str(v)) 14 | print("\n") 15 | 16 | def print_global_params(): 17 | print(CP.style.BRIGHT + CP.fg.GREEN + "Global parameters: " + CP.fg.RESET + CP.style.RESET_ALL) 18 | for k, v in rsd.get_global_params().items(): 19 | print('\t[' + k + ']: ' + str(v)) 20 | print("\n") 21 | 22 | def print_adni_datasets_path(): 23 | print(CP.style.BRIGHT + CP.fg.GREEN + "Datasets Images: " + CP.fg.RESET + CP.style.RESET_ALL) 24 | for k, v in rsd.get_adni_datasets().items(): 25 | print('\t[' + k + ']: ' + str(v)) 26 | print("\n") 27 | 28 | def print_classes_datasets_path(): 29 | print(CP.style.BRIGHT + CP.fg.GREEN + "Classes Datasets Paths: " + CP.fg.RESET + CP.style.RESET_ALL) 30 | for k, v in rsd.get_classes_datasets().items(): 31 | print('\t[' + k + ']: ' + str(v)) 32 | print("\n") 33 | 34 | def print_augmentation_params(): 35 | print(CP.style.BRIGHT + CP.fg.GREEN + "Augmentation parameters: " + CP.fg.RESET + CP.style.RESET_ALL) 36 | for k, v in rsd.get_augmentation_params().items(): 37 | print('\t[' + k + ']: ' + str(v)) 38 | print("\n") 39 | 40 | def print_split_params(): 41 | print(CP.style.BRIGHT + CP.fg.GREEN + "Splitting dataset parameters: " + CP.fg.RESET + CP.style.RESET_ALL) 42 | for k, v in rsd.get_split_params().items(): 43 | print('\t[' + k + ']: ' + str(v)) 44 | print("\n") 45 | 46 | def print_roi_params_global(): 47 | print(CP.style.BRIGHT + CP.fg.GREEN + "Roi Global parameters: " + CP.fg.RESET + CP.style.RESET_ALL) 48 | for k, v in rsd.get_roi_params_global().items(): 49 | print('\t[' + k + ']: ' + str(v)) 50 | print("\n") 51 | 52 | def print_roi_params_hippocampus(): 53 | print(CP.style.BRIGHT + CP.fg.GREEN + "Roi Hippocampus parameters: " + CP.fg.RESET + CP.style.RESET_ALL) 54 | for k, v in rsd.get_roi_params_hippocampus().items(): 55 | print('\t[' + k + ']: ' + str(v)) 56 | print("\n") 57 | 58 | def print_roi_params_posterior_cc(): 59 | print(CP.style.BRIGHT + CP.fg.GREEN + "Roi Posterior CC parameters:" + CP.fg.RESET + CP.style.RESET_ALL) 60 | for k, v in rsd.get_roi_params_posterior_cc().items(): 61 | print('\t[' + k + ']: ' + str(v)) 62 | print("\n") 63 | 64 | def print_label_binary_codes(): 65 | print(CP.style.BRIGHT + CP.fg.GREEN + "Labels Binary Codes:" + CP.fg.RESET + CP.style.RESET_ALL) 66 | for k, v in rsd.get_label_binary_codes().items(): 67 | print('\t[' + k + ']: ' + str(v)) 68 | print("\n") 69 | 70 | def print_all_params_data(): 71 | print(CP.style.BRIGHT + CP.fg.GREEN + "All parameters Data:" + CP.fg.RESET + CP.style.RESET_ALL) 72 | for k, v in rsd.get_all_data_params().items(): 73 | print('\t[' + k + ']: ' + str(v)) 74 | print("\n") 75 | 76 | def print_all_params_data_v2(data): 77 | print(CP.style.BRIGHT + CP.fg.GREEN + "All parameters Data:" + CP.fg.RESET + CP.style.RESET_ALL) 78 | for k, v in data.items(): 79 | print('\t {} : {}'.format(k, v)) 80 | print("\n") 81 | 
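# NOTE: a quick worked example for the function below. print_dimensions_cubes_HIPP
# prints (max - min) per axis, so with the default ROI_PARAMS_HIPP coordinates from
# config_init.py, hipp_left = (30, 58, 58, 86, 31, 59):
#     (58 - 30, 86 - 58, 59 - 31) = (28, 28, 28)
# i.e. the 28x28x28 hippocampus cube the config comments promise; a nonzero
# padding_size x in ROI_PARAMS_GLOBAL grows each side to 28 + 2*x.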
82 | def print_dimensions_cubes_HIPP(l, r): 83 | print(CP.style.BRIGHT + CP.fg.GREEN + "Hippocampus Cube (ROI) dimension after the extraction process:" + CP.fg.RESET + CP.style.RESET_ALL) 84 | print('\tHippocampus L : ({}, {}, {})'.format(l[1] - l[0], l[3] - l[2], l[5] - l[4])) 85 | print('\tHippocampus R : ({}, {}, {})'.format(r[1] - r[0], r[3] - r[2], r[5] - r[4])) 86 | print("\n") 87 | 88 | def print_dimensions_cubes_PPC(l, r): 89 | print(CP.style.BRIGHT + CP.fg.GREEN + "Posterior CC Cube (ROI) dimension after the extraction process:" + CP.fg.RESET + CP.style.RESET_ALL) 90 | print('\tPosterior_CC L : ({}, {}, {})'.format(l[1] - l[0], l[3] - l[2], l[5] - l[4])) 91 | print('\tPosterior_CC R : ({}, {}, {})'.format(r[1] - r[0], r[3] - r[2], r[5] - r[4])) 92 | print("\n") 93 | 94 | def print_adni_desc(adni1): 95 | print("\t------------------------------------------------------") 96 | print("\t| ADNI Datasets |") 97 | print("\t------------------------------------------------------") 98 | print("\t---------- AD | MCI | NC ------") 99 | print("\t------------------------------------------------------") 100 | print("\t| ADNI 1 | {} | {} | {} ------".format(len(adni1[0]), len(adni1[1]), len(adni1[2]))) 101 | print("\t------------------------------------------------------") 102 | 103 | def print_augmentation_table(data): 104 | print(CP.style.BRIGHT + CP.fg.RED + "----------------------------------------------------------------------------------") 105 | print("| Augmentation description ") 106 | print("----------------------------------------------------------------------------------") 107 | print("| | AD | MCI | NC | ") 108 | print("----------------------------------------------------------------------------------") 109 | print("| Train | {0} -> ({3}) | {1} -> ({3}) | {2} -> ({3}) | {4} -> ({5}) ".format(data[0][0], 110 | data[0][1], 111 | data[0][2], 112 | data[0][3], 113 | data[0][0] + data[0][1] + data[0][2], 114 | data[0][3]*3)) 115 | print("----------------------------------------------------------------------------------") 116 | print("| Valid | {0} -> ({3}) | {1} -> ({3}) | {2} -> ({3}) | {4} -> ({5}) ".format(data[1][0], 117 | data[1][1], 118 | data[1][2], 119 | data[1][3], 120 | data[1][0] + data[1][1] + data[1][2], 121 | data[1][3]*3)) 122 | print("----------------------------------------------------------------------------------") 123 | print("| Test | {0} -> ({3}) | {1} -> ({3}) | {2} -> ({3}) | {4} -> ({5}) ".format(data[2][0], 124 | data[2][1], 125 | data[2][2], 126 | data[2][3], 127 | data[2][0] + data[2][1] + data[2][2], 128 | data[2][3]*3)) 129 | print("----------------------------------------------------------------------------------") 130 | print("| | {0} -> ({3}) | {1} -> ({3}) | {2} -> ({3}) | {4} -> ({5}) ".format( 131 | data[0][0] + data[1][0] + data[2][0], 132 | data[0][1] + data[1][1] + data[2][1], 133 | data[0][2] + data[1][2] + data[2][2], 134 | data[0][3] + data[1][3] + data[2][3], 135 | (data[0][0] + data[1][0] + data[2][0] + 136 | data[0][1] + data[1][1] + data[2][1] + 137 | data[0][2] + data[1][2] + data[2][2]), 138 | (data[0][3] + data[1][3] + data[2][3])*3, 139 | )) 140 | print("----------------------------------------------------------------------------------" + CP.fg.RESET + CP.style.RESET_ALL) 141 | 142 | def print_datasetDescription(data): 143 | print(CP.style.BRIGHT + CP.fg.CYAN + "----------------------------------------------------------------------------------------------------------") 144 | print("| ADNI-1 description |") 145 | print("----------------------------------------------------------------------------------------------------------") 146 | print("| #Subject | Sex (F/M) | Age [min, max]/mean(std) | MMSE [min, max]/mean(std) |") 147 
print("----------------------------------------------------------------------------------------------------------") 148 | print("| AD | {} | {} | {} | {} |".format(data[0][1], data[0][2], data[0][3], data[0][4])) 149 | print("----------------------------------------------------------------------------------------------------------") 150 | print("| MCI | {} | {} | {} | {} |".format(data[1][1], data[1][2], data[1][3], data[1][4])) 151 | print("----------------------------------------------------------------------------------------------------------") 152 | print("| NC | {} | {} | {} | {} |".format(data[2][1], data[2][2], data[2][3], data[2][4])) 153 | print("----------------------------------------------------------------------------------------------------------\n" + CP.fg.RESET + CP.style.RESET_ALL) 154 | 155 | print(CP.style.BRIGHT + CP.fg.CYAN + "\n----------------------------------------------------------------------------------------------------------") 156 | print("| suite >> ADNI-1 description |") 157 | print("----------------------------------------------------------------------------------------------------------") 158 | print("| | Global GDS [min, max]/mean(std) | Global CDR [min, max]mean/std |") 159 | print("----------------------------------------------------------------------------------------------------------") 160 | print("| AD | {} | {} |".format(data[0][5], data[0][6])) 161 | print("----------------------------------------------------------------------------------------------------------") 162 | print("| MCI | {} | {} |".format(data[1][5], data[1][6])) 163 | print("----------------------------------------------------------------------------------------------------------") 164 | print("| NC | {} | {} |".format(data[2][5], data[2][6])) 165 | print("----------------------------------------------------------------------------------------------------------\n" + CP.fg.RESET + CP.style.RESET_ALL) 166 | 167 | 168 | 169 | 170 | 171 | # def print_2D_or_3D_data(): 172 | # selected_decision = raw_input("Do you want create 3D Data roi or 2D slices ? 
\n - [0] 3D - [1] 2D \n ") 173 | # return True if int(selected_decision) == 0 else False 174 | -------------------------------------------------------------------------------- /src/data_processing/interface/inline_print.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/interface/inline_print.pyc -------------------------------------------------------------------------------- /src/data_processing/io_data/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/io_data/__init__.py -------------------------------------------------------------------------------- /src/data_processing/io_data/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/io_data/__init__.pyc -------------------------------------------------------------------------------- /src/data_processing/io_data/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/io_data/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /src/data_processing/io_data/__pycache__/data_acces_file.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/io_data/__pycache__/data_acces_file.cpython-36.pyc -------------------------------------------------------------------------------- /src/data_processing/io_data/__pycache__/xml_acces_file.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/io_data/__pycache__/xml_acces_file.cpython-36.pyc -------------------------------------------------------------------------------- /src/data_processing/io_data/data_acces_file.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | import os 4 | import services.tools as tls 5 | import pickle 6 | import errno 7 | # import lmdb # torch 8 | 9 | #------------------------------------------------------------------------------------------ 10 | # DAF: Data Access File: files & folders processing 11 | #------------------------------------------------------------------------------------------ 12 | 13 | def get_nii_from_folder(folder): 14 | res = [] 15 | for root, dirs, files in os.walk(folder): 16 | for file in files: 17 | if file.endswith('.nii'): # or file.endswith('.nii.gz'): 18 | res.append(os.path.join(root, file)) 19 | if len(res) > 1: 20 | print('WARNING. Folder %s contains more than one .nii file' % folder) 21 | return res 22 | 23 |
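# NOTE: a usage sketch for the helpers in this file (the path is a placeholder):
#     res = get_nii_from_folder('/path/to/ADNI1/brain-data/some_subject')
#     nii_path = res[0] if res else None   # the subject's (ideally unique) .nii scan
# The pickle helpers further down round-trip the configuration: save_data_params(p)
# writes <adni_data_des>/<convention name>/Data_params.pkl, and read_data_params()
# called on that same path returns an identical dict.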
24 | 25 | def initiate_lmdb(folder_path, lmdb_name, drop_existing=False): # save data to an lmdb folder 26 | import lmdb # NOTE: the module-level import is commented out at the top of this file 27 | saving_path = folder_path + '/' + lmdb_name 28 | print("saving_path : ", saving_path) 29 | if drop_existing: 30 | import shutil 31 | if os.path.exists(saving_path): 32 | shutil.rmtree(saving_path) 33 | env = lmdb.open(saving_path, map_size=int(1e12)) # open the computed path (was: the bare lmdb_name) 34 | # print('database debug info:', env.stat()) 35 | return env 36 | 37 | #------------------------------------------------------------------------------------------ 38 | # Save parameters to a file so they can be reused later 39 | #------------------------------------------------------------------------------------------ 40 | 41 | def save_data_params(data_params): 42 | path_file = data_params['adni_data_des'] + tls.get_convention_name(data_params) + '/Data_params.pkl' 43 | try: 44 | os.makedirs(os.path.dirname(path_file)) 45 | except OSError as e: 46 | if e.errno != errno.EEXIST: 47 | raise 48 | with open(path_file, 'wb') as f: 49 | pickle.dump(data_params, f) 50 | 51 | #------------------------------------------------------------------------------------------ 52 | # Read parameters from the file "Data_params.pkl" 53 | #------------------------------------------------------------------------------------------ 54 | 55 | def read_data_params(path_file): 56 | import pickle 57 | dir_name = os.path.dirname(path_file) 58 | with open(path_file, 'rb') as f: 59 | data_params = pickle.load(f) 60 | return data_params 61 | 62 | 63 | def read_lists_from_file(path_file): 64 | dir_name = os.path.dirname(path_file) 65 | with open(path_file, 'rb') as f: 66 | data_list = pickle.load(f) 67 | return data_list 68 | 69 | 70 | def save_lists_to_file(path_file, data_list): 71 | import pickle 72 | import os 73 | import errno 74 | try: 75 | os.makedirs(os.path.dirname(path_file)) 76 | except OSError as e: 77 | if e.errno != errno.EEXIST: 78 | raise 79 | with open(path_file, 'wb') as f: 80 | pickle.dump(data_list, f) 81 | 82 | #------------------------------------------------------------------------------------------ 83 | # read data from a file, line by line, into a list 84 | #------------------------------------------------------------------------------------------ 85 | 86 | def read_data_file(path_file): 87 | with open(path_file) as f: 88 | content = f.readlines() 89 | return [item.strip() for item in content] 90 | 91 | #------------------------------------------------------------------------------------------ 92 | # Save a model to the local machine 93 | #------------------------------------------------------------------------------------------ 94 | 95 | def save_model(model, path_file): 96 | try: 97 | os.makedirs(os.path.dirname(path_file)) 98 | except OSError as e: 99 | if e.errno != errno.EEXIST: 100 | raise 101 | with open(path_file, 'wb') as f: 102 | pickle.dump(model, f) 103 | 104 | #------------------------------------------------------------------------------------------ 105 | # Read a model from the local machine 106 | #------------------------------------------------------------------------------------------ 107 | 108 | def read_model(path_file): 109 | dir_name = os.path.dirname(path_file) 110 | with open(path_file, 'rb') as f: 111 | model = pickle.load(f) 112 | return model 113 | 114 | #------------------------------------------------------------------------------------------ 115 | # Save the demography description table to a txt file 116 | 
#------------------------------------------------------------------------------------------ 117 | def save_desc_table(data_params, text_data): 118 | classes = ['AD ', 'MCI', 'NC '] 119 | path_file = data_params['adni_data_des'] + tls.get_convention_name(data_params) + '/Desciption_ADNI_demography.txt' 120 | try: 121 | os.makedirs(os.path.dirname(path_file)) 122 | except OSError as e: 123 | if e.errno != errno.EEXIST: 124 | raise 125 | with open(path_file, 'w') as f: 126 | f.write("----------------------------------------------------------------------------------------------------------\n") 127 | f.write("| ADNI-1 description |\n") 128 | f.write("----------------------------------------------------------------------------------------------------------\n") 129 | f.write("| #Subject | Sex (F/M) | Age [min, max]/mean(std) | MMSE [min, max]mean/std |\n") 130 | f.write("----------------------------------------------------------------------------------------------------------\n") 131 | for i in range(3): 132 | f.write("| {} | {} | {} | {} | {} |\n".format(classes[i], text_data[i][1], text_data[i][2], text_data[i][3], text_data[i][4])) 133 | f.write("----------------------------------------------------------------------------------------------------------\n") 134 | f.close() -------------------------------------------------------------------------------- /src/data_processing/io_data/data_acces_file.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/io_data/data_acces_file.pyc -------------------------------------------------------------------------------- /src/data_processing/io_data/xml_acces_file.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | import xml.etree.ElementTree as ET 4 | 5 | #------------------------------------------------------------------------------------------ 6 | # Function: to read meta-data 7 | #------------------------------------------------------------------------------------------ 8 | 9 | def find_xml_file(data_params, subject_ID): 10 | import os 11 | for file in os.listdir(data_params['adni_1_meta_data']): 12 | if file.endswith(".xml") and str(subject_ID).lower() in file.lower(): 13 | return read_xml_file(os.path.join(data_params['adni_1_meta_data'], file)) 14 | 15 | def read_xml_file(path_file): 16 | return ET.parse(path_file).getroot() 17 | 18 | # get [ID, Date, Class, Age, Sex, MMSE] 19 | def get_Subject_info(data_params, subject_ID): 20 | root = find_xml_file(data_params, subject_ID) 21 | # models 22 | 23 | _Date_Acquisition = root.findall('.//project/subject/study/series/dateAcquired')[0].text 24 | _Groupe_ = root.findall('.//project/subject/subjectInfo/[@item="DX Group"]')[0].text 25 | _AGE_ = root.findall('.//project/subject/study/subjectAge')[0].text 26 | _SEX_ = root.findall('.//project/subject/subjectSex')[0].text 27 | _MMSE_ = root.findall('.//project/subject/visit/assessment/[@name="MMSE"]/component/assessmentScore')[0].text 28 | _GDS_ = root.findall('.//project/subject/visit/assessment/[@name="GDSCALE"]/component/assessmentScore')[0].text 29 | _CDR_ = root.findall('.//project/subject/visit/assessment/[@name="CDR"]/component/assessmentScore')[0].text 30 | 31 | 32 | _Groupe_ = convert_class_name(_Groupe_) 33 | return [subject_ID, _Date_Acquisition, _Groupe_, _AGE_, _SEX_, _MMSE_, _GDS_, _CDR_] 34 | 35 | 36 | # Convert class name 37 | def 
convert_class_name(groupe): 38 | if 'MCI' in groupe: 39 | return "MCI" 40 | if 'Normal' in groupe: 41 | return "NC" 42 | if 'AD' in groupe: 43 | return "AD" 44 | -------------------------------------------------------------------------------- /src/data_processing/io_data/xml_acces_file.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/io_data/xml_acces_file.pyc -------------------------------------------------------------------------------- /src/data_processing/models/HippModel.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | #------------------------------------------------------------------------------------------ 4 | # Class Model: contains ROI (Left & Right), meta-data vector, and the Label 5 | #------------------------------------------------------------------------------------------ 6 | 7 | class HippModel: 8 | # Attributes members 9 | _hippLeft = None 10 | _hippRight = None # 3D data 11 | _hippMetaDataVector = None # [ID, Date, Class, Age, Sex, MMSE, GDS, CDR] 12 | _hippLabel = None # integer 13 | 14 | # constructor 15 | def __init__(self, hippLeft, hippRight, hippMetaDataVector, hippLabel): 16 | self._hippLeft = hippLeft 17 | self._hippRight = hippRight 18 | self._hippMetaDataVector = hippMetaDataVector 19 | self._hippLabel = hippLabel 20 | 21 | # Properties 22 | @property 23 | def hippLeft(self): 24 | return self._hippLeft 25 | 26 | @property 27 | def hippRight(self): 28 | return self._hippRight 29 | 30 | @property 31 | def hippMetaDataVector(self): 32 | return self._hippMetaDataVector 33 | 34 | @property 35 | def hippLabel(self): 36 | return self._hippLabel 37 | 38 | # Setters 39 | @hippRight.setter 40 | def hippRight(self, value): 41 | self._hippRight = value 42 | 43 | @hippLeft.setter 44 | def hippLeft(self, value): 45 | self._hippLeft = value 46 | 47 | @hippMetaDataVector.setter 48 | def hippMetaDataVector(self, value): 49 | self._hippMetaDataVector = value 50 | 51 | @hippLabel.setter 52 | def hippLabel(self, value): 53 | self._hippLabel = value 54 | 55 | # Getters 56 | @hippRight.getter 57 | def hippRight(self): 58 | return self._hippRight 59 | 60 | @hippLeft.getter 61 | def hippLeft(self): 62 | return self._hippLeft 63 | 64 | @hippMetaDataVector.getter 65 | def hippMetaDataVector(self): 66 | return self._hippMetaDataVector 67 | 68 | @hippLabel.getter 69 | def hippLabel(self): 70 | return self._hippLabel 71 | 72 | # deleter 73 | @hippRight.deleter 74 | def hippRight(self): 75 | del self._hippRight 76 | 77 | @hippLeft.deleter 78 | def hippLeft(self): 79 | del self._hippLeft 80 | 81 | @hippMetaDataVector.deleter 82 | def hippMetaDataVector(self): 83 | del self._hippMetaDataVector 84 | 85 | @hippLabel.deleter 86 | def hippLabel(self): 87 | del self._hippLabel 88 | 89 | -------------------------------------------------------------------------------- /src/data_processing/models/HippModel.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/models/HippModel.pyc -------------------------------------------------------------------------------- /src/data_processing/models/Subject.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | 
#------------------------------------------------------------------------------------------ 4 | # Class Model: contains ROI (Left & Right), meta-data vector, and the Label 5 | #------------------------------------------------------------------------------------------ 6 | class Subject: 7 | #------------------------------------ 8 | # Attributes members 9 | #------------------------------------ 10 | _subjectID = "" 11 | _dateAcqui = "" 12 | _group = "" 13 | _age = 0 14 | _sex = "" 15 | _mmse = 0 16 | _gds = 0 17 | _cdr = 0 18 | 19 | #------------------------------------ 20 | # constructor 21 | #------------------------------------ 22 | def __init__(self, subjectID, dateAcqui, group, age, sex, mmse, gds, cdr): 23 | self._subjectID = subjectID 24 | self._dateAcqui = dateAcqui 25 | self._group = group 26 | self._age = age 27 | self._sex = sex 28 | self._mmse = mmse 29 | self._gds = gds 30 | self._cdr = cdr 31 | 32 | #------------------------------------ 33 | # Properties 34 | #------------------------------------ 35 | @property 36 | def subjectID(self): 37 | return self._subjectID 38 | 39 | @property 40 | def dateAcqui(self): 41 | return self._dateAcqui 42 | 43 | @property 44 | def group(self): 45 | return self._group 46 | 47 | @property 48 | def age(self): 49 | return self._age 50 | 51 | @property 52 | def sex(self): 53 | return self._sex 54 | 55 | @property 56 | def mmse(self): 57 | return self._mmse 58 | 59 | @property 60 | def gds(self): 61 | return self._gds 62 | 63 | @property 64 | def cdr(self): 65 | return self._cdr 66 | 67 | #------------------------------------ 68 | # Access 69 | #------------------------------------ 70 | # Setters 71 | @subjectID.setter 72 | def subjectID(self, value): 73 | self._subjectID = value 74 | 75 | @dateAcqui.setter 76 | def dateAcqui(self, value): 77 | self._dateAcqui = value 78 | 79 | @group.setter 80 | def group(self, value): 81 | self._group = value 82 | 83 | @age.setter 84 | def age(self, value): 85 | self._age = value 86 | 87 | @sex.setter 88 | def sex(self, value): 89 | self._sex = value 90 | 91 | @mmse.setter 92 | def mmse(self, value): 93 | self._mmse = value 94 | 95 | @gds.setter 96 | def gds(self, value): 97 | self._gds = value 98 | 99 | @cdr.setter 100 | def cdr(self, value): 101 | self._cdr = value 102 | 103 | # Getters 104 | @subjectID.getter 105 | def subjectID(self): 106 | return self._subjectID 107 | 108 | @dateAcqui.getter 109 | def dateAcqui(self): 110 | return self._dateAcqui 111 | 112 | @group.getter 113 | def group(self): 114 | return self._group 115 | 116 | @age.getter 117 | def age(self): 118 | return self._age 119 | 120 | @sex.getter 121 | def sex(self): 122 | return self._sex 123 | 124 | @mmse.getter 125 | def mmse(self): 126 | return self._mmse 127 | 128 | @gds.getter 129 | def gds(self): 130 | return self._gds 131 | 132 | @cdr.getter 133 | def cdr(self): 134 | return self._cdr 135 | 136 | # deleter 137 | @subjectID.deleter 138 | def subjectID(self): 139 | del self._subjectID 140 | 141 | @dateAcqui.deleter 142 | def dateAcqui(self): 143 | del self._dateAcqui 144 | 145 | @group.deleter 146 | def group(self): 147 | del self._group 148 | 149 | @age.deleter 150 | def age(self): 151 | del self._age 152 | 153 | @sex.deleter 154 | def sex(self): 155 | del self._sex 156 | 157 | @mmse.deleter 158 | def mmse(self): 159 | del self._mmse 160 | 161 | @gds.deleter 162 | def gds(self): 163 | del self._gds 164 | 165 | @cdr.deleter 166 | def cdr(self): 167 | del self._cdr 168 | 
#------------------------------------------------------------------------ 169 | #------------------------------------------------------------------------ 170 | -------------------------------------------------------------------------------- /src/data_processing/models/Subject.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/models/Subject.pyc -------------------------------------------------------------------------------- /src/data_processing/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/models/__init__.py -------------------------------------------------------------------------------- /src/data_processing/models/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/models/__init__.pyc -------------------------------------------------------------------------------- /src/data_processing/models/__pycache__/HippModel.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/models/__pycache__/HippModel.cpython-36.pyc -------------------------------------------------------------------------------- /src/data_processing/models/__pycache__/Subject.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/models/__pycache__/Subject.cpython-36.pyc -------------------------------------------------------------------------------- /src/data_processing/models/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/models/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /src/data_processing/plot/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/plot/__init__.py -------------------------------------------------------------------------------- /src/data_processing/plot/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/plot/__init__.pyc -------------------------------------------------------------------------------- /src/data_processing/plot/__main__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import sys, os 3 | 4 | sys.path.append('/home/karim/workspace/vscode-python/ADNI_Data_processing/src/data_processing') 5 | 6 | import config.config_read as rsd 7 | import services.tools as tls 8 | import io_data.data_acces_file as daf 9 | 
import matplotlib.pyplot as plt 10 | import numpy as np 11 | 12 | #------------------------------------------------------------------------------------------ 13 | # Plot slices from the selected ROI 14 | #------------------------------------------------------------------------------------------ 15 | 16 | 17 | def get_sag_slices(data_L, data_R, sag_l, sag_r): 18 | selected_data_L = data_L[sag_l[0]:sag_l[1], :, :] 19 | selected_data_R = data_R[sag_r[0]:sag_r[1], :, :] 20 | return selected_data_L, selected_data_R 21 | 22 | def get_cor_slices(data_L, data_R, cor_l, cor_r): 23 | selected_data_L = data_L[:, cor_l[0]:cor_l[1], :] 24 | selected_data_R = data_R[:, cor_r[0]:cor_r[1], :] 25 | return selected_data_L, selected_data_R 26 | 27 | def get_axi_slices(data_L, data_R, axi_l, axi_r): 28 | selected_data_L = data_L[:, :, axi_l[0]:axi_l[1]] 29 | selected_data_R = data_R[:, :, axi_r[0]:axi_r[1]] 30 | return selected_data_L, selected_data_R 31 | 32 | def plot_ROI_all(data_roi_L, data_roi_R, left_dims, right_dims): 33 | sag_l, cor_l, axi_l = left_dims 34 | sag_r, cor_r, axi_r = right_dims 35 | 36 | sag_L, sag_R = get_sag_slices(data_roi_L, data_roi_R, sag_l, sag_r) 37 | cor_L, cor_R = get_cor_slices(data_roi_L, data_roi_R, cor_l, cor_r) 38 | axi_L, axi_R = get_axi_slices(data_roi_L, data_roi_R, axi_l, axi_r) 39 | 40 | # plot 2D slice from ROI (m-1, m, m+1) 41 | for i in range(3): 42 | plt.subplot(3, 6, i+1) 43 | plt.imshow(sag_L[i, :, :], cmap='gray', origin="lower") 44 | plt.subplot(3, 6, 4+i) 45 | plt.imshow(sag_R[i, :, :], cmap='gray', origin="lower") 46 | 47 | plt.subplot(3, 6, 6+i+1) 48 | plt.imshow(cor_L[:, i, :], cmap='gray', origin="lower") 49 | plt.subplot(3, 6, 6+4+i) 50 | plt.imshow(cor_R[:, i, :], cmap='gray', origin="lower") 51 | 52 | plt.subplot(3, 6, 12+i+1) 53 | plt.imshow(axi_L[:, :, i], cmap='gray', origin="lower") 54 | plt.subplot(3, 6, 12+4+i) 55 | plt.imshow(axi_R[:, :, i], cmap='gray', origin="lower") 56 | plt.show() 57 | 58 | def get_pickle_from_folder(folder): 59 | res = [] 60 | for root, dirs, files in os.walk(folder): 61 | for file in files: 62 | if file.endswith('pkl'): 63 | res.append(os.path.join(root, file)) 64 | return res 65 | 66 | 67 | #------------------------------------------------------------------------------------------ 68 | # function::__main__ :: 69 | #------------------------------------------------------------------------------------------ 70 | def main(): 71 | binaries_classes = ['AD-NC', 'AD-MCI', 'MCI-NC'] 72 | data_params = rsd.get_all_data_params() 73 | root_path = data_params['adni_data_des'] 74 | name_cnv = root_path + tls.get_convention_name(data_params) + '/' + str(data_params['ROI_list'][data_params['ROI_selection']] + '/' + data_params['3D_or_2D']) 75 | line = name_cnv + '/' + binaries_classes[0] + '/test/' 76 | list_files = get_pickle_from_folder(line) 77 | 78 | for i in list_files: 79 | model = daf.read_model(i) 80 | print(" HIPP_L : {} - HIPP_R: {} - Vector: {} - Label: {}".format(model.hippLeft.shape, model.hippRight.shape, model.hippMetaDataVector, model.hippLabel)) 81 | # print(model) 82 | left_dims, right_dims = [[13,16],[13,16],[13,16]], [[13,16],[13,16],[13,16]] 83 | plot_ROI_all(model.hippLeft, model.hippRight, left_dims, right_dims) 84 | 85 | 86 | 87 | #------------------------------------------------------------------------------------------ 88 | # Start ->>>->>> 89 | #------------------------------------------------------------------------------------------ 90 | if __name__ == '__main__': 91 | main() 92 | 
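# A minimal smoke test for plot_ROI_all() above (hypothetical: random cubes stand in
# for real ADNI pickles; the 28x28x28 shape and [13, 16] windows mirror the values
# hard-coded in main()):
#
#   L = np.random.rand(28, 28, 28)           # stand-in for model.hippLeft
#   R = np.random.rand(28, 28, 28)           # stand-in for model.hippRight
#   dims = [[13, 16], [13, 16], [13, 16]]    # (sag, cor, axi) slice windows
#   plot_ROI_all(L, R, dims, dims)           # 3x6 grid: left vs right, three slices per projection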
-------------------------------------------------------------------------------- /src/data_processing/plot/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/plot/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /src/data_processing/plot/__pycache__/plot_3D_ROI.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/plot/__pycache__/plot_3D_ROI.cpython-36.pyc -------------------------------------------------------------------------------- /src/data_processing/plot/__pycache__/plot_data.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/plot/__pycache__/plot_data.cpython-36.pyc -------------------------------------------------------------------------------- /src/data_processing/plot/plot_3D_ROI.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | import numpy as np 4 | import services.tools as tls 5 | import matplotlib.pyplot as plt 6 | from PIL import Image 7 | 8 | #------------------------------------------------------------------------------------------ 9 | # Plot slices from the selected ROI 10 | #------------------------------------------------------------------------------------------ 11 | 12 | 13 | def get_sag_slices(data_L, data_R, sag_l, sag_r): 14 | selected_data_L = data_L[sag_l[0]:sag_l[1], :, :] 15 | selected_data_R = data_R[sag_r[0]:sag_r[1], :, :] 16 | return selected_data_L, selected_data_R 17 | 18 | def get_cor_slices(data_L, data_R, cor_l, cor_r): 19 | selected_data_L = data_L[:, cor_l[0]:cor_l[1], :] 20 | selected_data_R = data_R[:, cor_r[0]:cor_r[1], :] 21 | return selected_data_L, selected_data_R 22 | 23 | def get_axi_slices(data_L, data_R, axi_l, axi_r): 24 | selected_data_L = data_L[:, :, axi_l[0]:axi_l[1]] 25 | selected_data_R = data_R[:, :, axi_r[0]:axi_r[1]] 26 | return selected_data_L, selected_data_R 27 | 28 | 29 | 30 | 31 | def plot_ROI_all(data_roi_L, data_roi_R, left_dims, right_dims): 32 | sag_l, cor_l, axi_l = left_dims 33 | sag_r, cor_r, axi_r = right_dims 34 | # sagittal slices 35 | sag_L, sag_R = get_sag_slices(data_roi_L, data_roi_R, sag_l, sag_r) 36 | cor_L, cor_R = get_cor_slices(data_roi_L, data_roi_R, cor_l, cor_r) 37 | axi_L, axi_R = get_axi_slices(data_roi_L, data_roi_R, axi_l, axi_r) 38 | 39 | # """ Function to display row of image slices """ 40 | # fig, axes = plt.subplots(1, len(slices)) 41 | # for i, slice in enumerate(slices): 42 | # axes[i].imshow(slice.T, cmap="gray", origin="lower") 43 | 44 | 45 | 46 | # plot 2D slice from ROI (m-1, m, m+1) 47 | for i in range(3): 48 | plt.subplot(3, 6, i+1) 49 | plt.imshow(sag_L[i, :, :], cmap='gray') 50 | plt.subplot(3, 6, 4+i) 51 | plt.imshow(sag_R[i, :, :], cmap='gray') 52 | 53 | plt.subplot(3, 6, 6+i+1) 54 | plt.imshow(cor_L[:, i, :], cmap='gray') 55 | plt.subplot(3, 6, 6+4+i) 56 | plt.imshow(cor_R[:, i, :], cmap='gray') 57 | 58 | plt.subplot(3, 6, 12+i+1) 59 | plt.imshow(axi_L[:, :, i], cmap='gray') 60 | plt.subplot(3, 6, 12+4+i) 61 | 
plt.imshow(axi_R[:, :, i], cmap='gray') 62 | plt.show() -------------------------------------------------------------------------------- /src/data_processing/plot/plot_data.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | import numpy as np 4 | import services.tools as tls 5 | import matplotlib.pyplot as plt 6 | from PIL import Image 7 | 8 | #------------------------------------------------------------------------------------------ 9 | # Plot slices from the selected ROI 10 | #------------------------------------------------------------------------------------------ 11 | 12 | # # Plot 13 | # def plot_ROI(data_roi, projection, slc_index_begin, slc_index_end): 14 | # if projection == 0: # sag 15 | # selected_data = data_roi[slc_index_begin:slc_index_end, :, :] 16 | # data = np.transpose(selected_data, (0, 1, 2)) # for example 3,28,28 17 | # elif projection == 1: # cor 18 | # data = data_roi[:, slc_index_begin:slc_index_end, :] 19 | # data = np.transpose(data, (1, 0, 2)) 20 | # else: # axi 21 | # data = data_roi[:, :, slc_index_begin:slc_index_end] 22 | # data = np.transpose(data, (2, 0, 1)) 23 | # # create a container to hold transposed data 24 | # container = np.zeros((data.shape[1], data.shape[2], 3)) # 28,28,3 25 | # container[:, :, 0], container[:, :, 1], container[:, :, 2] = [np.array(tls.matrix_rotation(data[i, :, :])) for i in range(3)] 26 | # # plot 2D slice from ROI (m-1, m, m+1) 27 | # for i in range(int(slc_index_end - slc_index_begin)): 28 | # plt.subplot(1, int(slc_index_end - slc_index_begin), i+1) 29 | # plt.imshow(container[:, :, i], cmap='gray') 30 | # plt.show() 31 | 32 | # def get_sag_slices(data_L, data_R, sag_l, sag_r): 33 | # # sagittal slices 34 | # selected_data_L = data_L[sag_l[0]:sag_l[1], :, :] 35 | # sag_data_L = np.transpose(selected_data_L, (0, 1, 2)) # 3,28,28 36 | # selected_data_R = data_R[sag_r[0]:sag_r[1], :, :] 37 | # sag_data_R = np.transpose(selected_data_R, (0, 1, 2)) # 3,28,28 38 | # container_L = np.zeros((sag_data_L.shape[1], sag_data_L.shape[2], 3)) # 28,28,3 39 | 40 | # print(container_L.shape, sag_l) 41 | # container_L[:, :, 0], container_L[:, :, 1], container_L[:, :, 2] = [np.array(tls.matrix_rotation(sag_data_L[i, :, :])) for i in range(3)] 42 | # container_R = np.zeros((sag_data_R.shape[1], sag_data_R.shape[2], 3)) # 28,28,3 43 | # container_R[:, :, 0], container_R[:, :, 1], container_R[:, :, 2] = [np.array(tls.matrix_rotation(sag_data_R[i, :, :])) for i in range(3)] 44 | # return container_L, container_R 45 | 46 | # def get_cor_slices(data_L, data_R, cor_l, cor_r): 47 | # # sagittal slices 48 | # selected_data_L = data_L[:, cor_l[0]:cor_l[1], :] 49 | # cor_data_L = np.transpose(selected_data_L, (1, 0, 2)) # 3,28,28 50 | # selected_data_R = data_R[:, cor_r[0]:cor_r[1], :] 51 | # cor_data_R = np.transpose(selected_data_R, (1, 0, 2)) # 3,28,28 52 | # container_L = np.zeros((cor_data_L.shape[1], cor_data_L.shape[2], 3)) # 28,28,3 53 | # container_L[:, :, 0], container_L[:, :, 1], container_L[:, :, 2] = [np.array(tls.matrix_rotation(cor_data_L[i, :, :])) for i in range(3)] 54 | # container_R = np.zeros((cor_data_R.shape[1], cor_data_R.shape[2], 3)) # 28,28,3 55 | # container_R[:, :, 0], container_R[:, :, 1], container_R[:, :, 2] = [np.array(tls.matrix_rotation(cor_data_R[i, :, :])) for i in range(3)] 56 | # return container_L, container_R 57 | 58 | # def get_axi_slices(data_L, data_R, axi_l, axi_r): 59 | # # sagittal slices 60 | # selected_data_L = data_L[:, :, axi_l[0]:axi_l[1]] 61 
| # axi_data_L = np.transpose(selected_data_L, (2, 0, 1)) # 3,28,28 62 | # selected_data_R = data_R[:, :, axi_r[0]:axi_r[1]] 63 | # axi_data_R = np.transpose(selected_data_R, (2, 0, 1)) # 3,28,28 64 | # container_L = np.zeros((axi_data_L.shape[1], axi_data_L.shape[2], 3)) # 28,28,3 65 | # container_L[:, :, 0], container_L[:, :, 1], container_L[:, :, 2] = [np.array(tls.matrix_rotation(axi_data_L[i, :, :])) for i in range(3)] 66 | # container_R = np.zeros((axi_data_R.shape[1], axi_data_R.shape[2], 3)) # 28,28,3 67 | # container_R[:, :, 0], container_R[:, :, 1], container_R[:, :, 2] = [np.array(tls.matrix_rotation(axi_data_R[i, :, :])) for i in range(3)] 68 | # return container_L, container_R 69 | 70 | # def plot_ROI_all(data_roi_L, data_roi_R, left_dims, right_dims): 71 | # sag_l, cor_l, axi_l = left_dims 72 | # sag_r, cor_r, axi_r = right_dims 73 | # # sagittal slices 74 | # sag_L, sag_R = get_sag_slices(data_roi_L, data_roi_R, sag_l, sag_r) 75 | # cor_L, cor_R = get_cor_slices(data_roi_L, data_roi_R, cor_l, cor_r) 76 | # axi_L, axi_R = get_axi_slices(data_roi_L, data_roi_R, axi_l, axi_r) 77 | # # plot 2D slice from ROI (m-1, m, m+1) 78 | # for i in range(3): 79 | # plt.subplot(3, 6, i+1) 80 | # plt.imshow(sag_L[:, :, i], cmap='gray') 81 | # plt.subplot(3, 6, 4+i) 82 | # plt.imshow(sag_R[:, :, i], cmap='gray') 83 | # plt.subplot(3, 6, 6+i+1) 84 | # plt.imshow(cor_L[:, :, i], cmap='gray') 85 | # plt.subplot(3, 6, 6+4+i) 86 | # plt.imshow(cor_R[:, :, i], cmap='gray') 87 | # plt.subplot(3, 6, 12+i+1) 88 | # plt.imshow(axi_L[:, :, i], cmap='gray') 89 | # plt.subplot(3, 6, 12+4+i) 90 | # plt.imshow(axi_R[:, :, i], cmap='gray') 91 | # plt.show() 92 | 93 | 94 | 95 | 96 | # new 97 | 98 | # Plot 99 | def plot_ROI(data_roi, projection, slc_index_begin, slc_index_end): 100 | if projection == 0: # sag 101 | selected_data = data_roi[slc_index_begin:slc_index_end, :, :] 102 | data = np.transpose(selected_data, (0, 1, 2)) # for example 3,28,28 103 | elif projection == 1: # cor 104 | data = data_roi[:, slc_index_begin:slc_index_end, :] 105 | data = np.transpose(data, (1, 0, 2)) 106 | else: # axi 107 | data = data_roi[:, :, slc_index_begin:slc_index_end] 108 | data = np.transpose(data, (2, 0, 1)) 109 | # create a container to hold transposed data 110 | container = np.zeros((data.shape[1], data.shape[2], 3)) # 28,28,3 111 | container[:, :, 0], container[:, :, 1], container[:, :, 2] = [np.array(tls.matrix_rotation(data[i, :, :])) for i in range(3)] 112 | # plot 2D slice from ROI (m-1, m, m+1) 113 | for i in range(int(slc_index_end - slc_index_begin)): 114 | plt.subplot(1, int(slc_index_end - slc_index_begin), i+1) 115 | plt.imshow(container[:, :, i], cmap='gray') 116 | plt.show() 117 | 118 | def get_sag_slices(data_L, data_R, sag_l, sag_r): 119 | # sagittal slices 120 | selected_data_L = data_L[sag_l[0]:sag_l[1], :, :] 121 | sag_data_L = np.transpose(selected_data_L, (0, 1, 2)) # 3,28,28 122 | selected_data_R = data_R[sag_r[0]:sag_r[1], :, :] 123 | sag_data_R = np.transpose(selected_data_R, (0, 1, 2)) # 3,28,28 124 | container_L = np.zeros((sag_data_L.shape[1], sag_data_L.shape[2], 3)) # 28,28,3 125 | 126 | print(container_L.shape, sag_l) 127 | container_L[:, :, 0], container_L[:, :, 1], container_L[:, :, 2] = [np.array(tls.matrix_rotation(sag_data_L[i, :, :])) for i in range(3)] 128 | container_R = np.zeros((sag_data_R.shape[1], sag_data_R.shape[2], 3)) # 28,28,3 129 | container_R[:, :, 0], container_R[:, :, 1], container_R[:, :, 2] = [np.array(tls.matrix_rotation(sag_data_R[i, :, :])) for i in range(3)] 130 
| return container_L, container_R 131 | 132 | def get_cor_slices(data_L, data_R, cor_l, cor_r): 133 | # sagittal slices 134 | selected_data_L = data_L[:, cor_l[0]:cor_l[1], :] 135 | cor_data_L = np.transpose(selected_data_L, (1, 0, 2)) # 3,28,28 136 | selected_data_R = data_R[:, cor_r[0]:cor_r[1], :] 137 | cor_data_R = np.transpose(selected_data_R, (1, 0, 2)) # 3,28,28 138 | container_L = np.zeros((cor_data_L.shape[1], cor_data_L.shape[2], 3)) # 28,28,3 139 | container_L[:, :, 0], container_L[:, :, 1], container_L[:, :, 2] = [np.array(tls.matrix_rotation(cor_data_L[i, :, :])) for i in range(3)] 140 | container_R = np.zeros((cor_data_R.shape[1], cor_data_R.shape[2], 3)) # 28,28,3 141 | container_R[:, :, 0], container_R[:, :, 1], container_R[:, :, 2] = [np.array(tls.matrix_rotation(cor_data_R[i, :, :])) for i in range(3)] 142 | return container_L, container_R 143 | 144 | def get_axi_slices(data_L, data_R, axi_l, axi_r): 145 | # sagittal slices 146 | selected_data_L = data_L[:, :, axi_l[0]:axi_l[1]] 147 | axi_data_L = np.transpose(selected_data_L, (2, 0, 1)) # 3,28,28 148 | selected_data_R = data_R[:, :, axi_r[0]:axi_r[1]] 149 | axi_data_R = np.transpose(selected_data_R, (2, 0, 1)) # 3,28,28 150 | container_L = np.zeros((axi_data_L.shape[1], axi_data_L.shape[2], 3)) # 28,28,3 151 | container_L[:, :, 0], container_L[:, :, 1], container_L[:, :, 2] = [np.array(tls.matrix_rotation(axi_data_L[i, :, :])) for i in range(3)] 152 | container_R = np.zeros((axi_data_R.shape[1], axi_data_R.shape[2], 3)) # 28,28,3 153 | container_R[:, :, 0], container_R[:, :, 1], container_R[:, :, 2] = [np.array(tls.matrix_rotation(axi_data_R[i, :, :])) for i in range(3)] 154 | return container_L, container_R 155 | 156 | def plot_ROI_all(data_roi_L, data_roi_R, left_dims, right_dims): 157 | sag_l, cor_l, axi_l = left_dims 158 | sag_r, cor_r, axi_r = right_dims 159 | # sagittal slices 160 | sag_L, sag_R = get_sag_slices(data_roi_L, data_roi_R, sag_l, sag_r) 161 | cor_L, cor_R = get_cor_slices(data_roi_L, data_roi_R, cor_l, cor_r) 162 | axi_L, axi_R = get_axi_slices(data_roi_L, data_roi_R, axi_l, axi_r) 163 | # plot 2D slice from ROI (m-1, m, m+1) 164 | for i in range(3): 165 | plt.subplot(3, 6, i+1) 166 | plt.imshow(sag_L[:, :, i], cmap='gray') 167 | plt.subplot(3, 6, 4+i) 168 | plt.imshow(sag_R[:, :, i], cmap='gray') 169 | plt.subplot(3, 6, 6+i+1) 170 | plt.imshow(cor_L[:, :, i], cmap='gray') 171 | plt.subplot(3, 6, 6+4+i) 172 | plt.imshow(cor_R[:, :, i], cmap='gray') 173 | plt.subplot(3, 6, 12+i+1) 174 | plt.imshow(axi_L[:, :, i], cmap='gray') 175 | plt.subplot(3, 6, 12+4+i) 176 | plt.imshow(axi_R[:, :, i], cmap='gray') 177 | plt.show() -------------------------------------------------------------------------------- /src/data_processing/plot/plot_data.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/plot/plot_data.pyc -------------------------------------------------------------------------------- /src/data_processing/services/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/services/__init__.py -------------------------------------------------------------------------------- /src/data_processing/services/__init__.pyc: 
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/services/__init__.pyc
--------------------------------------------------------------------------------
/src/data_processing/services/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/services/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/src/data_processing/services/__pycache__/generate_sample_sets.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/services/__pycache__/generate_sample_sets.cpython-36.pyc
--------------------------------------------------------------------------------
/src/data_processing/services/__pycache__/process.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/services/__pycache__/process.cpython-36.pyc
--------------------------------------------------------------------------------
/src/data_processing/services/__pycache__/tools.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/services/__pycache__/tools.cpython-36.pyc
--------------------------------------------------------------------------------
/src/data_processing/services/generate_sample_sets.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/services/generate_sample_sets.pyc
--------------------------------------------------------------------------------
/src/data_processing/services/process.py:
--------------------------------------------------------------------------------
1 | 
2 | import io_data.data_acces_file as daf
3 | import services.tools as tls
4 | import numpy as np
5 | from scipy.ndimage.filters import gaussian_filter
6 | 
7 | #------------------------------------------------------------------------------------------
8 | # compute the voxel-wise mean of the Left & Right 3D cubes
9 | #------------------------------------------------------------------------------------------
10 | def mean_hipp(mat_a, mat_b):
11 |     x, y, z = mat_a.shape
12 |     result = np.empty((x, y, z), np.float)  # allocate the output cube
13 |     for i in range(x):
14 |         for j in range(y):
15 |             for k in range(z):
16 |                 result[i, j, k] = (mat_a[i, j, k] + mat_b[i, j, k]) / 2
17 |     return result
18 | #------------------------------------------------------------------------------------------
19 | # Flip the 3D array data along the x axis
20 | #------------------------------------------------------------------------------------------
21 | def flip_3d(data):
22 |     return data[::-1, :, :]
23 | 
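# Quick sanity check for the two helpers above (hypothetical 28x28x28 cubes,
# matching the hippocampus ROI size used elsewhere in this repo):
#   a = np.random.rand(28, 28, 28); b = np.random.rand(28, 28, 28)
#   assert np.array_equal(flip_3d(a), np.flip(a, axis=0))
#   assert np.allclose(mean_hipp(a, b), (a + b) / 2.0)  # the loop equals a vectorized mean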
24 | def mean_3d_matrix(mat_a, mat_b):
25 |     x, y, z = mat_a.shape
26 |     mean_hipp_local = np.empty((x, y, z), np.float)
27 |     for i in range(x):
28 |         for j in range(y):
29 |             for k in range(z):
30 |                 mean_hipp_local[i, j, k] = (mat_a[i, j, k] + mat_b[i, j, k]) / 2
31 |     return mean_hipp_local
32 | 
33 | 
34 | #------------------------------------------------------------------------------------------
35 | # crop the ROI biomarker and return its coordinates
36 | #------------------------------------------------------------------------------------------
37 | def crop_cubes(data_l, data_r, crp_l, crp_r):
38 |     cube_hipp_l = data_l[crp_l[0]:crp_l[1], crp_l[2]:crp_l[3], crp_l[4]:crp_l[5]]
39 |     cube_hipp_r = data_r[crp_r[0]:crp_r[1], crp_r[2]:crp_r[3], crp_r[4]:crp_r[5]]
40 |     return cube_hipp_l, cube_hipp_r
41 | 
42 | #------------------------------------------------------------------------------------------
43 | # apply the augmentation parameters to the cube
44 | #------------------------------------------------------------------------------------------
45 | def augmentation_cubes(data, max_shift, augm_params):
46 |     # augm_params should be a tuple of 4 elements: shift_x, shift_y, shift_z, blur_sigma
47 |     if data.ndim != 3 or len(augm_params) != 4:
48 |         raise ValueError('augmentation_cubes: expected a 3D cube and 4 augmentation parameters')
49 |     shift_x = augm_params[0]
50 |     shift_y = augm_params[1]
51 |     shift_z = augm_params[2]
52 |     blur_sigma = augm_params[3]
53 |     s_x, s_y, s_z = (data.shape[0] - 2 * max_shift, data.shape[1] - 2 * max_shift, data.shape[2] - 2 * max_shift)
54 |     blurred = data if blur_sigma == 0 else gaussian_filter(data, sigma=blur_sigma)
55 |     sub_data_l = blurred[max_shift + shift_x: s_x + max_shift + shift_x, max_shift + shift_y: s_y + max_shift + shift_y,
56 |                          max_shift + shift_z: s_z + max_shift + shift_z]
57 |     sub_data_r = blurred[max_shift - shift_x: s_x + max_shift - shift_x, max_shift + shift_y: s_y + max_shift + shift_y,
58 |                          max_shift + shift_z: s_z + max_shift + shift_z]
59 |     return sub_data_l, sub_data_r  # return two augmented cubes
60 | 
61 | def process_mean_hippocampus(list_item, data_params):
62 |     nii = ""
63 |     nii = daf.get_nii_from_folder(list_item[1])[0]  # get the first .nii file found in the dir
64 |     array = tls.nii_to_array(nii, np.float)
65 |     padding_param = int(data_params['padding_size'])
66 |     max_shift_param = int(data_params['shift'])
67 |     # Augmentations cubes
68 |     sub_l, sub_r = augmentation_cubes(array, max_shift_param, list_item[2])
69 |     roi_hipp_l_params = data_params['hipp_left']  # Hippocampus ROI coordinates (x,x,y,y,z,z)
70 |     roi_hipp_r_params = data_params['hipp_right']
71 |     new_crp_l = (roi_hipp_l_params[0] - 1 - max_shift_param - padding_param, roi_hipp_l_params[1] - 1 - max_shift_param + padding_param,
72 |                  roi_hipp_l_params[2] - 1 - max_shift_param - padding_param, roi_hipp_l_params[3] - 1 - max_shift_param + padding_param,
73 |                  roi_hipp_l_params[4] - 1 - max_shift_param - padding_param, roi_hipp_l_params[5] - 1 - max_shift_param + padding_param)
74 |     new_crp_r = (roi_hipp_r_params[0] - 1 - max_shift_param - padding_param, roi_hipp_r_params[1] - 1 - max_shift_param + padding_param,
75 |                  roi_hipp_r_params[2] - 1 - max_shift_param - padding_param, roi_hipp_r_params[3] - 1 - max_shift_param + padding_param,
76 |                  roi_hipp_r_params[4] - 1 - max_shift_param - padding_param, roi_hipp_r_params[5] - 1 - max_shift_param + padding_param)
77 |     roi_cube_left, roi_cube_right = crop_cubes(sub_l, sub_r, new_crp_l, new_crp_r)
78 |     roi_cube_right_flipped = flip_3d(roi_cube_right)  # mirror the right cube so both hippocampi share one orientation
79 |     roi_hippocampus_mean = mean_3d_matrix(roi_cube_left, roi_cube_right_flipped)
80 |     return roi_hippocampus_mean
81 | 
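# Worked example for the crop arithmetic above (hypothetical numbers): with a
# 1-based ROI coordinate x1 = 30, max_shift = 2 and padding_size = 3, the crop
# starts at 30 - 1 - 2 - 3 = 24 in the shifted sub-volume. The 1-based anatomical
# coordinate is first made 0-based (-1), then re-expressed in the coordinates of
# the shift-cropped sub-volume (-max_shift), then the window is widened by the
# padding (start indices subtract it, end indices add it).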
82 | def process_cube_HIPP(list_item, data_params):
83 |     nii = ""
84 |     nii = daf.get_nii_from_folder(list_item[1])[0]  # get the first .nii file found in the dir
85 |     array = tls.nii_to_array(nii, np.float)
86 |     padding_param = int(data_params['padding_size'])
87 |     max_shift_param = int(data_params['shift'])
88 |     # Augmentations cubes
89 |     sub_l, sub_r = augmentation_cubes(array, max_shift_param, list_item[2])
90 |     # Hippocampus ROI coordinates (x,x,y,y,z,z)
91 |     roi_hipp_l_params = data_params['hipp_left']
92 |     roi_hipp_r_params = data_params['hipp_right']
93 |     new_crp_l = (roi_hipp_l_params[0] - 1 - max_shift_param - padding_param, roi_hipp_l_params[1] - 1 - max_shift_param + padding_param,
94 |                  roi_hipp_l_params[2] - 1 - max_shift_param - padding_param, roi_hipp_l_params[3] - 1 - max_shift_param + padding_param,
95 |                  roi_hipp_l_params[4] - 1 - max_shift_param - padding_param, roi_hipp_l_params[5] - 1 - max_shift_param + padding_param)
96 | 
97 |     new_crp_r = (roi_hipp_r_params[0] - 1 - max_shift_param - padding_param, roi_hipp_r_params[1] - 1 - max_shift_param + padding_param,
98 |                  roi_hipp_r_params[2] - 1 - max_shift_param - padding_param, roi_hipp_r_params[3] - 1 - max_shift_param + padding_param,
99 |                  roi_hipp_r_params[4] - 1 - max_shift_param - padding_param, roi_hipp_r_params[5] - 1 - max_shift_param + padding_param)
100 |     return crop_cubes(sub_l, sub_r, new_crp_l, new_crp_r)
101 | 
102 | def process_cube_PPC(list_item, data_params):
103 |     nii = ""
104 |     nii = daf.get_nii_from_folder(list_item[1])[0]  # get the first .nii file found in the dir
105 |     array = tls.nii_to_array(nii, np.float)
106 |     padding_param = int(data_params['padding_size'])
107 |     max_shift_param = int(data_params['shift'])
108 |     # Augmentations cubes
109 |     sub_l, sub_r = augmentation_cubes(array, max_shift_param, list_item[2])
110 |     roi_ppc_l_params = data_params['ppc_left']  # PCC ROI coordinates (x,x,y,y,z,z)
111 |     roi_pcc_r_params = data_params['ppc_right']
112 |     new_crp_l = (roi_ppc_l_params[0] - 1 - max_shift_param - padding_param, roi_ppc_l_params[1] - 1 - max_shift_param + padding_param,
113 |                  roi_ppc_l_params[2] - 1 - max_shift_param - padding_param, roi_ppc_l_params[3] - 1 - max_shift_param + padding_param,
114 |                  roi_ppc_l_params[4] - 1 - max_shift_param - padding_param, roi_ppc_l_params[5] - 1 - max_shift_param + padding_param)
115 |     new_crp_r = (roi_pcc_r_params[0] - 1 - max_shift_param - padding_param, roi_pcc_r_params[1] - 1 - max_shift_param + padding_param,
116 |                  roi_pcc_r_params[2] - 1 - max_shift_param - padding_param, roi_pcc_r_params[3] - 1 - max_shift_param + padding_param,
117 |                  roi_pcc_r_params[4] - 1 - max_shift_param - padding_param, roi_pcc_r_params[5] - 1 - max_shift_param + padding_param)
118 |     return crop_cubes(sub_l, sub_r, new_crp_l, new_crp_r)
119 | 
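# computeScores() below condenses one diagnostic group into
#   [N, 'F/M', '[min, max]/mean(std)' for age, MMSE, GDS, CDR]
# so one row of the demography table reads like, e.g. (hypothetical values):
#   ['AD', 188, '89/99', '[55.1, 90.9]/75.26(7.49)', ...]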
120 | # ########## compute the descriptive statistics (demography) table
121 | def computeScores(liste):
122 |     age = np.asanyarray([float(liste[i].age) for i in range(len(liste))], dtype=np.float32)
123 |     sex = [str(liste[i].sex) for i in range(len(liste))]
124 |     mmse = np.asanyarray([float(liste[i].mmse) for i in range(len(liste))], dtype=np.float32)
125 |     gds = np.asanyarray([float(liste[i].gds) for i in range(len(liste))], dtype=np.float32)
126 |     cdr = np.asanyarray([float(liste[i].cdr) for i in range(len(liste))], dtype=np.float32)
127 |     femaleNumber = ['F' for i in sex if 'F' in str(i)]
128 |     sexRedEX = str(len(femaleNumber)) + '/' + str(len(sex) - len(femaleNumber))  # female/male count
129 |     ageRegEX = '[' + str(round(np.min(age), 2)) + ', ' + str(round(np.max(age), 2)) + ']/' + str(round(np.mean(age), 2)) + '(' + str(round(np.std(age), 2)) + ')'
130 |     mmseRegEX = '[' + str(round(np.min(mmse), 2)) + ', ' + str(round(np.max(mmse), 2)) + ']/' + str(round(np.mean(mmse), 2)) + '(' + str(round(np.std(mmse), 2)) + ')'
131 |     gdsRegEX = '[' + str(round(np.min(gds), 2)) + ', ' + str(round(np.max(gds), 2)) + ']/' + str(round(np.mean(gds), 2)) + '(' + str(round(np.std(gds), 2)) + ')'
132 |     cdrRegEX = '[' + str(round(np.min(cdr), 2)) + ', ' + str(round(np.max(cdr), 2)) + ']/' + str(round(np.mean(cdr), 2)) + '(' + str(round(np.std(cdr), 2)) + ')'
133 |     return [len(liste), sexRedEX, ageRegEX, mmseRegEX, gdsRegEX, cdrRegEX]
134 | 
135 | def compute_demography_description(data_params):
136 |     import services.generate_sample_sets as gss
137 |     AD, MCI, NC = gss.get_subjects_with_classes(data_params)
138 |     AD_list = [tls.getSubjectByID(data_params, str(i)) for i in AD]
139 |     MCI_list = [tls.getSubjectByID(data_params, str(i)) for i in MCI]
140 |     NC_list = [tls.getSubjectByID(data_params, str(i)) for i in NC]
141 |     return [['AD'] + computeScores(AD_list), ['MCI'] + computeScores(MCI_list), ['NC'] + computeScores(NC_list)]
142 | 
--------------------------------------------------------------------------------
/src/data_processing/services/process.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/services/process.pyc
--------------------------------------------------------------------------------
/src/data_processing/services/tools.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/data_processing/services/tools.pyc
--------------------------------------------------------------------------------
/src/pytorch-project/3Dconv.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/pytorch-project/3Dconv.tgz
--------------------------------------------------------------------------------
/src/pytorch-project/config/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/pytorch-project/config/__init__.py
--------------------------------------------------------------------------------
/src/pytorch-project/config/config_init.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 | 
3 | # ====================================================
4 | # Author: Karim ADERGHAL
5 | # Year: 2019
6 | # Labs: LaBRI & LabSIV
7 | # for ADNI Dataset : ADNI-1 baseline SMRI
8 | # screening selected dataset
9 | # URL: http://adni.loni.usc.edu/
10 | # ====================================================
11 | 
12 | 
13 | #------------------------------------------------------------------------------------------
14 | # Debugging & Time Zone
15 | #------------------------------------------------------------------------------------------
16 | DEBUG = False
17 | TIMEZONE = 'France/Bordeaux'
18 | 
19 | #------------------------------------------------------------------------------------------
20 | # Author Information
21 | #------------------------------------------------------------------------------------------
22 | AUTHOR_INFO = {
23 |     'author': 'Karim ADERGHAL',
24 |     'name': 'ALZ-ADNI PCS',
25 |     'version': '1.2',
26 |     'year': '2019',
27 |     'description': 'Data extraction scripts for CNN Alzheimer\'s Disease classification',
28 |     'url': 'http://github.com/kaderghal',
29 |     'email': 'aderghal.karim@gmail.com',
30 |     'university': 'Université de Bordeaux (Bordeaux)/ University IBN Zohr (Agadir)',
31 |     'lab': 'LaBRI & LabSIV'
32 | }
33 | 
34 | #------------------------------------------------------------------------------------------
35 | # Root path to local workspace (local Machine)
36 | #------------------------------------------------------------------------------------------
37 | ROOT_PATH_LOCAL_MACHINE = {
38 |     'root_machine': '/home/karim/workspace/ADNI_workspace'
39 | }
40 | 
41 | #------------------------------------------------------------------------------------------
42 | # Global parameters:
43 | # -> Path to the used Deep Learning framework
44 | # -> Path to the output results
45 | #------------------------------------------------------------------------------------------
46 | GLOBAL_PARAMS = {
47 |     'pytorch_root': ROOT_PATH_LOCAL_MACHINE['root_machine'] + '/path/to/pytorch/',
48 |     'adni_data_src': ROOT_PATH_LOCAL_MACHINE['root_machine'] + '/results/ADNI_src/',
49 |     'adni_data_des': ROOT_PATH_LOCAL_MACHINE['root_machine'] + '/results/ADNI_des/'
50 | }
51 | 
52 | # sample settings from a face-pairs (Siamese) example; not referenced elsewhere in this file
53 | training_dir = "./data/faces/training/"
54 | testing_dir = "./data/faces/testing/"
55 | train_batch_size = 64
56 | train_number_epochs = 100
57 | 
58 | # TODO: fill in the train/valid/test split subfolders under 'adni_data_des'
59 | NETWORK_PARAMS = {
60 |     'train_folder': GLOBAL_PARAMS['adni_data_des'] + '',
61 |     'valid_folder': GLOBAL_PARAMS['adni_data_des'] + '',
62 |     'test_folder': GLOBAL_PARAMS['adni_data_des'] + ''
63 | }
--------------------------------------------------------------------------------
/src/pytorch-project/dataloader/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/pytorch-project/dataloader/__init__.py
--------------------------------------------------------------------------------
/src/pytorch-project/dataloader/data_loader.py:
--------------------------------------------------------------------------------
1 | 
2 | import numpy as np
3 | import pickle
4 | import os
5 | import sys
6 | 
7 | import torch
8 | import torchvision
9 | from torchvision import transforms
10 | from torch.utils.data.dataset import Dataset
11 | from torch.utils.data import Dataset, DataLoader
12 | from torch.utils.data.sampler import BatchSampler
13 | from torchvision.datasets import DatasetFolder
14 | 
15 | from PIL import Image
16 | 
17 | 
18 | ADNI_MODEL_EXTENSIONS = ('.pkl',)  # trailing comma makes this a tuple; ('.pkl') is just a string
19 | 
20 | 
21 | 
22 | # 2 Class Datafolder
23 | class Dataset_ADNI_Folder(DatasetFolder):
24 |     # Methods
25 |     def __init__(self, root, loader, extensions=None, transform=None, target_transform=None, is_valid_file=None):
26 |         self.root = root
27 |         classes, class_to_idx = self._find_classes(self.root)
28 |         samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file)
29 | 
30 |         if len(samples) == 0:
31 |             raise (RuntimeError("Found 0 files in subfolders of: " + self.root + "\n"
32 |                                 "Supported extensions are: " + ",".join(extensions)))
33 | 
34 |         self.loader = loader
35 |         
self.extensions = extensions 36 | self.classes = classes 37 | self.class_to_idx = class_to_idx 38 | self.samples = samples 39 | self.transform = transforms.Compose([transforms.ToTensor()]) 40 | self.targets = [s[1] for s in samples] 41 | 42 | # __getitem__ 43 | def __getitem__(self, index): 44 | path, target = self.samples[index] 45 | sample = self.loader(path) 46 | # sample is objet instance of HippModel (L, R, V, Label) 47 | return (sample.hippLeft, sample.hippRight, sample.hippMetaDataVector, target) 48 | 49 | # __len__ 50 | def __len__(self): 51 | return len(self.samples) 52 | 53 | # _find_classes 54 | def _find_classes(self, dir): 55 | if sys.version_info >= (3, 5): 56 | # Faster and available in Python 3.5 and above 57 | classes = [d.name for d in os.scandir(dir) if d.is_dir()] 58 | else: 59 | classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))] 60 | classes.sort() 61 | class_to_idx = {classes[i]: i for i in range(len(classes))} 62 | return classes, class_to_idx 63 | 64 | 65 | # 1 pickle loader (load one sample) 66 | def pickle_loader(path_file): 67 | dir_name = os.path.dirname(path_file) 68 | with open(path_file, 'rb') as f: 69 | model_adni = pickle.load(f) 70 | return model_adni 71 | 72 | # to check if the file type is allowed 73 | def has_file_allowed_extension(filename, extensions): 74 | return filename.lower().endswith(extensions) 75 | 76 | 77 | def is_image_file(filename): 78 | return has_file_allowed_extension(filename, ADNI_MODEL_EXTENSIONS) 79 | 80 | 81 | def make_dataset(dir, class_to_idx, extensions=None, is_valid_file=None): 82 | images = [] 83 | dir = os.path.expanduser(dir) 84 | if not ((extensions is None) ^ (is_valid_file is None)): 85 | raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time") 86 | if extensions is not None: 87 | def is_valid_file(x): 88 | return has_file_allowed_extension(x, extensions) 89 | for target in sorted(class_to_idx.keys()): 90 | d = os.path.join(dir, target) 91 | if not os.path.isdir(d): 92 | continue 93 | for root, _, fnames in sorted(os.walk(d)): 94 | for fname in sorted(fnames): 95 | path = os.path.join(root, fname) 96 | if is_valid_file(path): 97 | item = (path, class_to_idx[target]) 98 | images.append(item) 99 | 100 | return images -------------------------------------------------------------------------------- /src/pytorch-project/datasets/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/pytorch-project/datasets/__init__.py -------------------------------------------------------------------------------- /src/pytorch-project/datasets/datasets.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | import pickle 4 | import os 5 | import sys 6 | 7 | from PIL import Image 8 | 9 | import torch 10 | import torchvision 11 | from torch.utils.data.dataset import Dataset 12 | from torch.utils.data import Dataset, DataLoader 13 | from torch.utils.data.sampler import BatchSampler 14 | from torchvision.datasets import DatasetFolder 15 | from torchvision import transforms 16 | 17 | 18 | 19 | # import models 20 | 21 | # for pickle load 22 | sys.path.append('/home/karim/workspace/vscode-python/ADNI_codesources/kaderghal/src/data_processing/') 23 | 24 | root_path = '/home/karim/workspace/ADNI_workspace/results/ADNI_des/F_28P_F10_MS2_MB10D/HIPP/3D/AD-NC/' 25 | 26 | 27 | 28 | 
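# is_image_file() below references ADNI_MODEL_EXTENSIONS, which this module never
# defines (only dataloader/data_loader.py does); mirroring that definition here
# keeps the module importable and the helper usable on its own:
ADNI_MODEL_EXTENSIONS = ('.pkl',)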
29 | 30 | # 1 pickle loader (load one sample) 31 | def pickle_loader(path_file): 32 | dir_name = os.path.dirname(path_file) 33 | with open(path_file, 'rb') as f: 34 | model_adni = pickle.load(f) 35 | return model_adni 36 | 37 | # to check if the file type is allowed 38 | def has_file_allowed_extension(filename, extensions): 39 | return filename.lower().endswith(extensions) 40 | 41 | 42 | def is_image_file(filename): 43 | return has_file_allowed_extension(filename, ADNI_MODEL_EXTENSIONS) 44 | 45 | # function 46 | def make_dataset(dir, class_to_idx, extensions=None, is_valid_file=None): 47 | images = [] 48 | dir = os.path.expanduser(dir) 49 | if not ((extensions is None) ^ (is_valid_file is None)): 50 | raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time") 51 | if extensions is not None: 52 | def is_valid_file(x): 53 | return has_file_allowed_extension(x, extensions) 54 | for target in sorted(class_to_idx.keys()): 55 | d = os.path.join(dir, target) 56 | if not os.path.isdir(d): 57 | continue 58 | for root, _, fnames in sorted(os.walk(d)): 59 | for fname in sorted(fnames): 60 | path = os.path.join(root, fname) 61 | if is_valid_file(path): 62 | item = (path, class_to_idx[target]) 63 | images.append(item) 64 | 65 | return images 66 | 67 | 68 | # 2 Class Datafolder 69 | class Dataset_ADNI_Folder(DatasetFolder): 70 | 71 | # Methodes 72 | def __init__(self, root, loader, extensions=None, transform=None, target_transform=None, is_valid_file=None): 73 | 74 | self.root = root 75 | classes, class_to_idx = self._find_classes(self.root) 76 | samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file) 77 | 78 | if len(samples) == 0: 79 | raise (RuntimeError("Found 0 files in subfolders of: " + self.root + "\n" 80 | "Supported extensions are: " + ",".join(extensions))) 81 | 82 | self.loader = loader 83 | self.extensions = extensions 84 | self.classes = classes 85 | self.class_to_idx = class_to_idx 86 | self.samples = samples 87 | self.transform = transforms.Compose([transforms.ToTensor()]) 88 | self.targets = [s[1] for s in samples] 89 | 90 | # __getitem__ 91 | def __getitem__(self, index): 92 | path, target = self.samples[index] 93 | sample = self.loader(path) 94 | # if self.transform is not None: 95 | # sample = self.transform(sample) 96 | # if self.target_transform is not None: 97 | # target = self.target_transform(target) 98 | 99 | # sample is objet instance from HippModel (L, R, V, Label) 100 | return (sample.hippLeft, sample.hippRight, sample.hippMetaDataVector, target) 101 | 102 | # __len__ 103 | def __len__(self): 104 | return len(self.samples) 105 | 106 | # _find_classes 107 | def _find_classes(self, dir): 108 | if sys.version_info >= (3, 5): 109 | # Faster and available in Python 3.5 and above 110 | classes = [d.name for d in os.scandir(dir) if d.is_dir()] 111 | else: 112 | classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))] 113 | 114 | classes.sort() 115 | class_to_idx = {classes[i]: i for i in range(len(classes))} 116 | return classes, class_to_idx 117 | 118 | 119 | 120 | 121 | # __Main__ 122 | def main(): 123 | 124 | # parames 125 | params_num_workers = 4 126 | batch_size = 32 127 | 128 | # 3 dataloader 129 | 130 | train_data = Dataset_ADNI_Folder(root=root_path + 'train/', loader=pickle_loader, extensions='.pkl', transform=None) 131 | valid_data = Dataset_ADNI_Folder(root=root_path + 'valid/', loader=pickle_loader, extensions='.pkl', transform=None) 132 | test_data = Dataset_ADNI_Folder(root=root_path + 
'test/' , loader=pickle_loader, extensions='.pkl', transform=None) 133 | 134 | # # device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 135 | # # print("device: {}".format(device)) 136 | 137 | train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=params_num_workers) 138 | valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, shuffle=True, num_workers=params_num_workers) 139 | test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=True, num_workers=params_num_workers) 140 | 141 | 142 | index = 0 143 | for d1, d2, v, labels in valid_loader: 144 | # print("key: {} : Left {} : Right {} : Vect {} : label {}".format(index, d1.size(), d2.size(), v, labels.size())) 145 | print("key: {} - Left {} : Right {} - Vect {} : label {}".format(index, d1.size(), d2.size(), len(v), labels.size())) 146 | index+= 1 147 | 148 | 149 | if __name__ == '__main__': 150 | main() 151 | 152 | 153 | ############################################## 154 | 155 | 156 | 157 | 158 | # train_dataset = DatasetTransformer(train_dataset, transforms.ToTensor()) 159 | # valid_dataset = DatasetTransformer(valid_dataset, transforms.ToTensor()) 160 | # test_dataset = DatasetTransformer(test_dataset , transforms.ToTensor()) 161 | 162 | 163 | 164 | # train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4) 165 | # valid_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4) 166 | # test_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4) 167 | -------------------------------------------------------------------------------- /src/pytorch-project/graphs/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/pytorch-project/graphs/__init__.py -------------------------------------------------------------------------------- /src/pytorch-project/logs/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/pytorch-project/logs/__init__.py -------------------------------------------------------------------------------- /src/pytorch-project/main/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/pytorch-project/main/__init__.py -------------------------------------------------------------------------------- /src/pytorch-project/main/__main__.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import errno 4 | import random 5 | import pickle 6 | import numpy as np 7 | 8 | from PIL import Image 9 | 10 | import torch 11 | import torchvision 12 | from torch.utils.data.dataset import Dataset 13 | from torch.utils.data import Dataset, DataLoader 14 | from torch.utils.data.sampler import BatchSampler 15 | from torchvision.datasets import DatasetFolder 16 | from torchvision import transforms 17 | import torch.nn.functional as F 18 | from torch import nn 19 | from torch import optim 20 | 21 | # from torchsummary import summary 22 | import matplotlib.pyplot as plt 23 | 24 | import 
torch.optim as optim 25 | 26 | 27 | # for pickle load : test exampele 28 | # sys.path.append('/home/karim/workspace/vscode-python/ADNI_codesources/kaderghal/src/data_processing/') 29 | # root_path = '/home/karim/workspace/ADNI_workspace/results/ADNI_des/F_28P_F10_MS2_MB10D/HIPP/3D/AD-NC/' 30 | 31 | # server 32 | sys.path.append('/data/ADERGHAL/code-source/ADNI_Data_processing/src/data_processing/') 33 | root_path = '/data/ADERGHAL/ADNI_workspace/results/ADNI_des/F_28P_F10_MS2_MB10D/HIPP/3D/MCI-NC/' 34 | 35 | 36 | 37 | 38 | 39 | 40 | ADNI_MODEL_EXTENSIONS = ('.pkl') 41 | 42 | # 1 pickle loader (load one sample) 43 | def pickle_loader(path_file): 44 | dir_name = os.path.dirname(path_file) 45 | with open(path_file, 'rb') as f: 46 | model_adni = pickle.load(f) 47 | return model_adni 48 | 49 | # to check if the file type is allowed 50 | def has_file_allowed_extension(filename, extensions): 51 | return filename.lower().endswith(extensions) 52 | 53 | 54 | def is_image_file(filename): 55 | return has_file_allowed_extension(filename, ADNI_MODEL_EXTENSIONS) 56 | 57 | # function 58 | def make_dataset(dir, class_to_idx, extensions=None, is_valid_file=None): 59 | images = [] 60 | dir = os.path.expanduser(dir) 61 | if not ((extensions is None) ^ (is_valid_file is None)): 62 | raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time") 63 | if extensions is not None: 64 | def is_valid_file(x): 65 | return has_file_allowed_extension(x, extensions) 66 | for target in sorted(class_to_idx.keys()): 67 | d = os.path.join(dir, target) 68 | if not os.path.isdir(d): 69 | continue 70 | for root, _, fnames in sorted(os.walk(d)): 71 | for fname in sorted(fnames): 72 | path = os.path.join(root, fname) 73 | if is_valid_file(path): 74 | item = (path, class_to_idx[target]) 75 | images.append(item) 76 | 77 | return images 78 | 79 | 80 | 81 | 82 | 83 | # 2 Class Datafolder 84 | class Dataset_ADNI_Folder(DatasetFolder): 85 | 86 | # Methodes 87 | def __init__(self, root, loader, extensions=None, transform=None, target_transform=None, is_valid_file=None): 88 | 89 | self.root = root 90 | classes, class_to_idx = self._find_classes(self.root) 91 | samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file) 92 | 93 | if len(samples) == 0: 94 | raise (RuntimeError("Found 0 files in subfolders of: " + self.root + "\n" 95 | "Supported extensions are: " + ",".join(extensions))) 96 | 97 | self.loader = loader 98 | self.extensions = extensions 99 | self.classes = classes 100 | self.class_to_idx = class_to_idx 101 | self.samples = samples 102 | self.transform = transforms.Compose([transforms.ToTensor()]) 103 | self.targets = [s[1] for s in samples] 104 | 105 | # __getitem__ 106 | def __getitem__(self, index): 107 | path, target = self.samples[index] 108 | sample = self.loader(path) 109 | # if self.transform is not None: 110 | # sample = self.transform(sample) 111 | # if self.target_transform is not None: 112 | # target = self.target_transform(target) 113 | 114 | # sample is objet instance from HippModel (L, R, V, Label) 115 | return (sample.hippLeft, sample.hippRight, sample.hippMetaDataVector, target) 116 | 117 | # __len__ 118 | def __len__(self): 119 | return len(self.samples) 120 | 121 | # _find_classes 122 | def _find_classes(self, dir): 123 | if sys.version_info >= (3, 5): 124 | # Faster and available in Python 3.5 and above 125 | classes = [d.name for d in os.scandir(dir) if d.is_dir()] 126 | else: 127 | classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, 
d))]
128 | 
129 |         classes.sort()
130 |         class_to_idx = {classes[i]: i for i in range(len(classes))}
131 |         return classes, class_to_idx
132 | 
133 | 
134 | 
135 | 
136 | # one stream network
137 | class OneStreamNet(nn.Module):
138 |     def __init__(self):
139 |         super(OneStreamNet, self).__init__()
140 | 
141 |         self.conv1 = nn.Conv3d(1, 32, kernel_size=3, stride=1, padding=0)
142 |         self.conv2 = nn.Conv3d(32, 64, kernel_size=3, stride=1, padding=0)
143 | 
144 |         self.pool1 = nn.MaxPool3d(kernel_size=(3,3,3), stride=2, padding=0)
145 |         self.pool2 = nn.MaxPool3d(kernel_size=(3,3,3), stride=2, padding=0)
146 | 
147 |         self.relu1 = nn.ReLU(inplace=True)
148 |         self.relu2 = nn.ReLU(inplace=True)
149 | 
150 |         # Defining the fully connected layers
151 |         self.fc1 = nn.Linear(30000, 1024)
152 |         self.fc2 = nn.Linear(1024, 2)
153 | 
154 |     def forward(self, x):
155 |         # conv/pool stack, then flatten for the FC layers
156 |         x = self.conv1(x)
157 |         x = self.pool1(x)
158 |         x = self.relu1(x)
159 |         x = self.conv2(x)
160 |         x = self.pool2(x)
161 |         x = self.relu2(x)
162 |         x = x.view(x.size(0), -1)  # nn.Linear expects a flat (batch, features) tensor
163 |         x = self.fc1(x)
164 |         x = self.fc2(x)
165 |         return x
166 | 
167 | 
168 | 
169 | 
170 | 
171 | # 3D HIPP
172 | class HIPP3D(nn.Module):
173 |     def __init__(self):
174 |         super(HIPP3D, self).__init__()
175 |         self.conv3d1 = nn.Conv3d(1, 32, kernel_size=(4,4,4), stride=1, padding=1)
176 |         self.conv3d2 = nn.Conv3d(32, 64, kernel_size=(2,2,2), stride=1, padding=0)
177 |         self.fc1 = nn.Linear(64*7*7*7, 120)
178 |         self.fc2 = nn.Linear(120, 2)
179 | 
180 |     def forward(self, x):
181 |         x = F.max_pool3d(F.relu(self.conv3d1(x)), kernel_size=(3,3,3), stride=2, padding=0)
182 |         x = F.max_pool3d(F.relu(self.conv3d2(x)), kernel_size=(2,2,2), stride=2, padding=1)
183 |         x = x.view(-1, self.num_flat_features(x))
184 |         x = F.relu(self.fc1(x))
185 |         x = self.fc2(x)
186 |         return x
187 | 
188 |     def num_flat_features(self, x):
189 |         size = x.size()[1:]
190 |         num_features = 1
191 |         for s in size:
192 |             num_features *= s
193 |         return num_features
194 | 
195 | 
196 | 
197 | 
198 | # Train function
199 | def train(model, device, train_loader, epoch, optimizer):
200 |     pass
201 | 
202 | 
203 | # Test function
204 | def test(model, device, test_loader):
205 |     pass
206 | 
207 | 
208 | 
209 | #==========================================================================
210 | # Function: Main definition
211 | #==========================================================================
212 | def main():
213 | 
214 |     # params for data
215 |     params_num_workers = 4
216 |     batch_size = 32
217 |     num_classes = 2
218 |     save_frequency = 2
219 |     learning_rate = 0.0001
220 |     num_epochs = 30
221 |     weight_decay = 0.0001
222 | 
223 |     device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")  # PyTorch v0.4.0
224 |     print("using device :", device)
225 |     model = HIPP3D().to(device)
226 | 
227 | 
228 | 
229 |     # DataFolder
230 |     train_data = Dataset_ADNI_Folder(root=root_path + 'train/', loader=pickle_loader, extensions='.pkl', transform=None)
231 |     valid_data = Dataset_ADNI_Folder(root=root_path + 'valid/', loader=pickle_loader, extensions='.pkl', transform=None)
232 |     test_data = Dataset_ADNI_Folder(root=root_path + 'test/', loader=pickle_loader, extensions='.pkl', transform=None)
233 | 
234 |     # Dataloader
235 |     train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=params_num_workers)
236 |     valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, shuffle=True, num_workers=params_num_workers)
237 |     test_loader = 
torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=True, num_workers=params_num_workers) 238 | 239 | # net = LeNet() 240 | # summary(model, (1, 28, 28, 28)) 241 | 242 | criterion = nn.CrossEntropyLoss() 243 | optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9) 244 | 245 | 246 | # Train the model 247 | total_step = len(train_loader) 248 | loss_list = [] 249 | acc_list = [] 250 | 251 | running_loss = 0.0 252 | for epoch in range(num_epochs): 253 | print("epoch:", epoch) 254 | for i, (d1, d2, v, labels) in enumerate(train_loader): 255 | # print(i) 256 | # zero the parameter gradients 257 | optimizer.zero_grad() 258 | 259 | 260 | # # forward + backward + optimize 261 | d1 = torch.unsqueeze(d1, 1).to(device, dtype=torch.float) 262 | 263 | 264 | labels = labels.to(device) 265 | outputs = model(d1) 266 | loss = criterion(outputs, labels) 267 | loss.backward() 268 | optimizer.step() 269 | 270 | # Track the accuracy 271 | total = labels.size(0) 272 | _, predicted = torch.max(outputs.data, 1) 273 | correct = (predicted == labels).sum().item() 274 | acc_list.append(correct / total) 275 | 276 | if (i + 1) % 10 == 0: 277 | print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Accuracy: {:.2f}%' 278 | .format(epoch + 1, num_epochs, i + 1, total_step, loss.item(), (correct / total) * 100)) 279 | 280 | 281 | 282 | # # print statistics 283 | # running_loss += loss.item() 284 | # if i % 2000 == 1999: # print every 2000 mini-batches 285 | # print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000)) 286 | # running_loss = 0.0 287 | 288 | print('Finished Training') 289 | 290 | 291 | 292 | 293 | # for i, (d1, d2, v, labels) in enumerate(train_loader): 294 | # print(i) 295 | 296 | # # Run the forward pass 297 | # d1 = torch.unsqueeze(d1, 0).to(device, dtype=torch.float) 298 | # outputs = model(d1) 299 | # loss = criterion(outputs, labels) 300 | # loss_list.append(loss.item()) 301 | 302 | # # Backprop and perform Adam optimisation 303 | # optimizer.zero_grad() 304 | # loss.backward() 305 | # optimizer.step() 306 | 307 | # # Track the accuracy 308 | # total = labels.size(0) 309 | # _, predicted = torch.max(outputs.data, 1) 310 | # correct = (predicted == labels).sum().item() 311 | # acc_list.append(correct / total) 312 | 313 | # if (i + 1) % 100 == 0: 314 | # print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Accuracy: {:.2f}%' 315 | # .format(epoch + 1, num_epochs, i + 1, total_step, loss.item(), 316 | # (correct / total) * 100)) 317 | 318 | 319 | 320 | 321 | 322 | 323 | # model = OneStreamNet().to(device) 324 | # summary(model, (1, 28, 28, 28)) 325 | 326 | 327 | 328 | 329 | 330 | 331 | 332 | 333 | 334 | 335 | 336 | 337 | 338 | 339 | # index = 0 340 | # for d1, d2, v, labels in valid_loader: 341 | # # print("key: {} : Left {} : Right {} : Vect {} : label {}".format(index, d1.size(), d2.size(), v, labels.size())) 342 | # print("key: {} - Left {} : Right {} - Vect {} : label {}".format(index, d1.size(), d2.size(), len(v), labels.size())) 343 | # index+= 1 344 | 345 | 346 | 347 | #========================================================================== 348 | # Start : __Main__ 349 | #========================================================================== 350 | if __name__ == '__main__': 351 | main() 352 | 353 | 354 | -------------------------------------------------------------------------------- /src/pytorch-project/models/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/pytorch-project/models/__init__.py -------------------------------------------------------------------------------- /src/pytorch-project/models/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/pytorch-project/models/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /src/pytorch-project/networks/MyModel.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | import os 7 | import sys 8 | import pickle 9 | import errno 10 | import random 11 | import numpy as np 12 | 13 | import torch 14 | import torchvision 15 | import torch.nn.functional as F 16 | from torch import nn 17 | from torch import optim 18 | from torch.utils.data.dataset import Dataset 19 | from torch.utils.data import Dataset, DataLoader 20 | from torchvision.datasets import DatasetFolder 21 | from torchvision import transforms 22 | 23 | from torchsummary import summary 24 | 25 | import matplotlib.pyplot as plt 26 | 27 | 28 | # for pickle load 29 | # sys.path.append('/home/karim/workspace/vscode/ADNI_Data_processing/src/data-processing/') # labri Machine 30 | sys.path.append('/home/karim/workspace/vscode-python/ADNI_codesources/kaderghal/src/data-processing/') # home machine 31 | 32 | 33 | # root_path = '/home/karim/workspace/ADNI_workspace/results/ADNI_des/F_28P_F1_MS2_MB10D/HIPP/3D/AD-NC/' # labri machine 34 | root_path = '/home/karim/workspace/ADNI_workspace/results/ADNI_des/F_28P_F1_MS2_MB10D/HIPP/3D/AD-NC/' # Home machine 35 | 36 | # 1 pickle loader (load one sample) 37 | def pickle_loader(path_file): 38 | dir_name = os.path.dirname(path_file) 39 | with open(path_file, 'rb') as f: 40 | model_adni = pickle.load(f) 41 | return model_adni 42 | 43 | # to check if the file type is allowed 44 | def has_file_allowed_extension(filename, extensions): 45 | return filename.lower().endswith(extensions) 46 | 47 | def is_image_file(filename): 48 | return has_file_allowed_extension(filename, ADNI_MODEL_EXTENSIONS) 49 | 50 | # function 51 | def make_dataset(dir, class_to_idx, extensions=None, is_valid_file=None): 52 | images = [] 53 | dir = os.path.expanduser(dir) 54 | if not ((extensions is None) ^ (is_valid_file is None)): 55 | raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time") 56 | if extensions is not None: 57 | def is_valid_file(x): 58 | return has_file_allowed_extension(x, extensions) 59 | for target in sorted(class_to_idx.keys()): 60 | d = os.path.join(dir, target) 61 | if not os.path.isdir(d): 62 | continue 63 | for root, _, fnames in sorted(os.walk(d)): 64 | for fname in sorted(fnames): 65 | path = os.path.join(root, fname) 66 | if is_valid_file(path): 67 | item = (path, class_to_idx[target]) 68 | images.append(item) 69 | 70 | return images 71 | 72 | # Class Datafolder 73 | class Dataset_ADNI_Folder(DatasetFolder): 74 | 75 | # Methodes 76 | def __init__(self, root, loader, extensions=None, transform=None, target_transform=None, is_valid_file=None): 77 | 78 | self.root = root 79 | classes, class_to_idx = self._find_classes(self.root) 80 | samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file) 81 | 82 | if len(samples) == 0: 83 | raise (RuntimeError("Found 0 files in 
subfolders of: " + self.root + "\n" 84 | "Supported extensions are: " + ",".join(extensions))) 85 | 86 | self.loader = loader 87 | self.extensions = extensions 88 | self.classes = classes 89 | self.class_to_idx = class_to_idx 90 | self.samples = samples 91 | self.transform = transforms.Compose([transforms.ToTensor()]) 92 | self.targets = [s[1] for s in samples] 93 | 94 | # __getitem__ 95 | def __getitem__(self, index): 96 | path, target = self.samples[index] 97 | sample = self.loader(path) 98 | # if self.transform is not None: 99 | # sample = self.transform(sample) 100 | # if self.target_transform is not None: 101 | # target = self.target_transform(target) 102 | 103 | # sample is objet instance from HippModel (L, R, V, Label) 104 | return (sample.hippLeft, sample.hippRight, sample.hippMetaDataVector, target) 105 | 106 | # __len__ 107 | def __len__(self): 108 | return len(self.samples) 109 | 110 | # _find_classes 111 | def _find_classes(self, dir): 112 | if sys.version_info >= (3, 5): 113 | # Faster and available in Python 3.5 and above 114 | classes = [d.name for d in os.scandir(dir) if d.is_dir()] 115 | else: 116 | classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))] 117 | 118 | classes.sort() 119 | class_to_idx = {classes[i]: i for i in range(len(classes))} 120 | return classes, class_to_idx 121 | 122 | 123 | 124 | # one stream network 125 | class OneStreamNet(nn.Module): 126 | def __init__(self): 127 | super(OneStreamNet, self).__init__() 128 | 129 | self.conv1 = nn.Conv3d(1, 32, kernel_size=3 ,stride=1, padding=0) 130 | self.conv2 = nn.Conv3d(32, 64, kernel_size=3 ,stride=1, padding=0) 131 | 132 | self.pool1 = nn.MaxPool3d(kernel_size=(3,3,3), stride=2, padding=0) 133 | self.pool2 = nn.MaxPool3d(kernel_size=(3,3,3), stride=2, padding=0) 134 | 135 | self.relu1 = nn.ReLU(inplace=True) 136 | self.relu2 = nn.ReLU(inplace=True) 137 | 138 | # Defining the fully connected layers 139 | self.fc1 = nn.Linear(30000, 1024) 140 | self.fc2 = nn.Linear(1024, 2) 141 | 142 | def forward(self, x): 143 | # x = x.view(32,28,28,28) 144 | # x = x.view(x.size(0), -1) 145 | x = self.conv1(x) 146 | x = self.pool1(x) 147 | x = self.relu1(x) 148 | x = self.conv2(x) 149 | x = self.pool2(x) 150 | x = self.relu2(x) 151 | x = self.fc1(x) 152 | x = self.fc2(x) 153 | return x 154 | 155 | 156 | 157 | # Network 158 | class Net(nn.Module): 159 | def __init__(self): 160 | super().__init__() 161 | 162 | self.conv1 = nn.Conv2d(1, 64, 7) 163 | self.pool1 = nn.MaxPool2d(2) 164 | self.conv2 = nn.Conv2d(64, 128, 5) 165 | self.conv3 = nn.Conv2d(128, 256, 5) 166 | self.linear1 = nn.Linear(2304, 512) 167 | self.linear2 = nn.Linear(512, 2) 168 | 169 | def forward(self, data): 170 | res = [] 171 | for i in range(2): # Siamese nets; sharing weights 172 | x = data[i] 173 | x = self.conv1(x) 174 | x = F.relu(x) 175 | x = self.pool1(x) 176 | x = self.conv2(x) 177 | x = F.relu(x) 178 | x = self.conv3(x) 179 | x = F.relu(x) 180 | 181 | x = x.view(x.shape[0], -1) 182 | x = self.linear1(x) 183 | res.append(F.relu(x)) 184 | 185 | res = torch.abs(res[1] - res[0]) 186 | res = self.linear2(res) 187 | return res 188 | 189 | 190 | 191 | # Train function 192 | def train(model, device, train_loader, epoch, optimizer): 193 | pass 194 | 195 | 196 | # Test function 197 | def test(model, device, test_loader): 198 | pass 199 | 200 | #--------------------------------------------------------------- 201 | # _________________________ Main ___________________________ 202 | 
#--------------------------------------------------------------- 203 | def main(): 204 | 205 | # Training params 206 | num_workers = 1 207 | num_classes = 2 208 | save_frequency = 2 209 | batch_size = 1 210 | lr = 0.001 211 | num_epochs = 1 212 | weight_decay = 0.0001 213 | 214 | # dataset folder loader 215 | train_data = Dataset_ADNI_Folder(root=root_path + 'train/', loader=pickle_loader, extensions='.pkl', transform=None) 216 | valid_data = Dataset_ADNI_Folder(root=root_path + 'valid/', loader=pickle_loader, extensions='.pkl', transform=None) 217 | test_data = Dataset_ADNI_Folder(root=root_path + 'test/' , loader=pickle_loader, extensions='.pkl', transform=None) 218 | 219 | # dataloader 220 | train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=num_workers) 221 | valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, shuffle=True, num_workers=num_workers) 222 | test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=True, num_workers=num_workers) 223 | 224 | # select device 225 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 226 | print("device: {}".format(device)) 227 | 228 | model = OneStreamNet().to(device) 229 | summary(model, (1, 28, 28, 28)) 230 | 231 | 232 | # Loss and optimizer 233 | criterion = nn.CrossEntropyLoss() 234 | optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay) 235 | 236 | 237 | # Train the model 238 | total_step = len(train_loader) 239 | loss_list = [] 240 | acc_list = [] 241 | 242 | # for epoch in range(num_epochs): 243 | # # for i, (images, labels) in enumerate(train_loader): 244 | # for i, (d1, d2, v, labels) in enumerate(train_loader): 245 | # print(i) 246 | 247 | # # Run the forward pass 248 | # d1 = torch.unsqueeze(d1, 0).to(device, dtype=torch.float) 249 | # outputs = model(d1) 250 | # loss = criterion(outputs, labels) 251 | # loss_list.append(loss.item()) 252 | 253 | # # Backprop and perform Adam optimisation 254 | # optimizer.zero_grad() 255 | # loss.backward() 256 | # optimizer.step() 257 | 258 | # # Track the accuracy 259 | # total = labels.size(0) 260 | # _, predicted = torch.max(outputs.data, 1) 261 | # correct = (predicted == labels).sum().item() 262 | # acc_list.append(correct / total) 263 | 264 | # if (i + 1) % 100 == 0: 265 | # print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Accuracy: {:.2f}%' 266 | # .format(epoch + 1, num_epochs, i + 1, total_step, loss.item(), 267 | # (correct / total) * 100)) 268 | 269 | 270 | 271 | 272 | # for epoch in range(num_epochs): 273 | # train(model, device, train_loader, epoch, optimizer) 274 | # test(model, device, test_loader) 275 | # if epoch & save_frequency == 0: 276 | # torch.save(model, 'siamese_{:03}.pt'.format(epoch)) 277 | 278 | 279 | # __start__ 280 | if __name__ == '__main__': 281 | main() -------------------------------------------------------------------------------- /src/pytorch-project/networks/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/pytorch-project/networks/__init__.py -------------------------------------------------------------------------------- /src/pytorch-project/networks/mnist.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | import argparse 3 | import torch 4 | import torch.nn as nn 5 | import 
torch.nn.functional as F 6 | import torch.optim as optim 7 | from torchvision import datasets, transforms 8 | from torch.optim.lr_scheduler import StepLR 9 | 10 | 11 | class Net(nn.Module): 12 | def __init__(self): 13 | super(Net, self).__init__() 14 | self.conv1 = nn.Conv2d(1, 32, 3, 1) 15 | self.conv2 = nn.Conv2d(32, 64, 3, 1) 16 | self.dropout1 = nn.Dropout2d(0.25) 17 | self.dropout2 = nn.Dropout2d(0.5) 18 | self.fc1 = nn.Linear(9216, 128) 19 | self.fc2 = nn.Linear(128, 10) 20 | 21 | def forward(self, x): 22 | x = self.conv1(x) 23 | x = F.relu(x) 24 | x = self.conv2(x) 25 | x = F.max_pool2d(x, 2) 26 | x = self.dropout1(x) 27 | x = torch.flatten(x, 1) 28 | x = self.fc1(x) 29 | x = F.relu(x) 30 | x = self.dropout2(x) 31 | x = self.fc2(x) 32 | output = F.log_softmax(x, dim=1) 33 | return output 34 | 35 | 36 | def train(args, model, device, train_loader, optimizer, epoch): 37 | model.train() 38 | for batch_idx, (data, target) in enumerate(train_loader): 39 | data, target = data.to(device), target.to(device) 40 | optimizer.zero_grad() 41 | output = model(data) 42 | loss = F.nll_loss(output, target) 43 | loss.backward() 44 | optimizer.step() 45 | if batch_idx % args.log_interval == 0: 46 | print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( 47 | epoch, batch_idx * len(data), len(train_loader.dataset), 48 | 100. * batch_idx / len(train_loader), loss.item())) 49 | 50 | 51 | def test(args, model, device, test_loader): 52 | model.eval() 53 | test_loss = 0 54 | correct = 0 55 | with torch.no_grad(): 56 | for data, target in test_loader: 57 | data, target = data.to(device), target.to(device) 58 | output = model(data) 59 | test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss 60 | pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability 61 | correct += pred.eq(target.view_as(pred)).sum().item() 62 | 63 | test_loss /= len(test_loader.dataset) 64 | 65 | print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format( 66 | test_loss, correct, len(test_loader.dataset), 67 | 100. 
* correct / len(test_loader.dataset))) 68 | 69 | 70 | def main(): 71 | # Training settings 72 | parser = argparse.ArgumentParser(description='PyTorch MNIST Example') 73 | parser.add_argument('--batch-size', type=int, default=64, metavar='N', 74 | help='input batch size for training (default: 64)') 75 | parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N', 76 | help='input batch size for testing (default: 1000)') 77 | parser.add_argument('--epochs', type=int, default=14, metavar='N', 78 | help='number of epochs to train (default: 10)') 79 | parser.add_argument('--lr', type=float, default=1.0, metavar='LR', 80 | help='learning rate (default: 1.0)') 81 | parser.add_argument('--gamma', type=float, default=0.7, metavar='M', 82 | help='Learning rate step gamma (default: 0.7)') 83 | parser.add_argument('--no-cuda', action='store_true', default=False, 84 | help='disables CUDA training') 85 | parser.add_argument('--seed', type=int, default=1, metavar='S', 86 | help='random seed (default: 1)') 87 | parser.add_argument('--log-interval', type=int, default=10, metavar='N', 88 | help='how many batches to wait before logging training status') 89 | 90 | parser.add_argument('--save-model', action='store_true', default=False, 91 | help='For Saving the current Model') 92 | args = parser.parse_args() 93 | use_cuda = not args.no_cuda and torch.cuda.is_available() 94 | 95 | torch.manual_seed(args.seed) 96 | 97 | device = torch.device("cuda" if use_cuda else "cpu") 98 | 99 | kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {} 100 | train_loader = torch.utils.data.DataLoader( 101 | datasets.MNIST('../data', train=True, download=True, 102 | transform=transforms.Compose([ 103 | transforms.ToTensor(), 104 | transforms.Normalize((0.1307,), (0.3081,)) 105 | ])), 106 | batch_size=args.batch_size, shuffle=True, **kwargs) 107 | test_loader = torch.utils.data.DataLoader( 108 | datasets.MNIST('../data', train=False, transform=transforms.Compose([ 109 | transforms.ToTensor(), 110 | transforms.Normalize((0.1307,), (0.3081,)) 111 | ])), 112 | batch_size=args.test_batch_size, shuffle=True, **kwargs) 113 | 114 | model = Net().to(device) 115 | optimizer = optim.Adadelta(model.parameters(), lr=args.lr) 116 | 117 | scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma) 118 | for epoch in range(1, args.epochs + 1): 119 | train(args, model, device, train_loader, optimizer, epoch) 120 | test(args, model, device, test_loader) 121 | scheduler.step() 122 | 123 | if args.save_model: 124 | torch.save(model.state_dict(), "mnist_cnn.pt") 125 | 126 | 127 | if __name__ == '__main__': 128 | main() -------------------------------------------------------------------------------- /src/pytorch-project/networks/network.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | import torch.nn as nn 4 | import torch.nn.functional as F 5 | 6 | 7 | class EmbeddingNet(nn.Module): 8 | 9 | def __init__(self): 10 | super(EmbeddingNet, self).__init__() 11 | self.convnet = nn.Sequential(nn.Conv2d(1, 32, 5), nn.PReLU(), 12 | nn.MaxPool2d(2, stride=2), 13 | nn.Conv2d(32, 64, 5), nn.PReLU(), 14 | nn.MaxPool2d(2, stride=2)) 15 | 16 | self.fc = nn.Sequential(nn.Linear(64 * 4 * 4, 256), 17 | nn.PReLU(), 18 | nn.Linear(256, 256), 19 | nn.PReLU(), 20 | nn.Linear(256, 2) 21 | ) 22 | 23 | def forward(self, x): 24 | output = self.convnet(x) 25 | output = output.view(output.size()[0], -1) 26 | output = self.fc(output) 27 | return output 28 | 29 | def get_embedding(self, x): 30 | return 
self.forward(x) 31 | 32 | 33 | class EmbeddingNetL2(EmbeddingNet): 34 | def __init__(self): 35 | super(EmbeddingNetL2, self).__init__() 36 | 37 | def forward(self, x): 38 | output = super(EmbeddingNetL2, self).forward(x) 39 | output /= output.pow(2).sum(1, keepdim=True).sqrt() 40 | return output 41 | 42 | def get_embedding(self, x): 43 | return self.forward(x) 44 | 45 | 46 | class ClassificationNet(nn.Module): 47 | def __init__(self, embedding_net, n_classes): 48 | super(ClassificationNet, self).__init__() 49 | self.embedding_net = embedding_net 50 | self.n_classes = n_classes 51 | self.nonlinear = nn.PReLU() 52 | self.fc1 = nn.Linear(2, n_classes) 53 | 54 | def forward(self, x): 55 | output = self.embedding_net(x) 56 | output = self.nonlinear(output) 57 | scores = F.log_softmax(self.fc1(output), dim=-1) 58 | return scores 59 | 60 | def get_embedding(self, x): 61 | return self.nonlinear(self.embedding_net(x)) 62 | 63 | 64 | class SiameseNet(nn.Module): 65 | def __init__(self, embedding_net): 66 | super(SiameseNet, self).__init__() 67 | self.embedding_net = embedding_net 68 | 69 | def forward(self, x1, x2): 70 | output1 = self.embedding_net(x1) 71 | output2 = self.embedding_net(x2) 72 | return output1, output2 73 | 74 | def get_embedding(self, x): 75 | return self.embedding_net(x) 76 | 77 | 78 | class TripletNet(nn.Module): 79 | def __init__(self, embedding_net): 80 | super(TripletNet, self).__init__() 81 | self.embedding_net = embedding_net 82 | 83 | def forward(self, x1, x2, x3): 84 | output1 = self.embedding_net(x1) 85 | output2 = self.embedding_net(x2) 86 | output3 = self.embedding_net(x3) 87 | return output1, output2, output3 88 | 89 | def get_embedding(self, x): 90 | return self.embedding_net(x) -------------------------------------------------------------------------------- /src/pytorch-project/note.txt: -------------------------------------------------------------------------------- 1 | numpy.stack 2 | 3 | https://www.kaggle.com/pinocookie/pytorch-dataset-and-dataloader 4 | 5 | https://qiita.com/JUN_NETWORKS/items/65cc313e810cc6b31098 6 | 7 | https://discuss.pytorch.org/t/save-dataset-into-pt-file/25293/5 8 | 9 | https://pytorch.org/docs/stable/data.html 10 | 11 | https://gist.github.com/kevinzakka/d33bf8d6c7f06a9d8c76d97a7879f5cb 12 | 13 | https://medium.com/@josh_2774/deep-learning-with-pytorch-9574e74d17ad 14 | 15 | https://www.kaggle.com/leighplt/pytorch-tutorial-dataset-data-preparetion-stage 16 | https://stackoverflow.com/questions/44429199/how-to-load-a-list-of-numpy-arrays-to-pytorch-dataset-loader 17 | 18 | https://github.com/xiayandi/Pytorch_text_classification 19 | 20 | https://forums.fast.ai/t/out-of-core-data-block-itemlist-backed-up-by-memmap-files/39566 21 | 22 | http://www.machinelearninguru.com/deep_learning/data_preparation/hdf5/hdf5.html 23 | 24 | https://towardsdatascience.com/hdf5-datasets-for-pytorch-631ff1d750f5 25 | 26 | https://github.com/kevinzakka/one-shot-siamese/blob/master/data_loader.py 27 | 28 | 29 | You could try to lazily load each data sample in order to avoid preloading the whole dataset. 30 | Using multiple workers might hide the loading time, so that your GPU won’t be starving. 
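A minimal sketch of that advice (the class name and variables are illustrative, not from this repository): keep only file paths in memory, load each .pkl inside __getitem__, and let several DataLoader workers prefetch batches so loading overlaps with GPU compute.

import pickle
from torch.utils.data import Dataset, DataLoader

class LazyPickleDataset(Dataset):
    """Loads one pickled sample per __getitem__ instead of preloading all data."""
    def __init__(self, paths, labels):
        self.paths = paths      # only the file paths live in memory
        self.labels = labels

    def __getitem__(self, index):
        with open(self.paths[index], 'rb') as f:
            sample = pickle.load(f)   # disk I/O happens on demand, per sample
        return sample, self.labels[index]

    def __len__(self):
        return len(self.paths)

# Several workers prefetch batches so the GPU is not starved by disk I/O:
# loader = DataLoader(LazyPickleDataset(paths, labels), batch_size=32,
#                     shuffle=True, num_workers=4, pin_memory=True)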
31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | # Example 42 | # I am assuming trX is a list of image arrays (1, 224, 224, 3) 43 | # of length L = 0.8 * len(files) 44 | >>> import numpy as np 45 | >>> a = np.asarray(trX) 46 | >>> a.shape # should be (L, 1, 224, 224, 3) 47 | >>> a = np.squeeze(a, axis=1) # shape should now be (L, 224, 224, 3) 48 | >>> import torch 49 | >>> b = torch.FloatTensor(a) # or torch.from_numpy(a) 50 | 51 | 52 | import torch 53 | import numpy as np 54 | import torch.utils.data as utils 55 | 56 | my_x = [np.array([[1.0,2],[3,4]]),np.array([[5.,6],[7,8]])] # a list of numpy arrays 57 | my_y = [np.array([4.]), np.array([2.])] # another list of numpy arrays (targets) 58 | 59 | tensor_x = torch.stack([torch.Tensor(i) for i in my_x]) # transform to torch tensors 60 | tensor_y = torch.stack([torch.Tensor(i) for i in my_y]) 61 | 62 | my_dataset = utils.TensorDataset(tensor_x,tensor_y) # create your dataset 63 | my_dataloader = utils.DataLoader(my_dataset) # create your dataloader -------------------------------------------------------------------------------- /src/pytorch-project/services/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/pytorch-project/services/__init__.py -------------------------------------------------------------------------------- /src/pytorch-project/test/test.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | import pickle 4 | import os 5 | import sys 6 | 7 | 8 | sys.path.append('/home/karim/workspace/vscode/ADNI_Data_processing/src/data_processing/') 9 | 10 | 11 | 12 | file_name = '/home/karim/workspace/ADNI_workspace/results/ADNI_des/F_28P_F10_MS2_MB10D/HIPP/3D/AD-MCI/test/AD/0_HIPP_alz_ADNI_1_test_AD-MCI_002_S_0619_[AD]_fliped.pkl' 13 | 14 | root = '/home/karim/workspace/ADNI_workspace/results/ADNI_des/F_28P_F10_MS2_MB10D/HIPP/3D/MCI-NC/test' 15 | 16 | 17 | 18 | # 1 pickle loader (load one sample) 19 | def pickle_loader(path_file): 20 | dir_name = os.path.dirname(path_file) 21 | with open(path_file, 'rb') as f: 22 | model_adni = pickle.load(f) 23 | return model_adni 24 | 25 | 26 | def find_classes(dir): 27 | if sys.version_info >= (3, 5): 28 | # Faster and available in Python 3.5 and above 29 | classes = [d.name for d in os.scandir(dir) if d.is_dir()] 30 | else: 31 | classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))] 32 | 33 | classes.sort() 34 | class_to_idx = {classes[i]: i for i in range(len(classes))} 35 | return classes, class_to_idx 36 | 37 | 38 | def has_file_allowed_extension(filename, extensions): 39 | return filename.lower().endswith(extensions) 40 | 41 | 42 | 43 | 44 | def make_dataset(dir, class_to_idx, extensions=None, is_valid_file=None): 45 | images = [] 46 | dir = os.path.expanduser(dir) 47 | if not ((extensions is None) ^ (is_valid_file is None)): 48 | raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time") 49 | if extensions is not None: 50 | def is_valid_file(x): 51 | return has_file_allowed_extension(x, extensions) 52 | for target in sorted(class_to_idx.keys()): 53 | d = os.path.join(dir, target) 54 | if not os.path.isdir(d): 55 | continue 56 | for root, _, fnames in sorted(os.walk(d)): 57 | for fname in sorted(fnames): 58 | path = os.path.join(root, fname) 59 | if is_valid_file(path): 60 | item = (path, class_to_idx[target]) 61 | images.append(item) 62 |
63 | return images 64 | 65 | 66 | 67 | 68 | 69 | c, i = find_classes(root) 70 | 71 | print(c, i) 72 | images = make_dataset(root, i, extensions='.pkl') 73 | 74 | for j in images: 75 | print(j) 76 | 77 | # print(c, i) 78 | 79 | # model = pickle_loader(file_name) 80 | 81 | # print(model.hippMetaDataVector) -------------------------------------------------------------------------------- /src/pytorch-project/test/test2.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | import pickle 4 | import os 5 | import sys 6 | 7 | from PIL import Image 8 | 9 | import torch 10 | import torchvision 11 | from torch.utils.data.dataset import Dataset 12 | from torch.utils.data import Dataset, DataLoader 13 | from torch.utils.data.sampler import BatchSampler 14 | from torchvision.datasets import DatasetFolder 15 | from torchvision import transforms 16 | 17 | 18 | 19 | 20 | 21 | # for pickle load 22 | # sys.path.append('/home/karim/workspace/vscode/ADNI_Data_processing/src/data_processing/') 23 | root_path = '/home/karim/workspace/ADNI_workspace/results/ADNI_des/F_28P_F10_MS2_MB10D/HIPP/3D/AD-NC/' 24 | 25 | file_name = '/home/karim/workspace/ADNI_workspace/results/ADNI_des/F_28P_F10_MS2_MB10D/HIPP/3D/AD-MCI/test/AD/0_HIPP_alz_ADNI_1_test_AD-MCI_002_S_0619_[AD]_fliped.pkl' 26 | 27 | 28 | 29 | 30 | ADNI_MODEL_EXTENSIONS = ('.pkl') 31 | 32 | 33 | 34 | # 1 pickle loader (load one sample) 35 | def pickle_loader(path_file): 36 | dir_name = os.path.dirname(path_file) 37 | with open(path_file, 'rb') as f: 38 | model_adni = pickle.load(f) 39 | return model_adni 40 | 41 | 42 | pickle_loader(file_name) 43 | 44 | # # to check if the file type is allowed 45 | # def has_file_allowed_extension(filename, extensions): 46 | # return filename.lower().endswith(extensions) 47 | 48 | 49 | # def is_image_file(filename): 50 | # return has_file_allowed_extension(filename, ADNI_MODEL_EXTENSIONS) 51 | 52 | # # function 53 | # def make_dataset(dir, class_to_idx, extensions=None, is_valid_file=None): 54 | # images = [] 55 | # dir = os.path.expanduser(dir) 56 | # if not ((extensions is None) ^ (is_valid_file is None)): 57 | # raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time") 58 | # if extensions is not None: 59 | # def is_valid_file(x): 60 | # return has_file_allowed_extension(x, extensions) 61 | # for target in sorted(class_to_idx.keys()): 62 | # d = os.path.join(dir, target) 63 | # if not os.path.isdir(d): 64 | # continue 65 | # for root, _, fnames in sorted(os.walk(d)): 66 | # for fname in sorted(fnames): 67 | # path = os.path.join(root, fname) 68 | # if is_valid_file(path): 69 | # item = (path, class_to_idx[target]) 70 | # images.append(item) 71 | 72 | # return images 73 | 74 | 75 | # # 2 Class Datafolder 76 | # class Dataset_ADNI_Folder(DatasetFolder): 77 | 78 | # # Methodes 79 | # def __init__(self, root, loader, extensions=None, transform=None, target_transform=None, is_valid_file=None): 80 | 81 | # self.root = root 82 | # classes, class_to_idx = self._find_classes(self.root) 83 | # samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file) 84 | 85 | # if len(samples) == 0: 86 | # raise (RuntimeError("Found 0 files in subfolders of: " + self.root + "\n" 87 | # "Supported extensions are: " + ",".join(extensions))) 88 | 89 | # self.loader = loader 90 | # self.extensions = extensions 91 | # self.classes = classes 92 | # self.class_to_idx = class_to_idx 93 | # self.samples = samples 94 | # self.transform = 
transforms.Compose([transforms.ToTensor()]) 95 | # self.targets = [s[1] for s in samples] 96 | 97 | # # __getitem__ 98 | # def __getitem__(self, index): 99 | # path, target = self.samples[index] 100 | # sample = self.loader(path) 101 | # # if self.transform is not None: 102 | # # sample = self.transform(sample) 103 | # # if self.target_transform is not None: 104 | # # target = self.target_transform(target) 105 | 106 | # # sample is objet instance from HippModel (L, R, V, Label) 107 | # return (sample.hippLeft, sample.hippRight, sample.hippMetaDataVector, target) 108 | 109 | # # __len__ 110 | # def __len__(self): 111 | # return len(self.samples) 112 | 113 | # # _find_classes 114 | # def _find_classes(self, dir): 115 | # if sys.version_info >= (3, 5): 116 | # # Faster and available in Python 3.5 and above 117 | # classes = [d.name for d in os.scandir(dir) if d.is_dir()] 118 | # else: 119 | # classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))] 120 | 121 | # classes.sort() 122 | # class_to_idx = {classes[i]: i for i in range(len(classes))} 123 | # return classes, class_to_idx 124 | 125 | 126 | 127 | 128 | # # __Main__ 129 | # def main(): 130 | 131 | # # parames 132 | # params_num_workers = 4 133 | # batch_size = 32 134 | 135 | # # 3 dataloader 136 | 137 | # train_data = Dataset_ADNI_Folder(root=root_path + 'train/', loader=pickle_loader, extensions='.pkl', transform=None) 138 | # valid_data = Dataset_ADNI_Folder(root=root_path + 'valid/', loader=pickle_loader, extensions='.pkl', transform=None) 139 | # test_data = Dataset_ADNI_Folder(root=root_path + 'test/' , loader=pickle_loader, extensions='.pkl', transform=None) 140 | 141 | # # # device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 142 | # # # print("device: {}".format(device)) 143 | 144 | # train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=params_num_workers) 145 | # valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, shuffle=True, num_workers=params_num_workers) 146 | # test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=True, num_workers=params_num_workers) 147 | 148 | 149 | # index = 0 150 | # for d1, d2, v, labels in valid_loader: 151 | # # print("key: {} : Left {} : Right {} : Vect {} : label {}".format(index, d1.size(), d2.size(), v, labels.size())) 152 | # print("key: {} - Left {} : Right {} - Vect {} : label {}".format(index, d1.size(), d2.size(), len(v), labels.size())) 153 | # index+= 1 154 | 155 | 156 | # if __name__ == '__main__': 157 | # main() 158 | 159 | 160 | # ############################################## 161 | 162 | 163 | 164 | 165 | # # train_dataset = DatasetTransformer(train_dataset, transforms.ToTensor()) 166 | # # valid_dataset = DatasetTransformer(valid_dataset, transforms.ToTensor()) 167 | # # test_dataset = DatasetTransformer(test_dataset , transforms.ToTensor()) 168 | 169 | 170 | 171 | # # train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4) 172 | # # valid_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4) 173 | # # test_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4) 174 | -------------------------------------------------------------------------------- /src/pytorch-project/tools/__init__.py: -------------------------------------------------------------------------------- 
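A note on test2.py above: the DatasetTransformer referenced in its commented-out tail is not defined anywhere in this repository. A plausible minimal version, assuming it wraps a dataset that yields (sample, target) pairs and applies a transform lazily:

from torch.utils.data import Dataset

class DatasetTransformer(Dataset):
    # Hypothetical helper matching the commented usage in test2.py;
    # assumes the wrapped dataset returns (sample, target) 2-tuples.
    def __init__(self, base_dataset, transform):
        self.base_dataset = base_dataset
        self.transform = transform

    def __getitem__(self, index):
        sample, target = self.base_dataset[index]
        return self.transform(sample), target   # transform applied on access

    def __len__(self):
        return len(self.base_dataset)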
https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/pytorch-project/tools/__init__.py -------------------------------------------------------------------------------- /src/pytorch-project/tools/dataset.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | import torch 4 | from torch.utils.data import Dataset, DataLoader 5 | 6 | import numpy as np 7 | 8 | 9 | class MyDataset(Dataset): 10 | def __init__(self, data, target, transform=None): 11 | self.data = torch.from_numpy(data).float() 12 | self.target = torch.from_numpy(target).long() 13 | self.transform = transform 14 | 15 | def __getitem__(self, index): 16 | x = self.data[index] 17 | y = self.target[index] 18 | 19 | if self.transform: 20 | x = self.transform(x) 21 | 22 | return x, y 23 | 24 | def __len__(self): 25 | return len(self.data) 26 | 27 | 28 | numpy_data = np.random.randn(100, 3, 24, 24) 29 | numpy_target = np.random.randint(0, 5, size=(100)) 30 | 31 | dataset = MyDataset(numpy_data, numpy_target) 32 | loader = DataLoader( 33 | dataset, 34 | batch_size=10, 35 | shuffle=True, 36 | num_workers=2, 37 | pin_memory=torch.cuda.is_available() 38 | ) 39 | 40 | for batch_idx, (data, target) in enumerate(loader): 41 | print('Batch idx {}, data shape {}, target shape {}'.format( 42 | batch_idx, data.shape, target.shape)) -------------------------------------------------------------------------------- /src/pytorch-project/tools/maap.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | from fastai import basic_train, data_block 3 | import numpy as np 4 | from torch import Tensor 5 | import torch 6 | import torch.optim 7 | import os 8 | import time 9 | 10 | from pathlib import Path 11 | 12 | 13 | class MemMapItemList(data_block.ItemList): 14 | def __init__(self, items, path, data_shape, dtype = np.float32, **kwargs): 15 | super().__init__(items, path, **kwargs) 16 | self.data_shape = data_shape 17 | self.copy_new.append("data_shape") 18 | self._file_process_dict = {} # Deleting this structure might cause grief when the main thread is killing the workers 19 | self._dtype = dtype 20 | 21 | def get(self, i): 22 | pid = os.getpid() 23 | mem_file = self._file_process_dict.get(pid, None) # each process owns its handler. 
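        # Note (added): np.memmap wraps an open file handle, which is not safe to
        # share across DataLoader worker processes; caching one handle per PID in
        # _file_process_dict gives every worker its own view of the same file.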
24 | if mem_file is None: 25 | mem_file = np.memmap(self.path, self._dtype, mode='r+', shape=self.data_shape) 26 | self._file_process_dict[pid] = mem_file 27 | idx = self.items[i] 28 | item_data = np.copy(mem_file[idx, :]) 29 | if self._dtype == np.float32: 30 | item = data_block.FloatItem(item_data) 31 | else: 32 | item = data_block.Category(item_data, item_data) 33 | return item 34 | 35 | def reconstruct(self, t: Tensor, x: Tensor = None): 36 | return data_block.FloatItem(t.cpu().numpy()) 37 | 38 | def labels_from_memmap(self, npy_memfile, data_shape, dtype=np.float32, **kwargs): 39 | y = MemMapItemList(self.items, npy_memfile, data_shape, dtype=dtype) 40 | res = self._label_list(x=self, y=y) 41 | return res 42 | 43 | @classmethod 44 | def from_memfile(cls, path, data_shape): 45 | "Constructs a MemMapItemList from a numpy mem mapped file" 46 | items = np.arange(0, data_shape[0]) 47 | return MemMapItemList(items, path, data_shape) 48 | 49 | 50 | def gen_some_data_for_io(folder, N, lx, ly): 51 | feat = np.random.rand(N, lx) 52 | feat[:, 0] = np.arange(N) 53 | target = np.random.rand(N, ly) 54 | target[:, 0] = np.arange(N) 55 | 56 | fx = folder / "x.npy" 57 | fy = folder / "y.npy" 58 | 59 | npfx = np.memmap(fx, np.float32, "w+", shape=feat.shape) 60 | npfx[:] = feat[:] 61 | npfx.flush() 62 | 63 | npfy = np.memmap(fy, np.float32, "w+", shape=target.shape) 64 | npfy[:] = target[:] 65 | npfy.flush() 66 | 67 | del npfx 68 | del npfy 69 | 70 | 71 | class Validation_Net(nn.Module): 72 | "Dummy learner. It passes the first feature from input to the output" 73 | 74 | def __init__(self, input_size=5, output_size=3): 75 | super().__init__() 76 | self.last = nn.Linear(input_size, output_size) 77 | 78 | def forward(self, x): 79 | out = self.last(x) 80 | out[:, 0] = x[:, 0] 81 | return out 82 | 83 | 84 | class Validation_Loss(torch.nn.Module): 85 | "Just makes sure that the first column from the input is identical with the target" 86 | 87 | def __init__(self): 88 | super().__init__() 89 | 90 | def forward(self, x, y): 91 | diff = x[:, 0] - y[:, 0] 92 | abs_diff = torch.abs(diff) 93 | abs_sum = torch.sum(abs_diff) 94 | if abs_sum > 0.000001: 95 | raise Exception("Input and lables are misalligned. 
Maybe the batch reading is wrong") 96 | dls = x - y 97 | dls = torch.sum(torch.pow(dls, 2)) 98 | return dls 99 | 100 | 101 | def train_network(folder, N, lx, ly): 102 | train_data_shape = (N, lx) 103 | test_data_shape = (N, ly) 104 | 105 | item_list = MemMapItemList.from_memfile(folder / "x.npy", data_shape=train_data_shape) 106 | splitted = item_list.random_split_by_pct(valid_pct=0.1) 107 | labeled = splitted.labels_from_memmap(folder / "y.npy", data_shape=test_data_shape) 108 | data_bunch = labeled.databunch(bs=512, num_workers=4) # Test few values to see what's best for your hw+data stack 109 | 110 | model = Validation_Net() 111 | learner = basic_train.Learner(data=data_bunch, model=model, true_wd=True, wd=0.0001, 112 | loss_func=Validation_Loss(), path=folder) 113 | 114 | learner.fit(3, lr=0.001) 115 | t0 = time.time() 116 | learner.fit(3, lr=0.001) 117 | t1 = time.time() 118 | print("Time {}".format(t1 - t0)) 119 | 120 | 121 | if __name__ == "__main__": 122 | N = 100000 123 | lx = 5 124 | ly = 3 125 | folder = Path(".") 126 | gen_some_data_for_io(folder, N, lx, ly) 127 | train_network(folder, N, lx, ly) -------------------------------------------------------------------------------- /src/pytorch-template/config/ColorPrompt.py: -------------------------------------------------------------------------------- 1 | #------------------------------------------------------------------------------------------ 2 | # Color Config: For Terminal output color 3 | #------------------------------------------------------------------------------------------ 4 | 5 | 6 | class fg: 7 | BLACK = '\033[30m' 8 | RED = '\033[31m' 9 | GREEN = '\033[32m' 10 | YELLOW = '\033[33m' 11 | BLUE = '\033[34m' 12 | MAGENTA = '\033[35m' 13 | CYAN = '\033[36m' 14 | WHITE = '\033[37m' 15 | RESET = '\033[39m' 16 | 17 | class bg: 18 | BLACK = '\033[40m' 19 | RED = '\033[41m' 20 | GREEN = '\033[42m' 21 | YELLOW = '\033[43m' 22 | BLUE = '\033[44m' 23 | MAGENTA = '\033[45m' 24 | CYAN = '\033[46m' 25 | WHITE = '\033[47m' 26 | RESET = '\033[49m' 27 | 28 | class style: 29 | BRIGHT = '\033[1m' 30 | DIM = '\033[2m' 31 | NORMAL = '\033[22m' 32 | RESET_ALL = '\033[0m' -------------------------------------------------------------------------------- /src/pytorch-template/config/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/pytorch-template/config/__init__.py -------------------------------------------------------------------------------- /src/pytorch-template/config/__pycache__/ColorPrompt.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/pytorch-template/config/__pycache__/ColorPrompt.cpython-36.pyc -------------------------------------------------------------------------------- /src/pytorch-template/config/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/pytorch-template/config/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /src/pytorch-template/data_loader/__init__.py: -------------------------------------------------------------------------------- 
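ColorPrompt.py above only defines the raw ANSI escape sequences; typical usage concatenates them around a message and resets afterwards. A sketch, assuming the template's config package is on sys.path:

from config.ColorPrompt import fg, bg, style

print(fg.GREEN + 'training finished' + fg.RESET)                    # green text
print(style.BRIGHT + fg.WHITE + bg.RED + ' ERROR ' + style.RESET_ALL)  # white on red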
https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/pytorch-template/data_loader/__init__.py -------------------------------------------------------------------------------- /src/pytorch-template/data_loader/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/pytorch-template/data_loader/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /src/pytorch-template/data_loader/__pycache__/data_loader.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/pytorch-template/data_loader/__pycache__/data_loader.cpython-36.pyc -------------------------------------------------------------------------------- /src/pytorch-template/data_loader/data_loader.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import errno 4 | import random 5 | import pickle 6 | import numpy as np 7 | 8 | from PIL import Image 9 | 10 | import torch 11 | import torchvision 12 | from torch.utils.data.dataset import Dataset 13 | from torch.utils.data import Dataset, DataLoader 14 | from torch.utils.data.sampler import BatchSampler 15 | from torchvision.datasets import DatasetFolder 16 | from torchvision import transforms 17 | import torch.nn.functional as F 18 | from torch import nn 19 | from torch import optim 20 | 21 | 22 | 23 | #from torchsummary import summary 24 | import matplotlib.pyplot as plt 25 | 26 | import torch.optim as optim 27 | 28 | 29 | ############################################################################################################### 30 | # server 31 | ############################################################################################################### 32 | sys.path.append('/data/ADERGHAL/code-source/ADNI_Data_processing/src/data_processing/') 33 | root_path = '/data/ADERGHAL/ADNI_workspace/results/ADNI_des/F_28P_F10_MS2_MB10D/HIPP/3D/AD-NC/' 34 | 35 | ############################################################################################################### 36 | # HP computer 37 | ############################################################################################################### 38 | #sys.path.append('/home/karim/workspace/vscode-python/ADNI_Data_processing/src/data_processing') 39 | #root_path = '/home/karim/workspace/ADNI_workspace/results/ADNI_des/F_28P_F100_MS2_MB10D/HIPP/3D/AD-NC/' 40 | 41 | 42 | ADNI_MODEL_EXTENSIONS = ('.pkl') 43 | 44 | # 1 pickle loader (load one sample) 45 | def pickle_loader(path_file): 46 | dir_name = os.path.dirname(path_file) 47 | with open(path_file, 'rb') as f: 48 | model_adni = pickle.load(f) 49 | return model_adni 50 | 51 | # to check if the file type is allowed 52 | def has_file_allowed_extension(filename, extensions): 53 | return filename.lower().endswith(extensions) 54 | 55 | 56 | def is_image_file(filename): 57 | return has_file_allowed_extension(filename, ADNI_MODEL_EXTENSIONS) 58 | 59 | # function 60 | def make_dataset(dir, class_to_idx, extensions=None, is_valid_file=None): 61 | images = [] 62 | dir = os.path.expanduser(dir) 63 | if not ((extensions is None) ^ (is_valid_file is None)): 64 | raise ValueError("Both extensions 
and is_valid_file cannot be None or not None at the same time") 65 | if extensions is not None: 66 | def is_valid_file(x): 67 | return has_file_allowed_extension(x, extensions) 68 | for target in sorted(class_to_idx.keys()): 69 | d = os.path.join(dir, target) 70 | if not os.path.isdir(d): 71 | continue 72 | for root, _, fnames in sorted(os.walk(d)): 73 | for fname in sorted(fnames): 74 | path = os.path.join(root, fname) 75 | if is_valid_file(path): 76 | item = (path, class_to_idx[target]) 77 | images.append(item) 78 | 79 | return images 80 | 81 | 82 | # 2 Class Datafolder 83 | class Dataset_ADNI_Folder(DatasetFolder): 84 | 85 | # Methodes 86 | def __init__(self, root, loader, extensions=None, transform=None, target_transform=None, is_valid_file=None): 87 | 88 | self.root = root 89 | classes, class_to_idx = self._find_classes(self.root) 90 | samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file) 91 | 92 | if len(samples) == 0: 93 | raise (RuntimeError("Found 0 files in subfolders of: " + self.root + "\n" 94 | "Supported extensions are: " + ",".join(extensions))) 95 | 96 | self.loader = loader 97 | self.extensions = extensions 98 | self.classes = classes 99 | self.class_to_idx = class_to_idx 100 | self.samples = samples 101 | self.transform = transforms.Compose([transforms.ToTensor()]) 102 | self.targets = [s[1] for s in samples] 103 | 104 | # __getitem__ 105 | def __getitem__(self, index): 106 | path, target = self.samples[index] 107 | sample = self.loader(path) 108 | # if self.transform is not None: 109 | # sample = self.transform(sample) 110 | # if self.target_transform is not None: 111 | # target = self.target_transform(target) 112 | 113 | # sample is objet instance from HippModel (L, R, V, Label) 114 | return (sample.hippLeft, sample.hippRight, sample.hippMetaDataVector, target) 115 | 116 | # __len__ 117 | def __len__(self): 118 | return len(self.samples) 119 | 120 | # _find_classes 121 | def _find_classes(self, dir): 122 | if sys.version_info >= (3, 5): 123 | # Faster and available in Python 3.5 and above 124 | classes = [d.name for d in os.scandir(dir) if d.is_dir()] 125 | else: 126 | classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))] 127 | 128 | classes.sort() 129 | class_to_idx = {classes[i]: i for i in range(len(classes))} 130 | return classes, class_to_idx 131 | -------------------------------------------------------------------------------- /src/pytorch-template/old/__main__.py: -------------------------------------------------------------------------------- 1 | 2 | import sys 3 | 4 | import torch 5 | from models.baseline_3D_single import SE_HIPP_3D_Net 6 | from data_loader.data_loader import Dataset_ADNI_Folder 7 | from data_loader.data_loader import pickle_loader 8 | 9 | 10 | from torch import nn 11 | from torch import optim 12 | from torchsummary import summary 13 | 14 | ############################################################################################################### 15 | # server 16 | ############################################################################################################### 17 | sys.path.append('/data/ADERGHAL/code-source/ADNI_Data_processing/src/data_processing/') 18 | root_path = '/data/ADERGHAL/ADNI_workspace/results/ADNI_des/F_28P_F10_MS2_MB10D/HIPP/3D/AD-NC/' 19 | 20 | 21 | 22 | #========================================================================== 23 | # Function: Main definition 24 | #========================================================================== 25 | def main(): 26 | 27 
| # parames for data 28 | id_device = 1 29 | params_num_workers = 4 30 | batch_size = 64 31 | num_classes = 2 32 | save_frequency = 2 33 | learning_rate = 0.00001 34 | num_epochs = 500 35 | weight_decay = 0.0001 36 | 37 | train_losses, test_losses = [], [] 38 | running_loss = 0 39 | steps = 0 40 | print_every = 35 # 175/5 41 | 42 | # select device 43 | device = torch.device("cuda:" + str(id_device) if torch.cuda.is_available() else "cpu") # PyTorch v0.4.0 44 | print("using device :", device) 45 | model = SE_HIPP_3D_Net().to(device) 46 | 47 | # weights initialization 48 | # model.apply(weights_init) 49 | 50 | # DataFolder 51 | train_data = Dataset_ADNI_Folder(root=root_path + 'train/', loader=pickle_loader, extensions='.pkl', transform=None) 52 | valid_data = Dataset_ADNI_Folder(root=root_path + 'valid/', loader=pickle_loader, extensions='.pkl', transform=None) 53 | test_data = Dataset_ADNI_Folder(root=root_path + 'test/' , loader=pickle_loader, extensions='.pkl', transform=None) 54 | 55 | 56 | # Dataloader 57 | train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=params_num_workers) 58 | valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, shuffle=True, num_workers=params_num_workers) 59 | test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=True, num_workers=params_num_workers) 60 | 61 | 62 | # net = LeNet() 63 | summary(model, (28, 28, 28)) 64 | 65 | criterion = nn.CrossEntropyLoss() 66 | optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9) 67 | 68 | #scheduler = optim.lr_scheduler.StepLR(optimizer,step_size=10, gamma=0.1) 69 | 70 | # Train the model 71 | total_step = len(train_loader) 72 | loss_list = [] 73 | acc_list = [] 74 | valid_acc = [] 75 | 76 | running_loss = 0.0 77 | for epoch in range(num_epochs): 78 | for i, (d1, d2, v, labels) in enumerate(train_loader): 79 | 80 | # 81 | steps += 1 82 | 83 | # # forward + backward + optimize 84 | # print("d1 size:", d1.size()) 85 | # d1 = torch.unsqueeze(d1, 1).to(device, dtype=torch.float) 86 | d1 = d1.to(device, dtype=torch.float) 87 | # print("d1 size:", d1.size()) 88 | labels = labels.to(device) 89 | # zero the parameter gradients 90 | optimizer.zero_grad() 91 | 92 | outputs = model(d1) 93 | loss = criterion(outputs, labels) 94 | loss.backward() 95 | optimizer.step() 96 | running_loss += loss.item() 97 | 98 | 99 | # Track the accuracy 100 | total = labels.size(0) 101 | _, predicted = torch.max(outputs.data, 1) 102 | correct = (predicted == labels).sum().item() 103 | 104 | # acc_list.append((correct / total) * 100) 105 | 106 | 107 | if steps % print_every == 0: 108 | acc_list.append((correct / total) * 100) 109 | test_loss = 0 110 | accuracy = 0 111 | model.eval() 112 | with torch.no_grad(): 113 | for i, (v_d1, v_d2, v_v, v_labels) in enumerate(valid_loader): 114 | # v_d1 = torch.unsqueeze(v_d1, 1).to(device, dtype=torch.float) 115 | v_d1 = v_d1.to(device, dtype=torch.float) 116 | v_labels = v_labels.to(device) 117 | v_outputs = model(v_d1) 118 | batch_loss = criterion(v_outputs, v_labels) 119 | test_loss += batch_loss.item() 120 | ps = torch.exp(v_outputs) 121 | top_p, top_class = ps.topk(1, dim=1) 122 | equals = top_class == v_labels.view(*top_class.shape) 123 | accuracy += torch.mean(equals.type(torch.FloatTensor)).item() 124 | 125 | 126 | # train_losses.append(running_loss/len(train_loader)) 127 | train_losses.append(running_loss/print_every) 128 | test_losses.append(test_loss/len(valid_loader)) 129 | 130 | 
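                    # Note (added): the network emits raw logits (CrossEntropyLoss applies
                    # log-softmax internally), so torch.exp(v_outputs) above is not a true
                    # probability; topk(1) still picks the correct argmax class.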
131 | print(f"Epoch {epoch+1}/{num_epochs}.. " 132 | f"Train loss: {running_loss/print_every:.3f}.. " 133 | f"Train accuracy: {(correct / total) * 100:.3f}.. " 134 | f"Test loss: {test_loss/len(valid_loader):.3f}.. " 135 | f"Test accuracy: {(accuracy/len(valid_loader) * 100):.3f}") 136 | 137 | valid_acc.append((accuracy/len(valid_loader) * 100)) 138 | 139 | running_loss = 0 140 | model.train() 141 | 142 | # scheduler.step() 143 | 144 | 145 | 146 | plt.plot(acc_list, label='Training accu') 147 | plt.plot(valid_acc, label='Validation accu') 148 | 149 | plt.legend(frameon=False) 150 | plt.show() 151 | 152 | 153 | plt.plot(train_losses, label='Training loss') 154 | plt.plot(test_losses, label='Validation loss') 155 | plt.legend(frameon=False) 156 | plt.show() 157 | 158 | 159 | 160 | print('Finished Training') 161 | 162 | 163 | 164 | #========================================================================== 165 | # Start : __Main__ 166 | #========================================================================== 167 | if __name__ == '__main__': 168 | main() -------------------------------------------------------------------------------- /src/pytorch-template/old/baseline/Network_num_1.py: -------------------------------------------------------------------------------- 1 | 2 | import torch.nn as nn 3 | from torch import optim 4 | from torch.utils.data import Dataset, DataLoader 5 | from torch.utils.data.dataset import Dataset 6 | from torch.utils.data.sampler import BatchSampler 7 | from torchsummary import summary 8 | from torchvision import transforms 9 | from torchvision.datasets import DatasetFolder 10 | import errno 11 | import numpy as np 12 | import os 13 | import pickle 14 | import random 15 | import sys 16 | import torch 17 | import torch.nn.functional as F 18 | import torchvision 19 | import matplotlib.pyplot as plt 20 | import math 21 | 22 | 23 | 24 | 25 | ratio = 3 # reduction ratio for SE 26 | 27 | 28 | ############################################################################################################### 29 | # server 30 | ############################################################################################################### 31 | sys.path.append('/data/ADERGHAL/code-source/ADNI_Data_processing/src/data_processing/') 32 | root_path = '/data/ADERGHAL/ADNI_workspace/results/ADNI_des/F_28P_F10_MS2_MB05D/HIPP/3D/AD-NC/' 33 | 34 | ############################################################################################################### 35 | # HP computer 36 | ############################################################################################################### 37 | #sys.path.append('/home/karim/workspace/vscode-python/ADNI_Data_processing/src/data_processing') 38 | #root_path = '/home/karim/workspace/ADNI_workspace/results/ADNI_des/F_28P_F100_MS2_MB10D/HIPP/3D/AD-NC/' 39 | 40 | 41 | ADNI_MODEL_EXTENSIONS = ('.pkl') 42 | 43 | # 1 pickle loader (load one sample) 44 | def pickle_loader(path_file): 45 | dir_name = os.path.dirname(path_file) 46 | with open(path_file, 'rb') as f: 47 | model_adni = pickle.load(f) 48 | return model_adni 49 | 50 | # to check if the file type is allowed 51 | def has_file_allowed_extension(filename, extensions): 52 | return filename.lower().endswith(extensions) 53 | 54 | 55 | def is_image_file(filename): 56 | return has_file_allowed_extension(filename, ADNI_MODEL_EXTENSIONS) 57 | 58 | # function 59 | def make_dataset(dir, class_to_idx, extensions=None, is_valid_file=None): 60 | images = [] 61 | dir = os.path.expanduser(dir) 62 | if 
not ((extensions is None) ^ (is_valid_file is None)): 63 | raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time") 64 | if extensions is not None: 65 | def is_valid_file(x): 66 | return has_file_allowed_extension(x, extensions) 67 | for target in sorted(class_to_idx.keys()): 68 | d = os.path.join(dir, target) 69 | if not os.path.isdir(d): 70 | continue 71 | for root, _, fnames in sorted(os.walk(d)): 72 | for fname in sorted(fnames): 73 | path = os.path.join(root, fname) 74 | if is_valid_file(path): 75 | item = (path, class_to_idx[target]) 76 | images.append(item) 77 | 78 | return images 79 | 80 | 81 | # 2 Class Datafolder 82 | class Dataset_ADNI_Folder(DatasetFolder): 83 | 84 | # Methodes 85 | def __init__(self, root, loader, extensions=None, transform=None, target_transform=None, is_valid_file=None): 86 | 87 | self.root = root 88 | classes, class_to_idx = self._find_classes(self.root) 89 | samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file) 90 | 91 | if len(samples) == 0: 92 | raise (RuntimeError("Found 0 files in subfolders of: " + self.root + "\n" 93 | "Supported extensions are: " + ",".join(extensions))) 94 | 95 | self.loader = loader 96 | self.extensions = extensions 97 | self.classes = classes 98 | self.class_to_idx = class_to_idx 99 | self.samples = samples 100 | self.transform = transforms.Compose([transforms.ToTensor()]) 101 | self.targets = [s[1] for s in samples] 102 | 103 | # __getitem__ 104 | def __getitem__(self, index): 105 | path, target = self.samples[index] 106 | sample = self.loader(path) 107 | # if self.transform is not None: 108 | # sample = self.transform(sample) 109 | # if self.target_transform is not None: 110 | # target = self.target_transform(target) 111 | 112 | # sample is objet instance from HippModel (L, R, V, Label) 113 | return (sample.hippLeft, sample.hippRight, sample.hippMetaDataVector, target) 114 | 115 | # __len__ 116 | def __len__(self): 117 | return len(self.samples) 118 | 119 | # _find_classes 120 | def _find_classes(self, dir): 121 | if sys.version_info >= (3, 5): 122 | # Faster and available in Python 3.5 and above 123 | classes = [d.name for d in os.scandir(dir) if d.is_dir()] 124 | else: 125 | classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))] 126 | 127 | classes.sort() 128 | class_to_idx = {classes[i]: i for i in range(len(classes))} 129 | return classes, class_to_idx 130 | 131 | 132 | 133 | 134 | 135 | #============================================================================== 136 | # Network definition 137 | #============================================================================== 138 | 139 | class Network_Baseline(nn.Module): 140 | def __init__(self): 141 | super(Network_Baseline, self).__init__() 142 | 143 | self.layer1 = nn.Sequential( 144 | nn.Conv2d(28, 16, kernel_size=5, stride=1, padding=1), 145 | nn.BatchNorm2d(16), 146 | nn.ReLU() 147 | ) 148 | 149 | self.layer2 = nn.Sequential( 150 | nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=0), 151 | nn.ReLU() 152 | ) 153 | 154 | self.layer3 = nn.Sequential( 155 | nn.Conv2d(32, 64, kernel_size=4, stride=1, padding=0), 156 | nn.ReLU() 157 | ) 158 | 159 | self.pool1 = nn.Sequential( 160 | nn.MaxPool2d(kernel_size=4, stride=2, padding=0) 161 | ) 162 | 163 | self.fc1 = nn.Linear(64*8*8, 120) 164 | self.dropout = nn.Dropout(0.5) 165 | self.fc2 = nn.Linear(120, 2) 166 | 167 | 168 | 169 | 170 | def forward(self, x): 171 | x = self.layer1(x) 172 | x = self.layer2(x) 173 | x = self.layer3(x) 174 | x 
= self.pool1(x) 175 | 176 | # print("size", x.size()) 177 | x = x.view(-1, self.num_flat_features(x)) 178 | x = self.dropout(x) 179 | # print("size", x.size()) 180 | x = F.relu(self.fc1(x)) 181 | x = self.fc2(x) 182 | return x 183 | 184 | 185 | def num_flat_features(self, x): 186 | size = x.size()[1:] 187 | num_features = 1 188 | for s in size: 189 | num_features *= s 190 | return num_features 191 | 192 | 193 | 194 | 195 | #========================================================================== 196 | # Function: Main definition 197 | #========================================================================== 198 | def main(): 199 | 200 | # parames for data 201 | id_device = 1 202 | params_num_workers = 4 203 | batch_size = 64 204 | num_classes = 2 205 | save_frequency = 2 206 | learning_rate = 0.0001 207 | num_epochs = 500 208 | weight_decay = 0.0001 209 | momentum = 0.9 210 | train_losses, test_losses = [], [] 211 | running_loss = 0 212 | steps = 0 213 | print_every = 35 # 175/5 214 | 215 | # select device 216 | device = torch.device("cuda:" + str(id_device) if torch.cuda.is_available() else "cpu") # PyTorch v0.4.0 217 | print("using device :", device) 218 | model = Network_Baseline().to(device) 219 | 220 | # weights initialization 221 | # model.apply(weights_init) 222 | 223 | # DataFolder 224 | train_data = Dataset_ADNI_Folder(root=root_path + 'train/', loader=pickle_loader, extensions='.pkl', transform=None) 225 | valid_data = Dataset_ADNI_Folder(root=root_path + 'valid/', loader=pickle_loader, extensions='.pkl', transform=None) 226 | test_data = Dataset_ADNI_Folder(root=root_path + 'test/' , loader=pickle_loader, extensions='.pkl', transform=None) 227 | 228 | 229 | # Dataloader 230 | train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=params_num_workers) 231 | valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, shuffle=True, num_workers=params_num_workers) 232 | test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=True, num_workers=params_num_workers) 233 | valid_loader = test_loader 234 | 235 | # net = LeNet() 236 | # summary(model, (28, 28, 28)) 237 | 238 | criterion = nn.CrossEntropyLoss() 239 | optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum, weight_decay=weight_decay) 240 | 241 | #scheduler = optim.lr_scheduler.StepLR(optimizer,step_size=10, gamma=0.1) 242 | 243 | # Train the model 244 | total_step = len(train_loader) 245 | loss_list = [] 246 | acc_list = [] 247 | valid_acc = [] 248 | 249 | running_loss = 0.0 250 | for epoch in range(num_epochs): 251 | for i, (d1, d2, v, labels) in enumerate(train_loader): 252 | 253 | # 254 | steps += 1 255 | 256 | # # forward + backward + optimize 257 | # print("d1 size:", d1.size()) 258 | # d1 = torch.unsqueeze(d1, 1).to(device, dtype=torch.float) 259 | d1 = d1.to(device, dtype=torch.float) 260 | # print("d1 size:", d1.size()) 261 | labels = labels.to(device) 262 | # zero the parameter gradients 263 | optimizer.zero_grad() 264 | 265 | outputs = model(d1) 266 | loss = criterion(outputs, labels) 267 | loss.backward() 268 | optimizer.step() 269 | running_loss += loss.item() 270 | 271 | 272 | # Track the accuracy 273 | total = labels.size(0) 274 | _, predicted = torch.max(outputs.data, 1) 275 | correct = (predicted == labels).sum().item() 276 | 277 | # acc_list.append((correct / total) * 100) 278 | 279 | 280 | if steps % print_every == 0: 281 | acc_list.append((correct / total) * 100) 282 | test_loss = 0 283 | 
accuracy = 0 284 | model.eval() 285 | with torch.no_grad(): 286 | for i, (v_d1, v_d2, v_v, v_labels) in enumerate(valid_loader): 287 | # v_d1 = torch.unsqueeze(v_d1, 1).to(device, dtype=torch.float) 288 | v_d1 = v_d1.to(device, dtype=torch.float) 289 | v_labels = v_labels.to(device) 290 | v_outputs = model(v_d1) 291 | batch_loss = criterion(v_outputs, v_labels) 292 | test_loss += batch_loss.item() 293 | ps = torch.exp(v_outputs) 294 | top_p, top_class = ps.topk(1, dim=1) 295 | equals = top_class == v_labels.view(*top_class.shape) 296 | accuracy += torch.mean(equals.type(torch.FloatTensor)).item() 297 | 298 | 299 | # train_losses.append(running_loss/len(train_loader)) 300 | train_losses.append(running_loss/print_every) 301 | test_losses.append(test_loss/len(valid_loader)) 302 | 303 | 304 | print(f"Epoch {epoch+1}/{num_epochs}.. " 305 | f"Train loss: {running_loss/print_every:.3f}.. " 306 | f"Train accuracy: {(correct / total) * 100:.3f}.. " 307 | f"Test loss: {test_loss/len(valid_loader):.3f}.. " 308 | f"Test accuracy: {(accuracy/len(valid_loader) * 100):.3f}") 309 | 310 | valid_acc.append((accuracy/len(valid_loader) * 100)) 311 | 312 | running_loss = 0 313 | model.train() 314 | 315 | # scheduler.step() 316 | 317 | 318 | 319 | plt.plot(acc_list, label='Training accu') 320 | plt.plot(valid_acc, label='Validation accu') 321 | 322 | plt.legend(frameon=False) 323 | plt.show() 324 | 325 | 326 | plt.plot(train_losses, label='Training loss') 327 | plt.plot(test_losses, label='Validation loss') 328 | plt.legend(frameon=False) 329 | plt.show() 330 | 331 | 332 | 333 | print('Finished Training') 334 | 335 | 336 | 337 | #========================================================================== 338 | # Start : __Main__ 339 | #========================================================================== 340 | if __name__ == '__main__': 341 | main() -------------------------------------------------------------------------------- /src/pytorch-template/old/baseline/Network_num_2.py: -------------------------------------------------------------------------------- 1 | 2 | import torch.nn as nn 3 | from torch import optim 4 | from torch.utils.data import Dataset, DataLoader 5 | from torch.utils.data.dataset import Dataset 6 | from torch.utils.data.sampler import BatchSampler 7 | from torchsummary import summary 8 | from torchvision import transforms 9 | from torchvision.datasets import DatasetFolder 10 | import errno 11 | import numpy as np 12 | import os 13 | import pickle 14 | import random 15 | import sys 16 | import torch 17 | import torch.nn.functional as F 18 | import torchvision 19 | import matplotlib.pyplot as plt 20 | import math 21 | 22 | 23 | 24 | 25 | ratio = 3 # reduction ratio for SE 26 | 27 | 28 | ############################################################################################################### 29 | # server 30 | ############################################################################################################### 31 | sys.path.append('/data/ADERGHAL/code-source/ADNI_Data_processing/src/data_processing/') 32 | root_path = '/data/ADERGHAL/ADNI_workspace/results/ADNI_des/F_28P_F10_MS2_MB05D/HIPP/3D/AD-NC/' 33 | 34 | ############################################################################################################### 35 | # HP computer 36 | ############################################################################################################### 37 | #sys.path.append('/home/karim/workspace/vscode-python/ADNI_Data_processing/src/data_processing') 38 | #root_path = 
'/home/karim/workspace/ADNI_workspace/results/ADNI_des/F_28P_F100_MS2_MB10D/HIPP/3D/AD-NC/' 39 | 40 | 41 | ADNI_MODEL_EXTENSIONS = ('.pkl') 42 | 43 | # 1 pickle loader (load one sample) 44 | def pickle_loader(path_file): 45 | dir_name = os.path.dirname(path_file) 46 | with open(path_file, 'rb') as f: 47 | model_adni = pickle.load(f) 48 | return model_adni 49 | 50 | # to check if the file type is allowed 51 | def has_file_allowed_extension(filename, extensions): 52 | return filename.lower().endswith(extensions) 53 | 54 | 55 | def is_image_file(filename): 56 | return has_file_allowed_extension(filename, ADNI_MODEL_EXTENSIONS) 57 | 58 | # function 59 | def make_dataset(dir, class_to_idx, extensions=None, is_valid_file=None): 60 | images = [] 61 | dir = os.path.expanduser(dir) 62 | if not ((extensions is None) ^ (is_valid_file is None)): 63 | raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time") 64 | if extensions is not None: 65 | def is_valid_file(x): 66 | return has_file_allowed_extension(x, extensions) 67 | for target in sorted(class_to_idx.keys()): 68 | d = os.path.join(dir, target) 69 | if not os.path.isdir(d): 70 | continue 71 | for root, _, fnames in sorted(os.walk(d)): 72 | for fname in sorted(fnames): 73 | path = os.path.join(root, fname) 74 | if is_valid_file(path): 75 | item = (path, class_to_idx[target]) 76 | images.append(item) 77 | 78 | return images 79 | 80 | 81 | # 2 Class Datafolder 82 | class Dataset_ADNI_Folder(DatasetFolder): 83 | 84 | # Methodes 85 | def __init__(self, root, loader, extensions=None, transform=None, target_transform=None, is_valid_file=None): 86 | 87 | self.root = root 88 | classes, class_to_idx = self._find_classes(self.root) 89 | samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file) 90 | 91 | if len(samples) == 0: 92 | raise (RuntimeError("Found 0 files in subfolders of: " + self.root + "\n" 93 | "Supported extensions are: " + ",".join(extensions))) 94 | 95 | self.loader = loader 96 | self.extensions = extensions 97 | self.classes = classes 98 | self.class_to_idx = class_to_idx 99 | self.samples = samples 100 | self.transform = transforms.Compose([transforms.ToTensor()]) 101 | self.targets = [s[1] for s in samples] 102 | 103 | # __getitem__ 104 | def __getitem__(self, index): 105 | path, target = self.samples[index] 106 | sample = self.loader(path) 107 | # if self.transform is not None: 108 | # sample = self.transform(sample) 109 | # if self.target_transform is not None: 110 | # target = self.target_transform(target) 111 | 112 | # sample is objet instance from HippModel (L, R, V, Label) 113 | return (sample.hippLeft, sample.hippRight, sample.hippMetaDataVector, target) 114 | 115 | # __len__ 116 | def __len__(self): 117 | return len(self.samples) 118 | 119 | # _find_classes 120 | def _find_classes(self, dir): 121 | if sys.version_info >= (3, 5): 122 | # Faster and available in Python 3.5 and above 123 | classes = [d.name for d in os.scandir(dir) if d.is_dir()] 124 | else: 125 | classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))] 126 | 127 | classes.sort() 128 | class_to_idx = {classes[i]: i for i in range(len(classes))} 129 | return classes, class_to_idx 130 | 131 | 132 | 133 | 134 | 135 | #============================================================================== 136 | # Network definition 137 | #============================================================================== 138 | 139 | class Network_Baseline(nn.Module): 140 | def __init__(self): 141 | 
super(Network_Baseline, self).__init__() 142 | 143 | self.layer1 = nn.Sequential( 144 | nn.Conv2d(28, 32, kernel_size=7, stride=1, padding=0), 145 | nn.BatchNorm2d(32), 146 | nn.ReLU() 147 | ) 148 | 149 | self.layer2 = nn.Sequential( 150 | nn.Conv2d(32, 64, kernel_size=6, stride=1, padding=0), 151 | nn.ReLU() 152 | ) 153 | 154 | self.pool1 = nn.Sequential( 155 | nn.MaxPool2d(kernel_size=2, stride=2, padding=0) 156 | ) 157 | 158 | 159 | self.layer3 = nn.Sequential( 160 | nn.Conv2d(64, 128, kernel_size=2, stride=1, padding=0), 161 | nn.ReLU() 162 | ) 163 | 164 | 165 | 166 | self.fc1 = nn.Linear(128*7*7, 2056) 167 | self.dropout = nn.Dropout(0.5) 168 | self.fc2 = nn.Linear(2056, 2) 169 | 170 | 171 | 172 | 173 | def forward(self, x): 174 | x = self.layer1(x) 175 | x = self.layer2(x) 176 | x = self.pool1(x) 177 | x = self.layer3(x) 178 | 179 | 180 | # print("size", x.size()) 181 | x = x.view(-1, self.num_flat_features(x)) 182 | # x = self.dropout(x) 183 | # print("size", x.size()) 184 | x = F.relu(self.fc1(x)) 185 | x = self.fc2(x) 186 | return x 187 | 188 | 189 | def num_flat_features(self, x): 190 | size = x.size()[1:] 191 | num_features = 1 192 | for s in size: 193 | num_features *= s 194 | return num_features 195 | 196 | 197 | 198 | 199 | #========================================================================== 200 | # Function: Main definition 201 | #========================================================================== 202 | def main(): 203 | 204 | # parames for data 205 | id_device = 1 206 | params_num_workers = 4 207 | batch_size = 64 208 | num_classes = 2 209 | save_frequency = 2 210 | learning_rate = 0.00001 211 | num_epochs = 500 212 | weight_decay = 0.0001 213 | momentum = 0.9 214 | train_losses, test_losses = [], [] 215 | running_loss = 0 216 | steps = 0 217 | print_every = 35 # 175/5 218 | 219 | # select device 220 | device = torch.device("cuda:" + str(id_device) if torch.cuda.is_available() else "cpu") # PyTorch v0.4.0 221 | print("using device :", device) 222 | model = Network_Baseline().to(device) 223 | 224 | # weights initialization 225 | # model.apply(weights_init) 226 | 227 | # DataFolder 228 | train_data = Dataset_ADNI_Folder(root=root_path + 'train/', loader=pickle_loader, extensions='.pkl', transform=None) 229 | valid_data = Dataset_ADNI_Folder(root=root_path + 'valid/', loader=pickle_loader, extensions='.pkl', transform=None) 230 | test_data = Dataset_ADNI_Folder(root=root_path + 'test/' , loader=pickle_loader, extensions='.pkl', transform=None) 231 | 232 | 233 | # Dataloader 234 | train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=params_num_workers) 235 | valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, shuffle=True, num_workers=params_num_workers) 236 | test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=True, num_workers=params_num_workers) 237 | valid_loader = test_loader 238 | 239 | # net = LeNet() 240 | # summary(model, (28, 28, 28)) 241 | 242 | criterion = nn.CrossEntropyLoss() 243 | optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum, weight_decay=weight_decay) 244 | 245 | #scheduler = optim.lr_scheduler.StepLR(optimizer,step_size=10, gamma=0.1) 246 | 247 | # Train the model 248 | total_step = len(train_loader) 249 | loss_list = [] 250 | acc_list = [] 251 | valid_acc = [] 252 | 253 | running_loss = 0.0 254 | for epoch in range(num_epochs): 255 | for i, (d1, d2, v, labels) in enumerate(train_loader): 256 | 257 | # 258 | 
steps += 1 259 | 260 | # # forward + backward + optimize 261 | # print("d1 size:", d1.size()) 262 | # d1 = torch.unsqueeze(d1, 1).to(device, dtype=torch.float) 263 | d1 = d1.to(device, dtype=torch.float) 264 | # print("d1 size:", d1.size()) 265 | labels = labels.to(device) 266 | # zero the parameter gradients 267 | optimizer.zero_grad() 268 | 269 | outputs = model(d1) 270 | loss = criterion(outputs, labels) 271 | loss.backward() 272 | optimizer.step() 273 | running_loss += loss.item() 274 | 275 | 276 | # Track the accuracy 277 | total = labels.size(0) 278 | _, predicted = torch.max(outputs.data, 1) 279 | correct = (predicted == labels).sum().item() 280 | 281 | # acc_list.append((correct / total) * 100) 282 | 283 | 284 | if steps % print_every == 0: 285 | acc_list.append((correct / total) * 100) 286 | test_loss = 0 287 | accuracy = 0 288 | model.eval() 289 | with torch.no_grad(): 290 | for i, (v_d1, v_d2, v_v, v_labels) in enumerate(valid_loader): 291 | # v_d1 = torch.unsqueeze(v_d1, 1).to(device, dtype=torch.float) 292 | v_d1 = v_d1.to(device, dtype=torch.float) 293 | v_labels = v_labels.to(device) 294 | v_outputs = model(v_d1) 295 | batch_loss = criterion(v_outputs, v_labels) 296 | test_loss += batch_loss.item() 297 | ps = torch.exp(v_outputs) 298 | top_p, top_class = ps.topk(1, dim=1) 299 | equals = top_class == v_labels.view(*top_class.shape) 300 | accuracy += torch.mean(equals.type(torch.FloatTensor)).item() 301 | 302 | 303 | # train_losses.append(running_loss/len(train_loader)) 304 | train_losses.append(running_loss/print_every) 305 | test_losses.append(test_loss/len(valid_loader)) 306 | 307 | 308 | print(f"Epoch {epoch+1}/{num_epochs}.. " 309 | f"Train loss: {running_loss/print_every:.3f}.. " 310 | f"Train accuracy: {(correct / total) * 100:.3f}.. " 311 | f"Test loss: {test_loss/len(valid_loader):.3f}.. 
" 312 | f"Test accuracy: {(accuracy/len(valid_loader) * 100):.3f}") 313 | 314 | valid_acc.append((accuracy/len(valid_loader) * 100)) 315 | 316 | running_loss = 0 317 | model.train() 318 | 319 | # scheduler.step() 320 | 321 | 322 | 323 | plt.plot(acc_list, label='Training accu') 324 | plt.plot(valid_acc, label='Validation accu') 325 | 326 | plt.legend(frameon=False) 327 | plt.show() 328 | 329 | 330 | plt.plot(train_losses, label='Training loss') 331 | plt.plot(test_losses, label='Validation loss') 332 | plt.legend(frameon=False) 333 | plt.show() 334 | 335 | 336 | 337 | print('Finished Training') 338 | 339 | 340 | 341 | #========================================================================== 342 | # Start : __Main__ 343 | #========================================================================== 344 | if __name__ == '__main__': 345 | main() -------------------------------------------------------------------------------- /src/pytorch-template/old/check_devices.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | import math 4 | import torch 5 | import sys 6 | from subprocess import call 7 | import config.ColorPrompt as CP 8 | 9 | 10 | print(CP.style.BRIGHT + CP.fg.BLUE + "\n=============================================================================================") 11 | print("= Start GPU Information ") 12 | print("=============================================================================================\n" + CP.fg.WHITE + CP.style.RESET_ALL ) 13 | 14 | print('Python VERSION:', sys.version) 15 | print('pyTorch VERSION:', torch.__version__) 16 | print('CUDA VERSION') 17 | print('CUDNN VERSION:', torch.backends.cudnn.version()) 18 | print('# Number CUDA Devices:', torch.cuda.device_count()) 19 | print("\n---------------------") 20 | print("| Devices: ") 21 | print("---------------------") 22 | call(["nvidia-smi", "--format=csv", "--query-gpu=index,name,driver_version,memory.total,memory.used,memory.free"]) 23 | print ('Available devices ', torch.cuda.device_count()) 24 | print ('Current cuda device ', torch.cuda.current_device()) 25 | 26 | 27 | # device index 28 | id_device = 1 29 | 30 | print("\n---------------------") 31 | print("| Selected GPU ") 32 | print("---------------------") 33 | # setting device on GPU if available, else CPU 34 | device = torch.device('cuda:' + str(id_device) if torch.cuda.is_available() else 'cpu') 35 | print('Using device:', device) 36 | 37 | 38 | #Additional Info when using cuda 39 | if device.type == 'cuda': 40 | print("Name: ", torch.cuda.get_device_name(id_device)) 41 | print('Memory Usage:') 42 | print('Allocated:', round(torch.cuda.memory_allocated(0)/1024**3,1), 'GB') 43 | print('Cached: ', round(torch.cuda.memory_cached(0)/1024**3,1), 'GB') 44 | 45 | 46 | print(CP.style.BRIGHT + CP.fg.BLUE + "\n=============================================================================================") 47 | print("= END GPU Information ") 48 | print("=============================================================================================\n" + CP.fg.WHITE + CP.style.RESET_ALL ) 49 | 50 | 51 | 52 | -------------------------------------------------------------------------------- /src/pytorch-template/old/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/pytorch-template/old/models/__init__.py 
-------------------------------------------------------------------------------- /src/pytorch-template/old/models/baseline_3D_single.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import errno 4 | import random 5 | import pickle 6 | import numpy as np 7 | 8 | import torch 9 | import torchvision 10 | import torch.nn.functional as F 11 | 12 | from torch.utils.data.dataset import Dataset 13 | from torch.utils.data import Dataset, DataLoader 14 | from torch.utils.data.sampler import BatchSampler 15 | from torchvision.datasets import DatasetFolder 16 | from torchvision import transforms 17 | 18 | from torch import nn 19 | from torch import optim 20 | 21 | 22 | import matplotlib.pyplot as plt 23 | 24 | 25 | 26 | #============================================================================== 27 | # Network definition 28 | #============================================================================== 29 | 30 | class SE_HIPP_3D_Net(nn.Module): 31 | def __init__(self): 32 | super(SE_HIPP_3D_Net, self).__init__() 33 | self.conv1 = nn.Conv2d(28, 32, kernel_size=4, stride=1, padding=1) 34 | self.bn1 = nn.BatchNorm2d(32) 35 | self.relu = nn.ReLU(inplace=True) 36 | self.conv2 = nn.Conv2d(32, 64, kernel_size=2, stride=1, padding=0) 37 | self.bn2 = nn.BatchNorm2d(64) 38 | 39 | self.fc1 = nn.Linear(64*7*7, 120) 40 | self.dropout = nn.Dropout(0.5) 41 | self.fc2 = nn.Linear(120, 2) 42 | 43 | def forward(self, x): 44 | 45 | x = self.conv1(x) 46 | x = F.max_pool2d(x, kernel_size=3, stride=2, padding=0) 47 | x = self.bn1(x) 48 | x = self.relu(x) 49 | 50 | x = self.conv2(x) 51 | x = F.max_pool2d(x, kernel_size=2, stride=2, padding=1) 52 | x = self.bn2(x) 53 | x = self.relu(x) 54 | # print("size", x.size()) 55 | x = x.view(-1, self.num_flat_features(x)) 56 | x = self.dropout(x) 57 | # print("size", x.size()) 58 | x = F.relu(self.fc1(x)) 59 | x = self.fc2(x) 60 | return x 61 | 62 | def num_flat_features(self, x): 63 | size = x.size()[1:] 64 | num_features = 1 65 | for s in size: 66 | num_features *= s 67 | return num_features -------------------------------------------------------------------------------- /src/pytorch-template/old/models/senet/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/pytorch-template/old/models/senet/__init__.py -------------------------------------------------------------------------------- /src/pytorch-template/old/models/senet/senet_block.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | 3 | 4 | class SELayer(nn.Module): 5 | def __init__(self, channel, reduction=16): 6 | super(SELayer, self).__init__() 7 | self.avg_pool = nn.AdaptiveAvgPool2d(1) 8 | self.fc = nn.Sequential( 9 | nn.Linear(channel, channel // reduction, bias=False), 10 | nn.ReLU(inplace=True), 11 | nn.Linear(channel // reduction, channel, bias=False), 12 | nn.Sigmoid() 13 | ) 14 | 15 | def forward(self, x): 16 | b, c, _, _ = x.size() 17 | y = self.avg_pool(x).view(b, c) 18 | y = self.fc(y).view(b, c, 1, 1) 19 | return x * y.expand_as(x) -------------------------------------------------------------------------------- /src/pytorch-template/old/senet/3D_SE_net_1.py: -------------------------------------------------------------------------------- 1 | 2 | import torch.nn as nn 3 | from torch import optim 4 | from torch.utils.data import 
Dataset, DataLoader 5 | from torch.utils.data.dataset import Dataset 6 | from torch.utils.data.sampler import BatchSampler 7 | from torchsummary import summary 8 | from torchvision import transforms 9 | from torchvision.datasets import DatasetFolder 10 | import errno 11 | import numpy as np 12 | import os 13 | import pickle 14 | import random 15 | import sys 16 | import torch 17 | import torch.nn.functional as F 18 | import torchvision 19 | import matplotlib.pyplot as plt 20 | import math 21 | 22 | 23 | 24 | 25 | ratio = 3 # reduction ratio for SE 26 | 27 | 28 | ############################################################################################################### 29 | # server 30 | ############################################################################################################### 31 | sys.path.append('/data/ADERGHAL/code-source/ADNI_Data_processing/src/data_processing/') 32 | root_path = '/data/ADERGHAL/ADNI_workspace/results/ADNI_des/F_28P_F10_MS2_MB05D/HIPP/3D/AD-NC/' 33 | 34 | ############################################################################################################### 35 | # HP computer 36 | ############################################################################################################### 37 | #sys.path.append('/home/karim/workspace/vscode-python/ADNI_Data_processing/src/data_processing') 38 | #root_path = '/home/karim/workspace/ADNI_workspace/results/ADNI_des/F_28P_F100_MS2_MB10D/HIPP/3D/AD-NC/' 39 | 40 | 41 | ADNI_MODEL_EXTENSIONS = ('.pkl') 42 | 43 | # 1 pickle loader (load one sample) 44 | def pickle_loader(path_file): 45 | dir_name = os.path.dirname(path_file) 46 | with open(path_file, 'rb') as f: 47 | model_adni = pickle.load(f) 48 | return model_adni 49 | 50 | # to check if the file type is allowed 51 | def has_file_allowed_extension(filename, extensions): 52 | return filename.lower().endswith(extensions) 53 | 54 | 55 | def is_image_file(filename): 56 | return has_file_allowed_extension(filename, ADNI_MODEL_EXTENSIONS) 57 | 58 | # function 59 | def make_dataset(dir, class_to_idx, extensions=None, is_valid_file=None): 60 | images = [] 61 | dir = os.path.expanduser(dir) 62 | if not ((extensions is None) ^ (is_valid_file is None)): 63 | raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time") 64 | if extensions is not None: 65 | def is_valid_file(x): 66 | return has_file_allowed_extension(x, extensions) 67 | for target in sorted(class_to_idx.keys()): 68 | d = os.path.join(dir, target) 69 | if not os.path.isdir(d): 70 | continue 71 | for root, _, fnames in sorted(os.walk(d)): 72 | for fname in sorted(fnames): 73 | path = os.path.join(root, fname) 74 | if is_valid_file(path): 75 | item = (path, class_to_idx[target]) 76 | images.append(item) 77 | 78 | return images 79 | 80 | 81 | # 2 Class Datafolder 82 | class Dataset_ADNI_Folder(DatasetFolder): 83 | 84 | # Methodes 85 | def __init__(self, root, loader, extensions=None, transform=None, target_transform=None, is_valid_file=None): 86 | 87 | self.root = root 88 | classes, class_to_idx = self._find_classes(self.root) 89 | samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file) 90 | 91 | if len(samples) == 0: 92 | raise (RuntimeError("Found 0 files in subfolders of: " + self.root + "\n" 93 | "Supported extensions are: " + ",".join(extensions))) 94 | 95 | self.loader = loader 96 | self.extensions = extensions 97 | self.classes = classes 98 | self.class_to_idx = class_to_idx 99 | self.samples = samples 100 | self.transform = 
transforms.Compose([transforms.ToTensor()]) 101 | self.targets = [s[1] for s in samples] 102 | 103 | # __getitem__ 104 | def __getitem__(self, index): 105 | path, target = self.samples[index] 106 | sample = self.loader(path) 107 | # if self.transform is not None: 108 | # sample = self.transform(sample) 109 | # if self.target_transform is not None: 110 | # target = self.target_transform(target) 111 | 112 | # sample is objet instance from HippModel (L, R, V, Label) 113 | return (sample.hippLeft, sample.hippRight, sample.hippMetaDataVector, target) 114 | 115 | # __len__ 116 | def __len__(self): 117 | return len(self.samples) 118 | 119 | # _find_classes 120 | def _find_classes(self, dir): 121 | if sys.version_info >= (3, 5): 122 | # Faster and available in Python 3.5 and above 123 | classes = [d.name for d in os.scandir(dir) if d.is_dir()] 124 | else: 125 | classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))] 126 | 127 | classes.sort() 128 | class_to_idx = {classes[i]: i for i in range(len(classes))} 129 | return classes, class_to_idx 130 | 131 | 132 | 133 | 134 | 135 | #============================================================================== 136 | # Network definition 137 | #============================================================================== 138 | class SEWrapper(nn.Module): 139 | def __init__(self, channels, ratio=2): 140 | super(SEWrapper, self).__init__() 141 | 142 | self.linear = nn.Sequential(nn.Linear(channels, channels // ratio), 143 | nn.ReLU(), 144 | nn.Linear(channels // ratio, channels), 145 | nn.Sigmoid()) 146 | 147 | def forward(self, input): 148 | 149 | sq = input.mean(-1).mean(-1) 150 | ex = self.linear(sq) 151 | 152 | return input * ex.unsqueeze(-1).unsqueeze(-1) 153 | 154 | 155 | 156 | 157 | 158 | class SeNet3Hipp(nn.Module): 159 | def __init__(self): 160 | super(SeNet3Hipp, self).__init__() 161 | 162 | self.layer1 = nn.Sequential( 163 | nn.Conv2d(28, 32, kernel_size=7, stride=1, padding=0), 164 | SEWrapper(32, ratio), 165 | nn.BatchNorm2d(32), 166 | nn.MaxPool2d(kernel_size=2, stride=2, padding=0), 167 | nn.ReLU()) 168 | 169 | 170 | self.fc1 = nn.Linear(32*11*11, 120) 171 | self.dropout = nn.Dropout(0.5) 172 | self.fc2 = nn.Linear(120, 2) 173 | 174 | 175 | def forward(self, x): 176 | x = self.layer1(x) 177 | # x = self.layer2(x) 178 | # print("size", x.size()) 179 | x = x.view(-1, self.num_flat_features(x)) 180 | x = self.dropout(x) 181 | # print("size", x.size()) 182 | x = F.relu(self.fc1(x)) 183 | x = self.fc2(x) 184 | return x 185 | 186 | 187 | def num_flat_features(self, x): 188 | size = x.size()[1:] 189 | num_features = 1 190 | for s in size: 191 | num_features *= s 192 | return num_features 193 | 194 | 195 | 196 | 197 | #========================================================================== 198 | # Function: Main definition 199 | #========================================================================== 200 | def main(): 201 | 202 | # parames for data 203 | id_device = 1 204 | params_num_workers = 4 205 | batch_size = 64 206 | num_classes = 2 207 | save_frequency = 2 208 | learning_rate = 0.0001 209 | num_epochs = 500 210 | weight_decay = 0.0001 211 | momentum = 0.9 212 | train_losses, test_losses = [], [] 213 | running_loss = 0 214 | steps = 0 215 | print_every = 35 # 175/5 216 | 217 | # select device 218 | device = torch.device("cuda:" + str(id_device) if torch.cuda.is_available() else "cpu") # PyTorch v0.4.0 219 | print("using device :", device) 220 | model = SeNet3Hipp().to(device) 221 | 222 | # weights initialization 
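    # NOTE (sketch): weights_init is referenced below but is not defined anywhere
    # in this repository. A minimal, hypothetical initializer it could point to,
    # assuming Xavier initialization is acceptable:
    def weights_init(m):
        # initialize conv and linear layers; leave everything else at its default
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            nn.init.xavier_uniform_(m.weight)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0.0)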
223 | # model.apply(weights_init) 224 | 225 | # DataFolder 226 | train_data = Dataset_ADNI_Folder(root=root_path + 'train/', loader=pickle_loader, extensions='.pkl', transform=None) 227 | valid_data = Dataset_ADNI_Folder(root=root_path + 'valid/', loader=pickle_loader, extensions='.pkl', transform=None) 228 | test_data = Dataset_ADNI_Folder(root=root_path + 'test/' , loader=pickle_loader, extensions='.pkl', transform=None) 229 | 230 | 231 | # Dataloader 232 | train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=params_num_workers) 233 | valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, shuffle=True, num_workers=params_num_workers) 234 | test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=True, num_workers=params_num_workers) 235 | valid_loader = test_loader 236 | 237 | # net = LeNet() 238 | # summary(model, (28, 28, 28)) 239 | 240 | criterion = nn.CrossEntropyLoss() 241 | # optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum, weight_decay=weight_decay) 242 | optimizer = optim.Adam(model.parameters(), lr=learning_rate) 243 | #scheduler = optim.lr_scheduler.StepLR(optimizer,step_size=10, gamma=0.1) 244 | 245 | # Train the model 246 | total_step = len(train_loader) 247 | loss_list = [] 248 | acc_list = [] 249 | valid_acc = [] 250 | 251 | running_loss = 0.0 252 | for epoch in range(num_epochs): 253 | for i, (d1, d2, v, labels) in enumerate(train_loader): 254 | 255 | # 256 | steps += 1 257 | 258 | # # forward + backward + optimize 259 | # print("d1 size:", d1.size()) 260 | # d1 = torch.unsqueeze(d1, 1).to(device, dtype=torch.float) 261 | d1 = d1.to(device, dtype=torch.float) 262 | # print("d1 size:", d1.size()) 263 | labels = labels.to(device) 264 | # zero the parameter gradients 265 | optimizer.zero_grad() 266 | 267 | outputs = model(d1) 268 | loss = criterion(outputs, labels) 269 | loss.backward() 270 | optimizer.step() 271 | running_loss += loss.item() 272 | 273 | 274 | # Track the accuracy 275 | total = labels.size(0) 276 | _, predicted = torch.max(outputs.data, 1) 277 | correct = (predicted == labels).sum().item() 278 | 279 | # acc_list.append((correct / total) * 100) 280 | 281 | 282 | if steps % print_every == 0: 283 | acc_list.append((correct / total) * 100) 284 | test_loss = 0 285 | accuracy = 0 286 | model.eval() 287 | with torch.no_grad(): 288 | for i, (v_d1, v_d2, v_v, v_labels) in enumerate(valid_loader): 289 | # v_d1 = torch.unsqueeze(v_d1, 1).to(device, dtype=torch.float) 290 | v_d1 = v_d1.to(device, dtype=torch.float) 291 | v_labels = v_labels.to(device) 292 | v_outputs = model(v_d1) 293 | batch_loss = criterion(v_outputs, v_labels) 294 | test_loss += batch_loss.item() 295 | ps = torch.exp(v_outputs) 296 | top_p, top_class = ps.topk(1, dim=1) 297 | equals = top_class == v_labels.view(*top_class.shape) 298 | accuracy += torch.mean(equals.type(torch.FloatTensor)).item() 299 | 300 | 301 | # train_losses.append(running_loss/len(train_loader)) 302 | train_losses.append(running_loss/print_every) 303 | test_losses.append(test_loss/len(valid_loader)) 304 | 305 | 306 | print(f"Epoch {epoch+1}/{num_epochs}.. " 307 | f"Train loss: {running_loss/print_every:.3f}.. " 308 | f"Train accuracy: {(correct / total) * 100:.3f}.. " 309 | f"Test loss: {test_loss/len(valid_loader):.3f}.. 
" 310 | f"Test accuracy: {(accuracy/len(valid_loader) * 100):.3f}") 311 | 312 | valid_acc.append((accuracy/len(valid_loader) * 100)) 313 | 314 | running_loss = 0 315 | model.train() 316 | 317 | # scheduler.step() 318 | 319 | 320 | 321 | plt.plot(acc_list, label='Training accu') 322 | plt.plot(valid_acc, label='Validation accu') 323 | 324 | plt.legend(frameon=False) 325 | plt.show() 326 | 327 | 328 | plt.plot(train_losses, label='Training loss') 329 | plt.plot(test_losses, label='Validation loss') 330 | plt.legend(frameon=False) 331 | plt.show() 332 | 333 | 334 | 335 | print('Finished Training') 336 | 337 | 338 | 339 | #========================================================================== 340 | # Start : __Main__ 341 | #========================================================================== 342 | if __name__ == '__main__': 343 | main() -------------------------------------------------------------------------------- /src/pytorch-template/old/senet/squeeze_and_excitation.py: -------------------------------------------------------------------------------- 1 | 2 | from enum import Enum 3 | import torch 4 | from torch import nn as nn 5 | from torch.nn import functional as F 6 | 7 | 8 | class ChannelSELayer3D(nn.Module): 9 | """ 10 | 3D extension of Squeeze-and-Excitation (SE) block described in: 11 | *Hu et al., Squeeze-and-Excitation Networks, arXiv:1709.01507* 12 | *Zhu et al., AnatomyNet, arXiv:arXiv:1808.05238* 13 | """ 14 | 15 | def __init__(self, num_channels, reduction_ratio=2): 16 | """ 17 | :param num_channels: No of input channels 18 | :param reduction_ratio: By how much should the num_channels should be reduced 19 | """ 20 | super(ChannelSELayer3D, self).__init__() 21 | self.avg_pool = nn.AdaptiveAvgPool3d(1) 22 | num_channels_reduced = num_channels // reduction_ratio 23 | self.reduction_ratio = reduction_ratio 24 | self.fc1 = nn.Linear(num_channels, num_channels_reduced, bias=True) 25 | self.fc2 = nn.Linear(num_channels_reduced, num_channels, bias=True) 26 | self.relu = nn.ReLU() 27 | self.sigmoid = nn.Sigmoid() 28 | 29 | def forward(self, input_tensor): 30 | """ 31 | :param input_tensor: X, shape = (batch_size, num_channels, D, H, W) 32 | :return: output tensor 33 | """ 34 | batch_size, num_channels, D, H, W = input_tensor.size() 35 | # Average along each channel 36 | squeeze_tensor = self.avg_pool(input_tensor) 37 | 38 | # channel excitation 39 | fc_out_1 = self.relu(self.fc1(squeeze_tensor.view(batch_size, num_channels))) 40 | fc_out_2 = self.sigmoid(self.fc2(fc_out_1)) 41 | 42 | output_tensor = torch.mul(input_tensor, fc_out_2.view(batch_size, num_channels, 1, 1, 1)) 43 | 44 | return output_tensor 45 | 46 | 47 | class SpatialSELayer3D(nn.Module): 48 | """ 49 | 3D extension of SE block -- squeezing spatially and exciting channel-wise described in: 50 | *Roy et al., Concurrent Spatial and Channel Squeeze & Excitation in Fully Convolutional Networks, MICCAI 2018* 51 | """ 52 | 53 | def __init__(self, num_channels): 54 | """ 55 | :param num_channels: No of input channels 56 | """ 57 | super(SpatialSELayer3D, self).__init__() 58 | self.conv = nn.Conv3d(num_channels, 1, 1) 59 | self.sigmoid = nn.Sigmoid() 60 | 61 | def forward(self, input_tensor, weights=None): 62 | """ 63 | :param weights: weights for few shot learning 64 | :param input_tensor: X, shape = (batch_size, num_channels, D, H, W) 65 | :return: output_tensor 66 | """ 67 | # channel squeeze 68 | batch_size, channel, D, H, W = input_tensor.size() 69 | 70 | if weights: 71 | weights = weights.view(1, channel, 1, 1) 72 | 
out = F.conv3d(input_tensor, weights.view(1, channel, 1, 1, 1))  # was F.conv2d with a 4D kernel, which fails on 5D (B, C, D, H, W) input 73 |             else: 74 |                 out = self.conv(input_tensor) 75 | 76 |             squeeze_tensor = self.sigmoid(out) 77 | 78 |             # spatial excitation 79 |             output_tensor = torch.mul(input_tensor, squeeze_tensor.view(batch_size, 1, D, H, W)) 80 | 81 |             return output_tensor 82 | 83 | 84 | class ChannelSpatialSELayer3D(nn.Module): 85 |     """ 86 |     3D extension of concurrent spatial and channel squeeze & excitation: 87 |     *Roy et al., Concurrent Spatial and Channel Squeeze & Excitation in Fully Convolutional Networks, arXiv:1803.02579* 88 |     """ 89 | 90 |     def __init__(self, num_channels, reduction_ratio=2): 91 |         """ 92 |         :param num_channels: No of input channels 93 |         :param reduction_ratio: By how much the num_channels should be reduced 94 |         """ 95 |         super(ChannelSpatialSELayer3D, self).__init__() 96 |         self.cSE = ChannelSELayer3D(num_channels, reduction_ratio) 97 |         self.sSE = SpatialSELayer3D(num_channels) 98 | 99 |     def forward(self, input_tensor): 100 |         """ 101 |         :param input_tensor: X, shape = (batch_size, num_channels, D, H, W) 102 |         :return: output_tensor 103 |         """ 104 |         output_tensor = torch.max(self.cSE(input_tensor), self.sSE(input_tensor)) 105 |         return output_tensor 106 | 107 | 108 | class ProjectExciteLayer(nn.Module): 109 |     """ 110 |     Project & Excite Module, specifically designed for 3D inputs 111 |     *quote* 112 |     """ 113 | 114 |     def __init__(self, num_channels, reduction_ratio=2): 115 |         """ 116 |         :param num_channels: No of input channels 117 |         :param reduction_ratio: By how much the num_channels should be reduced 118 |         """ 119 |         super(ProjectExciteLayer, self).__init__() 120 |         num_channels_reduced = num_channels // reduction_ratio 121 |         self.reduction_ratio = reduction_ratio 122 |         self.relu = nn.ReLU() 123 |         self.conv_c = nn.Conv3d(in_channels=num_channels, out_channels=num_channels_reduced, kernel_size=1, stride=1) 124 |         self.conv_cT = nn.Conv3d(in_channels=num_channels_reduced, out_channels=num_channels, kernel_size=1, stride=1) 125 |         self.sigmoid = nn.Sigmoid() 126 | 127 |     def forward(self, input_tensor): 128 |         """ 129 |         :param input_tensor: X, shape = (batch_size, num_channels, D, H, W) 130 |         :return: output tensor 131 |         """ 132 |         batch_size, num_channels, D, H, W = input_tensor.size() 133 | 134 |         # Project: 135 |         # Average along channels and different axes 136 |         squeeze_tensor_w = F.adaptive_avg_pool3d(input_tensor, (1, 1, W)) 137 | 138 |         squeeze_tensor_h = F.adaptive_avg_pool3d(input_tensor, (1, H, 1)) 139 | 140 |         squeeze_tensor_d = F.adaptive_avg_pool3d(input_tensor, (D, 1, 1)) 141 | 142 |         # tile tensors to original size and add: 143 |         final_squeeze_tensor = sum([squeeze_tensor_w.view(batch_size, num_channels, 1, 1, W), 144 |                                     squeeze_tensor_h.view(batch_size, num_channels, 1, H, 1), 145 |                                     squeeze_tensor_d.view(batch_size, num_channels, D, 1, 1)]) 146 | 147 |         # Excitation: 148 |         final_squeeze_tensor = self.sigmoid(self.conv_cT(self.relu(self.conv_c(final_squeeze_tensor)))) 149 |         output_tensor = torch.mul(input_tensor, final_squeeze_tensor) 150 | 151 |         return output_tensor 152 | 153 | 154 | class SELayer3D(Enum): 155 |     """ 156 |     Enum restricting the type of SE blocks available, so that type checking can be added when adding these blocks to 
a neural network:: 157 |         if self.se_block_type == se.SELayer3D.CSE3D.value: 158 |             self.SELayer = se.ChannelSELayer3D(params['num_filters']) 159 |         elif self.se_block_type == se.SELayer3D.SSE3D.value: 160 |             self.SELayer = se.SpatialSELayer3D(params['num_filters']) 161 |         elif self.se_block_type == se.SELayer3D.CSSE3D.value: 162 |             self.SELayer = se.ChannelSpatialSELayer3D(params['num_filters']) 163 |         elif self.se_block_type == se.SELayer3D.PE.value: 164 |             self.SELayer = se.ProjectExciteLayer(params['num_filters']) 165 |     """ 166 | 167 |     NONE = 'NONE' 168 |     CSE3D = 'CSE3D' 169 |     SSE3D = 'SSE3D' 170 |     CSSE3D = 'CSSE3D' 171 |     PE = 'PE' -------------------------------------------------------------------------------- /src/pytorch-template/tools/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaderghal/ADNI_Data_processing/454462d3913d77e3bc4de2b9725b456301c7b351/src/pytorch-template/tools/__init__.py --------------------------------------------------------------------------------
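All SE variants in squeeze_and_excitation.py preserve the shape of their 5D input. A minimal usage sketch (not part of the repository; the import assumes the file is on sys.path):

    import torch
    from squeeze_and_excitation import ChannelSELayer3D, ChannelSpatialSELayer3D

    x = torch.randn(2, 16, 8, 28, 28)             # (batch, channels, D, H, W)
    cse = ChannelSELayer3D(num_channels=16, reduction_ratio=2)
    csse = ChannelSpatialSELayer3D(num_channels=16)
    print(cse(x).shape)    # torch.Size([2, 16, 8, 28, 28]) -- shape preserved
    print(csse(x).shape)   # torch.Size([2, 16, 8, 28, 28])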