├── Data Usage Protocol.pdf ├── LICENSE ├── MMPD_Release_Agreement.pdf ├── README.md ├── gif ├── Incandescent_R.gif ├── Incandescent_S.gif ├── Incandescent_T.gif ├── Incandescent_W.gif ├── LED-high_R.gif ├── LED-high_S.gif ├── LED-high_T.gif ├── LED-high_W.gif ├── LED-low_R.gif ├── LED-low_S.gif ├── LED-low_T.gif ├── LED-low_W.gif ├── Nature_R.gif ├── Nature_S.gif ├── Nature_T.gif └── Nature_W.gif ├── meta_label.csv ├── rppg-Toolbox_MMPD ├── README.md ├── config.py ├── configs │ ├── infer_configs │ │ ├── MMPD_UNSUPERVISED.yaml │ │ ├── PURE_MMPD_TSCAN_BASIC.yaml │ │ ├── PURE_UBFC_DEEPPHYS_BASIC.yaml │ │ ├── PURE_UBFC_EFFICIENTPHYS.yaml │ │ ├── PURE_UBFC_PHYSNET_BASIC.yaml │ │ ├── PURE_UBFC_TSCAN_BASIC.yaml │ │ ├── PURE_UNSUPERVISED.yaml │ │ ├── SCAMPS_MMPD_TSCAN_BASIC.yaml │ │ ├── SCAMPS_PURE_DEEPPHYS_BASIC.yaml │ │ ├── SCAMPS_PURE_EFFICIENTPHYS.yaml │ │ ├── SCAMPS_PURE_PHYSNET_BASIC.yaml │ │ ├── SCAMPS_PURE_TSCAN_BASIC.yaml │ │ ├── SCAMPS_UBFC_DEEPPHYS_BASIC.yaml │ │ ├── SCAMPS_UBFC_EFFICIENTPHYS.yaml │ │ ├── SCAMPS_UBFC_PHYSNET_BASIC.yaml │ │ ├── SCAMPS_UBFC_TSCAN_BASIC.yaml │ │ ├── UBFC_MMPD_TSCAN_BASIC.yaml │ │ ├── UBFC_PURE_DEEPPHYS_BASIC.yaml │ │ ├── UBFC_PURE_EFFICIENTPHYS.yaml │ │ ├── UBFC_PURE_PHYSNET_BASIC.yaml │ │ ├── UBFC_PURE_TSCAN_BASIC.yaml │ │ └── UBFC_UNSUPERVISED.yaml │ └── train_configs │ │ ├── MMPD_MMPD_UBFC_TSCAN_BASIC.yaml │ │ ├── PURE_PURE_MMPD_TSCAN_BASIC.yaml │ │ ├── PURE_PURE_UBFC_DEEPPHYS_BASIC.yaml │ │ ├── PURE_PURE_UBFC_EFFICIENTPHYS.yaml │ │ ├── PURE_PURE_UBFC_PHYSNET_BASIC.yaml │ │ ├── PURE_PURE_UBFC_TSCAN_BASIC.yaml │ │ ├── SCAMPS_SCAMPS_MMPD_TSCAN_BASIC.yaml │ │ ├── SCAMPS_SCAMPS_PURE_DEEPPHYS_BASIC.yaml │ │ ├── SCAMPS_SCAMPS_PURE_PHYSNET_BASIC.yaml │ │ ├── SCAMPS_SCAMPS_PURE_TSCAN_BASIC.yaml │ │ ├── SCAMPS_SCAMPS_UBFC_DEEPPHYS_BASIC.yaml │ │ ├── SCAMPS_SCAMPS_UBFC_EFFICIENTPHYS.yaml │ │ ├── SCAMPS_SCAMPS_UBFC_PHYSNET_BASIC.yaml │ │ ├── SCAMPS_SCAMPS_UBFC_TSCAN_BASIC.yaml │ │ ├── UBFC_UBFC_MMPD_TSCAN_BASIC.yaml │ │ ├── 
UBFC_UBFC_PURE_DEEPPHYS_BASIC.yaml │ │ ├── UBFC_UBFC_PURE_EFFICIENTPHYS.yaml │ │ ├── UBFC_UBFC_PURE_PHYSNET_BASIC.yaml │ │ └── UBFC_UBFC_PURE_TSCAN_BASIC.yaml ├── dataset │ ├── __init__.py │ ├── data_loader │ │ ├── BaseLoader.py │ │ ├── COHFACELoader.py │ │ ├── MMPDLoader.py │ │ ├── PURELoader.py │ │ ├── SCAMPSLoader.py │ │ ├── UBFCLoader.py │ │ └── __init__.py │ └── haarcascade_frontalface_default.xml ├── evaluation │ ├── metrics.py │ └── post_process.py ├── final_model_release │ ├── PURE_DeepPhys.pth │ ├── PURE_EfficientPhys.pth │ ├── PURE_PhysNet_DiffNormalized.pth │ ├── PURE_TSCAN.pth │ ├── SCAMPS_DeepPhys.pth │ ├── SCAMPS_EfficientPhys.pth │ ├── SCAMPS_PhysNet_DiffNormalied.pth │ ├── SCAMPS_TSCAN.pth │ └── UBFC_DeepPhys.pth ├── main.py ├── neural_methods │ ├── __init__.py │ ├── loss │ │ ├── NegPearsonLoss.py │ │ ├── PhysNetNegPearsonLoss.py │ │ └── __init__.py │ ├── model │ │ ├── DeepPhys.py │ │ ├── EfficientPhys.py │ │ ├── PhysNet.py │ │ ├── TS_CAN.py │ │ └── __init__.py │ └── trainer │ │ ├── BaseTrainer.py │ │ ├── DeepPhysTrainer.py │ │ ├── EfficientPhysTrainer.py │ │ ├── PhysnetTrainer.py │ │ ├── TscanTrainer.py │ │ └── __init__.py ├── requirements.txt ├── setup.sh ├── unsupervised_methods │ ├── __init__.py │ ├── methods │ │ ├── CHROME_DEHAAN.py │ │ ├── GREEN.py │ │ ├── ICA_POH.py │ │ ├── LGI.py │ │ ├── PBV.py │ │ └── POS_WANG.py │ ├── unsupervised_predictor.py │ └── utils.py └── wip │ └── label │ ├── COHFACE_Comparison.csv │ ├── COHFACE_Comparison.xlsx │ ├── PURE_Comparison.csv │ ├── PURE_Comparison.xlsx │ ├── UBFC_Comparison.csv │ ├── UBFC_Comparison.xlsx │ ├── __init__.py │ ├── comparision_PURE.csv │ ├── comparison_COHFACE.csv │ ├── comparison_UBFC.csv │ └── read_gt_hr.py ├── size_MMPD.csv └── size_mini_MMPD.csv /Data Usage Protocol.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/THU-CS-PI/MMPD_rPPG_dataset/b88d2e248cf4d0b362c90492d189d7bd68ae85e7/Data Usage Protocol.pdf 
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 McJackTang 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /MMPD_Release_Agreement.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/THU-CS-PI/MMPD_rPPG_dataset/b88d2e248cf4d0b362c90492d189d7bd68ae85e7/MMPD_Release_Agreement.pdf -------------------------------------------------------------------------------- /gif/Incandescent_R.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/THU-CS-PI/MMPD_rPPG_dataset/b88d2e248cf4d0b362c90492d189d7bd68ae85e7/gif/Incandescent_R.gif -------------------------------------------------------------------------------- /gif/Incandescent_S.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/THU-CS-PI/MMPD_rPPG_dataset/b88d2e248cf4d0b362c90492d189d7bd68ae85e7/gif/Incandescent_S.gif -------------------------------------------------------------------------------- /gif/Incandescent_T.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/THU-CS-PI/MMPD_rPPG_dataset/b88d2e248cf4d0b362c90492d189d7bd68ae85e7/gif/Incandescent_T.gif -------------------------------------------------------------------------------- /gif/Incandescent_W.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/THU-CS-PI/MMPD_rPPG_dataset/b88d2e248cf4d0b362c90492d189d7bd68ae85e7/gif/Incandescent_W.gif -------------------------------------------------------------------------------- /gif/LED-high_R.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/THU-CS-PI/MMPD_rPPG_dataset/b88d2e248cf4d0b362c90492d189d7bd68ae85e7/gif/LED-high_R.gif -------------------------------------------------------------------------------- /gif/LED-high_S.gif: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/THU-CS-PI/MMPD_rPPG_dataset/b88d2e248cf4d0b362c90492d189d7bd68ae85e7/gif/LED-high_S.gif -------------------------------------------------------------------------------- /gif/LED-high_T.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/THU-CS-PI/MMPD_rPPG_dataset/b88d2e248cf4d0b362c90492d189d7bd68ae85e7/gif/LED-high_T.gif -------------------------------------------------------------------------------- /gif/LED-high_W.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/THU-CS-PI/MMPD_rPPG_dataset/b88d2e248cf4d0b362c90492d189d7bd68ae85e7/gif/LED-high_W.gif -------------------------------------------------------------------------------- /gif/LED-low_R.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/THU-CS-PI/MMPD_rPPG_dataset/b88d2e248cf4d0b362c90492d189d7bd68ae85e7/gif/LED-low_R.gif -------------------------------------------------------------------------------- /gif/LED-low_S.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/THU-CS-PI/MMPD_rPPG_dataset/b88d2e248cf4d0b362c90492d189d7bd68ae85e7/gif/LED-low_S.gif -------------------------------------------------------------------------------- /gif/LED-low_T.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/THU-CS-PI/MMPD_rPPG_dataset/b88d2e248cf4d0b362c90492d189d7bd68ae85e7/gif/LED-low_T.gif -------------------------------------------------------------------------------- /gif/LED-low_W.gif: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/THU-CS-PI/MMPD_rPPG_dataset/b88d2e248cf4d0b362c90492d189d7bd68ae85e7/gif/LED-low_W.gif -------------------------------------------------------------------------------- /gif/Nature_R.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/THU-CS-PI/MMPD_rPPG_dataset/b88d2e248cf4d0b362c90492d189d7bd68ae85e7/gif/Nature_R.gif -------------------------------------------------------------------------------- /gif/Nature_S.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/THU-CS-PI/MMPD_rPPG_dataset/b88d2e248cf4d0b362c90492d189d7bd68ae85e7/gif/Nature_S.gif -------------------------------------------------------------------------------- /gif/Nature_T.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/THU-CS-PI/MMPD_rPPG_dataset/b88d2e248cf4d0b362c90492d189d7bd68ae85e7/gif/Nature_T.gif -------------------------------------------------------------------------------- /gif/Nature_W.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/THU-CS-PI/MMPD_rPPG_dataset/b88d2e248cf4d0b362c90492d189d7bd68ae85e7/gif/Nature_W.gif -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/infer_configs/MMPD_UNSUPERVISED.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "unsupervised_method" # "train_and_test" or "only_test" or "unsupervised_method" 3 | UNSUPERVISED: 4 | METHOD: ["ICA", "POS", "CHROM", "GREEN", "LGI", "PBV"] 5 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 6 | DATA: 7 | INFO: 8 | LIGHT: [1, 2, 3, 4] # 'LED-low','LED-high','Incandescent','Nature' 9 | MOTION: [1, 2, 3, 4] # 'Stationary', 'Rotation', 'Talking', 'Walking' 10 | EXERCISE: [ 2] # True, False 11 | 
SKIN_COLOR: [3, 4] # 3, 4, 5, 6 12 | GENDER: [1, 2] # 'male','female' 13 | GLASSER: [1, 2] # True, False 14 | HAIR_COVER: [1, 2] # True, False 15 | MAKEUP: [1, 2] # True, False 16 | FS: 30 17 | DATASET: MMPD 18 | DO_PREPROCESS: False # if first time, should be true 19 | DATA_FORMAT: NDHWC 20 | DATA_PATH: "/data/rPPG_dataset/mat_dataset" # Raw dataset path, need to be updated 21 | CACHED_PATH: "/data/rPPG_dataset/processed_dataset" # Processed dataset save path, need to be updated 22 | EXP_DATA_NAME: "" 23 | BEGIN: 0.0 24 | END: 1.0 25 | PREPROCESS : 26 | DATA_TYPE: ['Raw'] 27 | LABEL_TYPE: Raw 28 | DO_CHUNK: False 29 | CHUNK_LENGTH: 180 30 | DYNAMIC_DETECTION: False 31 | DYNAMIC_DETECTION_FREQUENCY : 180 32 | CROP_FACE: True 33 | LARGE_FACE_BOX: True 34 | LARGE_BOX_COEF: 1.5 35 | H: 72 36 | W: 72 37 | INFERENCE: 38 | EVALUATION_METHOD: "FFT" # "FFT" or "peak detection" 39 | 40 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/infer_configs/PURE_MMPD_TSCAN_BASIC.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "only_test" # "train_and_test" or "only_test" 3 | TEST: 4 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 5 | USE_LAST_EPOCH: True # to use provided validation dataset to find the best epoch, should be false 6 | DATA: 7 | INFO: 8 | LIGHT: [1, 2, 3, 4] # 'LED-low','LED-high','Incandescent','Nature' 9 | MOTION: [1, 2, 3, 4] # 'Stationary', 'Rotation', 'Talking', 'Walking' 10 | EXERCISE: [ 2] # True, False 11 | SKIN_COLOR: [3, 4] # 3, 4, 5, 6 12 | GENDER: [1, 2] # 'male','female' 13 | GLASSER: [1, 2] # True, False 14 | HAIR_COVER: [1, 2] # True, False 15 | MAKEUP: [1, 2] # True, False 16 | FS: 30 17 | DATASET: MMPD 18 | DO_PREPROCESS: False # if first time, should be true 19 | DATA_FORMAT: NDCHW 20 | DATA_PATH: "/data/rPPG_dataset/mat_dataset" # Raw dataset path, need to be updated 21 | CACHED_PATH: "/data/rPPG_dataset/processed_dataset" # 
Processed dataset save path, need to be updated 22 | EXP_DATA_NAME: "" 23 | BEGIN: 0.0 24 | END: 1.0 25 | PREPROCESS: 26 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 27 | LABEL_TYPE: DiffNormalized 28 | DO_CHUNK: True 29 | CHUNK_LENGTH: 180 30 | DYNAMIC_DETECTION: False 31 | DYNAMIC_DETECTION_FREQUENCY : 180 32 | CROP_FACE: True 33 | LARGE_FACE_BOX: True 34 | LARGE_BOX_COEF: 1.5 35 | H: 72 36 | W: 72 37 | DEVICE: cuda:0 38 | NUM_OF_GPU_TRAIN: 1 39 | LOG: 40 | PATH: runs/exp 41 | MODEL: 42 | DROP_RATE: 0.2 43 | NAME: Tscan 44 | TSCAN: 45 | FRAME_DEPTH: 10 46 | INFERENCE: 47 | BATCH_SIZE: 4 48 | EVALUATION_METHOD: "FFT" # "FFT" or "peak detection" 49 | MODEL_PATH: "/data/rPPG-Toolbox_MMPD/final_model_release/PURE_TSCAN.pth" 50 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/infer_configs/PURE_UBFC_DEEPPHYS_BASIC.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "only_test" # "train_and_test" or "only_test" 3 | TEST: 4 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 5 | USE_LAST_EPOCH: True 6 | DATA: 7 | FS: 30 8 | DATASET: UBFC 9 | DO_PREPROCESS: False # if first time, should be true 10 | DATA_FORMAT: NDCHW 11 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/UBFC/RawData" # need to be updated 12 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 13 | EXP_DATA_NAME: "" 14 | BEGIN: 0.0 15 | END: 1.0 16 | PREPROCESS: 17 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 18 | LABEL_TYPE: DiffNormalized 19 | DO_CHUNK: True 20 | CHUNK_LENGTH: 180 21 | DYNAMIC_DETECTION: False 22 | DYNAMIC_DETECTION_FREQUENCY : 180 23 | CROP_FACE: True 24 | LARGE_FACE_BOX: True 25 | LARGE_BOX_COEF: 1.5 26 | H: 72 27 | W: 72 28 | DEVICE: cuda:0 29 | NUM_OF_GPU_TRAIN: 1 30 | LOG: 31 | PATH: runs/exp 32 | MODEL: 33 | DROP_RATE: 0.2 34 | NAME: DeepPhys 35 | INFERENCE: 36 | 
BATCH_SIZE: 4 37 | EVALUATION_METHOD: FFT # "FFT" or "peak detection" 38 | MODEL_PATH: "final_model_release/PURE_DeepPhys.pth" 39 | 40 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/infer_configs/PURE_UBFC_EFFICIENTPHYS.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "only_test" # "train_and_test" or "only_test" 3 | TEST: 4 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 5 | USE_LAST_EPOCH: True 6 | DATA: 7 | FS: 30 8 | DATASET: UBFC 9 | DO_PREPROCESS: False # if first time, should be true 10 | DATA_FORMAT: NDCHW 11 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/UBFC/RawData" # Raw dataset path, need to be updated 12 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 13 | EXP_DATA_NAME: "" 14 | BEGIN: 0.0 15 | END: 1.0 16 | PREPROCESS: 17 | DATA_TYPE: ['Standardized' ] 18 | LABEL_TYPE: DiffNormalized 19 | DO_CHUNK: True 20 | CHUNK_LENGTH: 180 21 | DYNAMIC_DETECTION: False 22 | DYNAMIC_DETECTION_FREQUENCY : 180 23 | CROP_FACE: True 24 | LARGE_FACE_BOX: True 25 | LARGE_BOX_COEF: 1.5 26 | H: 72 27 | W: 72 28 | DEVICE: cuda:0 29 | NUM_OF_GPU_TRAIN: 1 30 | LOG: 31 | PATH: runs/exp 32 | MODEL: 33 | DROP_RATE: 0.2 34 | NAME: EfficientPhys 35 | EFFICIENTPHYS: 36 | FRAME_DEPTH: 10 37 | INFERENCE: 38 | BATCH_SIZE: 4 39 | EVALUATION_METHOD: 'FFT' # "FFT" or "peak detection" 40 | MODEL_PATH: "./final_model_release/PURE_EfficientPhys.pth" 41 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/infer_configs/PURE_UBFC_PHYSNET_BASIC.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "only_test" # "train_and_test" or "only_test" 3 | TEST: 4 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 5 | USE_LAST_EPOCH: True 6 | DATA: 7 | FS: 30 8 | 
DATASET: UBFC 9 | DO_PREPROCESS: False # if first time, should be true 10 | DATA_FORMAT: NCDHW 11 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/UBFC/RawData" # Raw dataset path, need to be updated 12 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 13 | EXP_DATA_NAME: "" 14 | BEGIN: 0.0 15 | END: 1.0 16 | PREPROCESS : 17 | DATA_TYPE: ['DiffNormalized'] #if use physnet, should be DiffNormalized 18 | LABEL_TYPE: DiffNormalized 19 | DO_CHUNK: True 20 | CHUNK_LENGTH: 128 #only support for factor of 512 21 | DYNAMIC_DETECTION: False 22 | DYNAMIC_DETECTION_FREQUENCY : 128 23 | CROP_FACE: True 24 | LARGE_FACE_BOX: True 25 | LARGE_BOX_COEF: 1.5 26 | H: 72 27 | W: 72 28 | DEVICE: cuda:0 29 | NUM_OF_GPU_TRAIN: 1 30 | LOG: 31 | PATH: runs/exp 32 | MODEL: 33 | DROP_RATE: 0.2 34 | NAME: Physnet 35 | PHYSNET: 36 | FRAME_NUM: 128 37 | INFERENCE: 38 | BATCH_SIZE: 4 39 | EVALUATION_METHOD: FFT # "FFT" or "peak detection" 40 | MODEL_PATH: "./final_model_release/PURE_PhysNet_DiffNormalized.pth" 41 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/infer_configs/PURE_UBFC_TSCAN_BASIC.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "only_test" # "train_and_test" or "only_test" 3 | TEST: 4 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 5 | USE_LAST_EPOCH: True 6 | DATA: 7 | FS: 30 8 | DATASET: UBFC 9 | DO_PREPROCESS: False # if first time, should be true 10 | DATA_FORMAT: NDCHW 11 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/UBFC/RawData" # Raw dataset path, need to be updated 12 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 13 | EXP_DATA_NAME: "" 14 | BEGIN: 0.0 15 | END: 1.0 16 | PREPROCESS: 17 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 18 | LABEL_TYPE: 
DiffNormalized 19 | DO_CHUNK: True 20 | CHUNK_LENGTH: 180 21 | DYNAMIC_DETECTION: False 22 | DYNAMIC_DETECTION_FREQUENCY : 180 23 | CROP_FACE: True 24 | LARGE_FACE_BOX: True 25 | LARGE_BOX_COEF: 1.5 26 | H: 72 27 | W: 72 28 | DEVICE: cuda:0 29 | NUM_OF_GPU_TRAIN: 1 30 | LOG: 31 | PATH: runs/exp 32 | MODEL: 33 | DROP_RATE: 0.2 34 | NAME: Tscan 35 | TSCAN: 36 | FRAME_DEPTH: 10 37 | INFERENCE: 38 | BATCH_SIZE: 4 39 | EVALUATION_METHOD: FFT # "FFT" or "peak detection" 40 | MODEL_PATH: "./final_model_release/PURE_TSCAN.pth" 41 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/infer_configs/PURE_UNSUPERVISED.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "unsupervised_method" # "train_and_test" or "only_test" or "unsupervised_method" 3 | UNSUPERVISED: 4 | METHOD: ["ICA", "POS", "CHROM", "GREEN", "LGI", "PBV"] 5 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 6 | DATA: 7 | FS: 30 8 | DATASET: PURE 9 | DO_PREPROCESS: False # if first time, should be true 10 | DATA_FORMAT: NDHWC 11 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/PURE/RawData" # Raw dataset path, need to be updated 12 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 13 | EXP_DATA_NAME: "" 14 | BEGIN: 0.0 15 | END: 1.0 16 | PREPROCESS : 17 | DATA_TYPE: ['Raw'] 18 | LABEL_TYPE: Raw 19 | DO_CHUNK: False 20 | CHUNK_LENGTH: 180 21 | DYNAMIC_DETECTION: False 22 | DYNAMIC_DETECTION_FREQUENCY : 180 23 | CROP_FACE: True 24 | LARGE_FACE_BOX: True 25 | LARGE_BOX_COEF: 1.5 26 | H: 72 27 | W: 72 28 | INFERENCE: 29 | EVALUATION_METHOD: "FFT" # "FFT" or "peak detection" 30 | 31 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/infer_configs/SCAMPS_MMPD_TSCAN_BASIC.yaml: 
-------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "only_test" # "train_and_test" or "only_test" 3 | TEST: 4 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 5 | USE_LAST_EPOCH: True # to use provided validation dataset to find the best epoch, should be false 6 | DATA: 7 | INFO: 8 | LIGHT: [1, 2, 3, 4] # 'LED-low','LED-high','Incandescent','Nature' 9 | MOTION: [1, 2, 3, 4] # 'Stationary', 'Rotation', 'Talking', 'Walking' 10 | EXERCISE: [ 2] # True, False 11 | SKIN_COLOR: [3, 4] # 3, 4, 5, 6 12 | GENDER: [1, 2] # 'male','female' 13 | GLASSER: [1, 2] # True, False 14 | HAIR_COVER: [1, 2] # True, False 15 | MAKEUP: [1, 2] # True, False 16 | FS: 30 17 | DATASET: MMPD 18 | DO_PREPROCESS: False # if first time, should be true 19 | DATA_FORMAT: NDCHW 20 | DATA_PATH: "/data/rPPG_dataset/mat_dataset" # Raw dataset path, need to be updated 21 | CACHED_PATH: "/data/rPPG_dataset/processed_dataset" # Processed dataset save path, need to be updated 22 | EXP_DATA_NAME: "" 23 | BEGIN: 0.0 24 | END: 1.0 25 | PREPROCESS: 26 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 27 | LABEL_TYPE: DiffNormalized 28 | DO_CHUNK: True 29 | CHUNK_LENGTH: 180 30 | DYNAMIC_DETECTION: False 31 | DYNAMIC_DETECTION_FREQUENCY : 180 32 | CROP_FACE: True 33 | LARGE_FACE_BOX: True 34 | LARGE_BOX_COEF: 1.5 35 | H: 72 36 | W: 72 37 | DEVICE: cuda:0 38 | NUM_OF_GPU_TRAIN: 1 39 | LOG: 40 | PATH: runs/exp 41 | MODEL: 42 | DROP_RATE: 0.2 43 | NAME: Tscan 44 | TSCAN: 45 | FRAME_DEPTH: 10 46 | INFERENCE: 47 | BATCH_SIZE: 4 48 | EVALUATION_METHOD: "FFT" # "FFT" or "peak detection" 49 | MODEL_PATH: "/data/rPPG-Toolbox_MMPD/final_model_release/SCAMPS_TSCAN.pth" 50 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/infer_configs/SCAMPS_PURE_DEEPPHYS_BASIC.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "only_test" # 
"train_and_test" or "only_test" 3 | TEST: 4 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 5 | USE_LAST_EPOCH: True 6 | DATA: 7 | FS: 30 8 | DATASET: PURE 9 | DO_PREPROCESS: False # if first time, should be true 10 | DATA_FORMAT: NDCHW 11 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/PURE/RawData" # Raw dataset path, need to be updated 12 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 13 | EXP_DATA_NAME: "" 14 | BEGIN: 0.0 15 | END: 1.0 16 | PREPROCESS: 17 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 18 | LABEL_TYPE: DiffNormalized 19 | DO_CHUNK: True 20 | CHUNK_LENGTH: 180 21 | DYNAMIC_DETECTION: False 22 | DYNAMIC_DETECTION_FREQUENCY : 180 23 | CROP_FACE: True 24 | LARGE_FACE_BOX: True 25 | LARGE_BOX_COEF: 1.5 26 | H: 72 27 | W: 72 28 | DEVICE: cuda:0 29 | NUM_OF_GPU_TRAIN: 1 30 | LOG: 31 | PATH: runs/exp 32 | MODEL: 33 | DROP_RATE: 0.2 34 | NAME: DeepPhys 35 | INFERENCE: 36 | BATCH_SIZE: 4 37 | EVALUATION_METHOD: FFT # "FFT" or "peak detection" 38 | MODEL_PATH: "./final_model_release/SCAMPS_DeepPhys.pth" 39 | 40 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/infer_configs/SCAMPS_PURE_EFFICIENTPHYS.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "only_test" # "train_and_test" or "only_test" 3 | TEST: 4 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 5 | USE_LAST_EPOCH: True 6 | DATA: 7 | FS: 30 8 | DATASET: PURE 9 | DO_PREPROCESS: False # if first time, should be true 10 | DATA_FORMAT: NDCHW 11 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/PURE/RawData" # Raw dataset path, need to be updated 12 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 13 | EXP_DATA_NAME: "" 14 | BEGIN: 0.0 15 | END: 1.0 16 | PREPROCESS: 17 | DATA_TYPE: 
['Standardized'] 18 | LABEL_TYPE: DiffNormalized 19 | DO_CHUNK: True 20 | CHUNK_LENGTH: 180 21 | DYNAMIC_DETECTION: False 22 | DYNAMIC_DETECTION_FREQUENCY : 180 23 | CROP_FACE: True 24 | LARGE_FACE_BOX: True 25 | LARGE_BOX_COEF: 1.5 26 | H: 72 27 | W: 72 28 | DEVICE: cuda:0 29 | NUM_OF_GPU_TRAIN: 1 30 | LOG: 31 | PATH: runs/exp 32 | MODEL: 33 | DROP_RATE: 0.2 34 | NAME: EfficientPhys 35 | EFFICIENTPHYS: 36 | FRAME_DEPTH: 10 37 | INFERENCE: 38 | BATCH_SIZE: 4 39 | EVALUATION_METHOD: FFT # "FFT" or "peak detection" 40 | MODEL_PATH: "./final_model_release/SCAMPS_EfficientPhys.pth" 41 | 42 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/infer_configs/SCAMPS_PURE_PHYSNET_BASIC.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "only_test" # "train_and_test" or "only_test" 3 | TEST: 4 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 5 | USE_LAST_EPOCH: True 6 | DATA: 7 | FS: 30 8 | DATASET: PURE 9 | DO_PREPROCESS: False # if first time, should be true 10 | DATA_FORMAT: NCDHW 11 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/PURE/RawData" # Raw dataset path, need to be updated 12 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 13 | EXP_DATA_NAME: "" 14 | BEGIN: 0.0 15 | END: 1.0 16 | PREPROCESS : 17 | DATA_TYPE: ['DiffNormalized'] #if use physnet, should be DiffNormalized 18 | LABEL_TYPE: DiffNormalized 19 | DO_CHUNK: True 20 | CHUNK_LENGTH: 128 #only support for factor of 512 21 | DYNAMIC_DETECTION: False 22 | DYNAMIC_DETECTION_FREQUENCY : 128 23 | CROP_FACE: True 24 | LARGE_FACE_BOX: True 25 | LARGE_BOX_COEF: 1.5 26 | H: 72 27 | W: 72 28 | DEVICE: cuda:0 29 | NUM_OF_GPU_TRAIN: 1 30 | LOG: 31 | PATH: runs/exp 32 | MODEL: 33 | DROP_RATE: 0.2 34 | NAME: Physnet 35 | PHYSNET: 36 | FRAME_NUM: 128 37 | INFERENCE: 38 | BATCH_SIZE: 4 39 | 
EVALUATION_METHOD: FFT # "FFT" or "peak detection" 40 | MODEL_PATH: "./final_model_release/SCAMPS_PhysNet_DiffNormalied.pth" 41 | 42 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/infer_configs/SCAMPS_PURE_TSCAN_BASIC.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "only_test" # "train_and_test" or "only_test" 3 | TEST: 4 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 5 | USE_LAST_EPOCH: True 6 | DATA: 7 | FS: 30 8 | DATASET: PURE 9 | DO_PREPROCESS: False # if first time, should be true 10 | DATA_FORMAT: NDCHW 11 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/PURE/RawData" # Raw dataset path, need to be updated 12 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 13 | EXP_DATA_NAME: "" 14 | BEGIN: 0.0 15 | END: 1.0 16 | PREPROCESS: 17 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 18 | LABEL_TYPE: DiffNormalized 19 | DO_CHUNK: True 20 | CHUNK_LENGTH: 180 21 | DYNAMIC_DETECTION: False 22 | DYNAMIC_DETECTION_FREQUENCY : 180 23 | CROP_FACE: True 24 | LARGE_FACE_BOX: True 25 | LARGE_BOX_COEF: 1.5 26 | H: 72 27 | W: 72 28 | DEVICE: cuda:0 29 | NUM_OF_GPU_TRAIN: 1 30 | LOG: 31 | PATH: runs/exp 32 | MODEL: 33 | DROP_RATE: 0.2 34 | NAME: Tscan 35 | TSCAN: 36 | FRAME_DEPTH: 10 37 | INFERENCE: 38 | BATCH_SIZE: 4 39 | EVALUATION_METHOD: FFT # "FFT" or "peak detection" 40 | MODEL_PATH: "./final_model_release/SCAMPS_TSCAN.pth" 41 | 42 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/infer_configs/SCAMPS_UBFC_DEEPPHYS_BASIC.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "only_test" # "train_and_test" or "only_test" 3 | TEST: 4 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 5 | USE_LAST_EPOCH: True 6 | DATA: 7 | FS: 30 8 | 
DATASET: UBFC 9 | DO_PREPROCESS: False # if first time, should be true 10 | DATA_FORMAT: NDCHW 11 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/UBFC/RawData" # Raw dataset path, need to be updated 12 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 13 | EXP_DATA_NAME: "" 14 | BEGIN: 0.0 15 | END: 1.0 16 | PREPROCESS: 17 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 18 | LABEL_TYPE: DiffNormalized 19 | DO_CHUNK: True 20 | CHUNK_LENGTH: 180 21 | DYNAMIC_DETECTION: False 22 | DYNAMIC_DETECTION_FREQUENCY : 180 23 | CROP_FACE: True 24 | LARGE_FACE_BOX: True 25 | LARGE_BOX_COEF: 1.5 26 | H: 72 27 | W: 72 28 | DEVICE: cuda:0 29 | NUM_OF_GPU_TRAIN: 1 30 | LOG: 31 | PATH: runs/exp 32 | MODEL: 33 | DROP_RATE: 0.2 34 | NAME: DeepPhys 35 | INFERENCE: 36 | BATCH_SIZE: 4 37 | EVALUATION_METHOD: FFT # "FFT" or "peak detection" 38 | MODEL_PATH: "./final_model_release/SCAMPS_DeepPhys.pth" 39 | 40 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/infer_configs/SCAMPS_UBFC_EFFICIENTPHYS.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "only_test" # "train_and_test" or "only_test" 3 | TEST: 4 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 5 | USE_LAST_EPOCH: True 6 | DATA: 7 | FS: 30 8 | DATASET: UBFC 9 | DO_PREPROCESS: False # if first time, should be true 10 | DATA_FORMAT: NDCHW 11 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/UBFC/RawData" # Raw dataset path, need to be updated 12 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 13 | EXP_DATA_NAME: "" 14 | BEGIN: 0.0 15 | END: 1.0 16 | PREPROCESS: 17 | DATA_TYPE: ['Standardized'] 18 | LABEL_TYPE: DiffNormalized 19 | DO_CHUNK: True 20 | CHUNK_LENGTH: 180 21 | DYNAMIC_DETECTION: False 22 | DYNAMIC_DETECTION_FREQUENCY 
: 180 23 | CROP_FACE: True 24 | LARGE_FACE_BOX: True 25 | LARGE_BOX_COEF: 1.5 26 | H: 72 27 | W: 72 28 | DEVICE: cuda:0 29 | NUM_OF_GPU_TRAIN: 1 30 | LOG: 31 | PATH: runs/exp 32 | MODEL: 33 | DROP_RATE: 0.2 34 | NAME: EfficientPhys 35 | EFFICIENTPHYS: 36 | FRAME_DEPTH: 10 37 | INFERENCE: 38 | BATCH_SIZE: 4 39 | EVALUATION_METHOD: FFT # "FFT" or "peak detection" 40 | MODEL_PATH: "./final_model_release/SCAMPS_EfficientPhys.pth" 41 | 42 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/infer_configs/SCAMPS_UBFC_PHYSNET_BASIC.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "only_test" # "train_and_test" or "only_test" 3 | TEST: 4 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 5 | USE_LAST_EPOCH: True 6 | DATA: 7 | FS: 30 8 | DATASET: UBFC 9 | DO_PREPROCESS: False # if first time, should be true 10 | DATA_FORMAT: NCDHW 11 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/UBFC/RawData" # Raw dataset path, need to be updated 12 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 13 | EXP_DATA_NAME: "" 14 | BEGIN: 0.0 15 | END: 1.0 16 | PREPROCESS : 17 | DATA_TYPE: ['DiffNormalized'] #if use physnet, should be DiffNormalized 18 | LABEL_TYPE: DiffNormalized 19 | DO_CHUNK: True 20 | CHUNK_LENGTH: 128 #only support for factor of 512 21 | DYNAMIC_DETECTION: False 22 | DYNAMIC_DETECTION_FREQUENCY : 128 23 | CROP_FACE: True 24 | LARGE_FACE_BOX: True 25 | LARGE_BOX_COEF: 1.5 26 | H: 72 27 | W: 72 28 | DEVICE: cuda:0 29 | NUM_OF_GPU_TRAIN: 1 30 | LOG: 31 | PATH: runs/exp 32 | MODEL: 33 | DROP_RATE: 0.2 34 | NAME: Physnet 35 | PHYSNET: 36 | FRAME_NUM: 128 37 | INFERENCE: 38 | BATCH_SIZE: 4 39 | EVALUATION_METHOD: FFT # "FFT" or "peak detection" 40 | MODEL_PATH: "./final_model_release/SCAMPS_PhysNet_DiffNormalied.pth" 41 | 
-------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/infer_configs/SCAMPS_UBFC_TSCAN_BASIC.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "only_test" # "train_and_test" or "only_test" 3 | TEST: 4 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 5 | USE_LAST_EPOCH: True 6 | DATA: 7 | FS: 30 8 | DATASET: UBFC 9 | DO_PREPROCESS: False # if first time, should be true 10 | DATA_FORMAT: NDCHW 11 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/UBFC/RawData" # Raw dataset path, need to be updated 12 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 13 | EXP_DATA_NAME: "" 14 | BEGIN: 0.0 15 | END: 1.0 16 | PREPROCESS: 17 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 18 | LABEL_TYPE: DiffNormalized 19 | DO_CHUNK: True 20 | CHUNK_LENGTH: 180 21 | DYNAMIC_DETECTION: False 22 | DYNAMIC_DETECTION_FREQUENCY : 180 23 | CROP_FACE: True 24 | LARGE_FACE_BOX: True 25 | LARGE_BOX_COEF: 1.5 26 | H: 72 27 | W: 72 28 | DEVICE: cuda:0 29 | NUM_OF_GPU_TRAIN: 1 30 | LOG: 31 | PATH: runs/exp 32 | MODEL: 33 | DROP_RATE: 0.2 34 | NAME: Tscan 35 | TSCAN: 36 | FRAME_DEPTH: 10 37 | INFERENCE: 38 | BATCH_SIZE: 4 39 | EVALUATION_METHOD: FFT # "FFT" or "peak detection" 40 | MODEL_PATH: "./final_model_release/SCAMPS_TSCAN.pth" 41 | 42 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/infer_configs/UBFC_MMPD_TSCAN_BASIC.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "only_test" # "train_and_test" or "only_test" 3 | TEST: 4 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 5 | USE_LAST_EPOCH: True # to use provided validation dataset to find the best epoch, should be false 6 | DATA: 7 | INFO: 8 | LIGHT: [1, 2, 3, 4] # 
'LED-low','LED-high','Incandescent','Nature' 9 | MOTION: [1, 2, 3, 4] # 'Stationary', 'Rotation', 'Talking', 'Walking' 10 | EXERCISE: [ 2] # True, False 11 | SKIN_COLOR: [3, 4] # 3, 4, 5, 6 12 | GENDER: [1, 2] # 'male','female' 13 | GLASSER: [1, 2] # True, False 14 | HAIR_COVER: [1, 2] # True, False 15 | MAKEUP: [1, 2] # True, False 16 | FS: 30 17 | DATASET: MMPD 18 | DO_PREPROCESS: False # if first time, should be true 19 | DATA_FORMAT: NDCHW 20 | DATA_PATH: "/data/rPPG_dataset/mat_dataset" # Raw dataset path, need to be updated 21 | CACHED_PATH: "/data/rPPG_dataset/processed_dataset" # Processed dataset save path, need to be updated 22 | EXP_DATA_NAME: "" 23 | BEGIN: 0.0 24 | END: 1.0 25 | PREPROCESS: 26 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 27 | LABEL_TYPE: DiffNormalized 28 | DO_CHUNK: True 29 | CHUNK_LENGTH: 180 30 | DYNAMIC_DETECTION: False 31 | DYNAMIC_DETECTION_FREQUENCY : 180 32 | CROP_FACE: True 33 | LARGE_FACE_BOX: True 34 | LARGE_BOX_COEF: 1.5 35 | H: 72 36 | W: 72 37 | DEVICE: cuda:0 38 | NUM_OF_GPU_TRAIN: 1 39 | LOG: 40 | PATH: runs/exp 41 | MODEL: 42 | DROP_RATE: 0.2 43 | NAME: Tscan 44 | TSCAN: 45 | FRAME_DEPTH: 10 46 | INFERENCE: 47 | BATCH_SIZE: 4 48 | EVALUATION_METHOD: "FFT" # "FFT" or "peak detection" 49 | MODEL_PATH: "/data/rPPG-Toolbox_MMPD/final_model_release/UBFC_TSCAN.pth" 50 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/infer_configs/UBFC_PURE_DEEPPHYS_BASIC.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "only_test" # "train_and_test" or "only_test" 3 | TEST: 4 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 5 | USE_LAST_EPOCH: True 6 | DATA: 7 | FS: 30 8 | DATASET: PURE 9 | DO_PREPROCESS: False # if first time, should be true 10 | DATA_FORMAT: NDCHW 11 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/PURE/RawData" # Raw dataset path, need to be updated 12 | CACHED_PATH: 
"/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 13 | EXP_DATA_NAME: "" 14 | BEGIN: 0.0 15 | END: 1.0 16 | PREPROCESS: 17 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 18 | LABEL_TYPE: DiffNormalized 19 | DO_CHUNK: True 20 | CHUNK_LENGTH: 180 21 | DYNAMIC_DETECTION: False 22 | DYNAMIC_DETECTION_FREQUENCY : 180 23 | CROP_FACE: True 24 | LARGE_FACE_BOX: True 25 | LARGE_BOX_COEF: 1.5 26 | H: 72 27 | W: 72 28 | DEVICE: cuda:0 29 | NUM_OF_GPU_TRAIN: 1 30 | LOG: 31 | PATH: runs/exp 32 | MODEL: 33 | DROP_RATE: 0.2 34 | NAME: DeepPhys 35 | INFERENCE: 36 | BATCH_SIZE: 4 37 | EVALUATION_METHOD: FFT # "FFT" or "peak detection" 38 | MODEL_PATH: "./final_model_release/UBFC_DeepPhys.pth" 39 | 40 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/infer_configs/UBFC_PURE_EFFICIENTPHYS.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "only_test" # "train_and_test" or "only_test" 3 | TEST: 4 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 5 | USE_LAST_EPOCH: True 6 | DATA: 7 | FS: 30 8 | DATASET: PURE 9 | DO_PREPROCESS: False # if first time, should be true 10 | DATA_FORMAT: NDCHW 11 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/PURE/RawData" # Raw dataset path, need to be updated 12 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 13 | EXP_DATA_NAME: "" 14 | BEGIN: 0.0 15 | END: 1.0 16 | PREPROCESS: 17 | DATA_TYPE: ['Standardized'] 18 | LABEL_TYPE: DiffNormalized 19 | DO_CHUNK: True 20 | CHUNK_LENGTH: 180 21 | DYNAMIC_DETECTION: False 22 | DYNAMIC_DETECTION_FREQUENCY : 180 23 | CROP_FACE: True 24 | LARGE_FACE_BOX: True 25 | LARGE_BOX_COEF: 1.5 26 | H: 72 27 | W: 72 28 | DEVICE: cuda:0 29 | NUM_OF_GPU_TRAIN: 1 30 | LOG: 31 | PATH: runs/exp 32 | MODEL: 33 | DROP_RATE: 0.2 34 | NAME: 
EfficientPhys 35 | EFFICIENTPHYS: 36 | FRAME_DEPTH: 10 37 | INFERENCE: 38 | BATCH_SIZE: 4 39 | EVALUATION_METHOD: FFT # "FFT" or "peak detection" 40 | MODEL_PATH: "./final_model_release/UBFC_EfficientPhys.pth" 41 | 42 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/infer_configs/UBFC_PURE_PHYSNET_BASIC.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "only_test" # "train_and_test" or "only_test" 3 | TEST: 4 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 5 | USE_LAST_EPOCH: True 6 | DATA: 7 | FS: 30 8 | DATASET: PURE 9 | DO_PREPROCESS: False # if first time, should be true 10 | DATA_FORMAT: NCDHW 11 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/PURE/RawData" # Raw dataset path, need to be updated 12 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 13 | EXP_DATA_NAME: "" 14 | BEGIN: 0.0 15 | END: 1.0 16 | PREPROCESS : 17 | DATA_TYPE: ['DiffNormalized'] #if use physnet, should be DiffNormalized 18 | LABEL_TYPE: DiffNormalized 19 | DO_CHUNK: True 20 | CHUNK_LENGTH: 128 #only support for factor of 512 21 | DYNAMIC_DETECTION: False 22 | DYNAMIC_DETECTION_FREQUENCY : 128 23 | CROP_FACE: True 24 | LARGE_FACE_BOX: True 25 | LARGE_BOX_COEF: 1.5 26 | H: 72 27 | W: 72 28 | DEVICE: cuda:0 29 | NUM_OF_GPU_TRAIN: 1 30 | LOG: 31 | PATH: runs/exp 32 | MODEL: 33 | DROP_RATE: 0.2 34 | NAME: Physnet 35 | PHYSNET: 36 | FRAME_NUM: 128 37 | INFERENCE: 38 | BATCH_SIZE: 4 39 | EVALUATION_METHOD: FFT # "FFT" or "peak detection" 40 | MODEL_PATH: "./final_model_release/UBFC_PhysNet_Normalized.pth" 41 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/infer_configs/UBFC_PURE_TSCAN_BASIC.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: 
"only_test" # "train_and_test" or "only_test" 3 | TEST: 4 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 5 | USE_LAST_EPOCH: True 6 | DATA: 7 | FS: 30 8 | DATASET: PURE 9 | DO_PREPROCESS: False # if first time, should be true 10 | DATA_FORMAT: NDCHW 11 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/PURE/RawData" # Raw dataset path, need to be updated 12 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 13 | EXP_DATA_NAME: "" 14 | BEGIN: 0.0 15 | END: 1.0 16 | PREPROCESS: 17 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 18 | LABEL_TYPE: DiffNormalized 19 | DO_CHUNK: True 20 | CHUNK_LENGTH: 180 21 | DYNAMIC_DETECTION: False 22 | DYNAMIC_DETECTION_FREQUENCY : 180 23 | CROP_FACE: True 24 | LARGE_FACE_BOX: True 25 | LARGE_BOX_COEF: 1.5 26 | H: 72 27 | W: 72 28 | DEVICE: cuda:0 29 | NUM_OF_GPU_TRAIN: 1 30 | LOG: 31 | PATH: runs/exp 32 | MODEL: 33 | DROP_RATE: 0.2 34 | NAME: Tscan 35 | TSCAN: 36 | FRAME_DEPTH: 10 37 | INFERENCE: 38 | BATCH_SIZE: 4 39 | EVALUATION_METHOD: FFT # "FFT" or "peak detection" 40 | MODEL_PATH: "./final_model_release/UBFC_TSCAN.pth" 41 | 42 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/infer_configs/UBFC_UNSUPERVISED.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "unsupervised_method" # "train_and_test" or "only_test" or "unsupervised_method" 3 | UNSUPERVISED: 4 | METHOD: ["ICA", "POS", "CHROM", "GREEN", "LGI", "PBV"] 5 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 6 | DATA: 7 | FS: 30 8 | DATASET: UBFC 9 | DO_PREPROCESS: False # if first time, should be true 10 | DATA_FORMAT: NDHWC 11 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/UBFC/RawData" # Raw dataset path, need to be updated 12 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, 
need to be updated 13 | EXP_DATA_NAME: "" 14 | BEGIN: 0.0 15 | END: 1.0 16 | PREPROCESS : 17 | DATA_TYPE: ['Raw'] 18 | LABEL_TYPE: Raw 19 | DO_CHUNK: False 20 | CHUNK_LENGTH: 180 21 | DYNAMIC_DETECTION: False 22 | DYNAMIC_DETECTION_FREQUENCY : 180 23 | CROP_FACE: True 24 | LARGE_FACE_BOX: True 25 | LARGE_BOX_COEF: 1.5 26 | H: 72 27 | W: 72 28 | INFERENCE: 29 | EVALUATION_METHOD: "FFT" # "FFT" or "peak detection" 30 | 31 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/train_configs/MMPD_MMPD_UBFC_TSCAN_BASIC.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "train_and_test" # "train_and_test" or "only_test" 3 | TRAIN: 4 | BATCH_SIZE: 4 5 | EPOCHS: 20 6 | LR: 9e-3 7 | MODEL_FILE_NAME: MMPD_MMPD_UBFC_tscan 8 | DATA: 9 | INFO: 10 | LIGHT: [1,2,3,4] # 'LED-low','LED-high','Incandescent','Nature' 11 | MOTION: [1,2,3] # 'Stationary', 'Rotation', 'Talking', 'Walking' 12 | EXERCISE: [2] # True, False 13 | SKIN_COLOR: [3] # 3, 4, 5, 6 14 | GENDER: [1, 2] # 'male','female' 15 | GLASSER: [1, 2] # True, False 16 | HAIR_COVER: [1, 2] # True, False 17 | MAKEUP: [1, 2] # True, False 18 | FS: 30 19 | DATASET: MMPD 20 | DO_PREPROCESS: True # if first time, should be true 21 | DATA_FORMAT: NDCHW 22 | DATA_PATH: "/data/rPPG_dataset/MMPD" # Raw dataset path, need to be updated 23 | CACHED_PATH: "/data/rPPG_dataset/processed_dataset" # Processed dataset save path, need to be updated 24 | EXP_DATA_NAME: "" 25 | BEGIN: 0.0 26 | END: 0.8 27 | PREPROCESS: 28 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 29 | LABEL_TYPE: DiffNormalized 30 | DO_CHUNK: True 31 | CHUNK_LENGTH: 180 32 | DYNAMIC_DETECTION: False 33 | DYNAMIC_DETECTION_FREQUENCY : 180 34 | CROP_FACE: True 35 | LARGE_FACE_BOX: True 36 | LARGE_BOX_COEF: 1.5 37 | H: 72 38 | W: 72 39 | VALID: 40 | DATA: 41 | INFO: 42 | LIGHT: [1,2,3,4] # 'LED-low','LED-high','Incandescent','Nature' 43 | MOTION: [1,2,3] 
# 'Stationary', 'Rotation', 'Talking', 'Walking' 44 | EXERCISE: [2] # True, False 45 | SKIN_COLOR: [3] # 3, 4, 5, 6 46 | GENDER: [1, 2] # 'male','female' 47 | GLASSER: [1, 2] # True, False 48 | HAIR_COVER: [1, 2] # True, False 49 | MAKEUP: [1, 2] # True, False 50 | FS: 30 51 | DATASET: MMPD 52 | DO_PREPROCESS: True # if first time, should be true 53 | DATA_FORMAT: NDCHW 54 | DATA_PATH: "/data/rPPG_dataset/MMPD" # Raw dataset path, need to be updated 55 | CACHED_PATH: "/data/rPPG_dataset/processed_dataset" # Processed dataset save path, need to be updated 56 | EXP_DATA_NAME: "" 57 | BEGIN: 0.8 58 | END: 1.0 59 | PREPROCESS: 60 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 61 | LABEL_TYPE: DiffNormalized 62 | DO_CHUNK: True 63 | CHUNK_LENGTH: 180 64 | DYNAMIC_DETECTION: False 65 | DYNAMIC_DETECTION_FREQUENCY : 180 66 | CROP_FACE: True 67 | LARGE_FACE_BOX: True 68 | LARGE_BOX_COEF: 1.5 69 | H: 72 70 | W: 72 71 | TEST: 72 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 73 | USE_LAST_EPOCH: True # to use provided validation dataset to find the best epoch, should be false 74 | DATA: 75 | FS: 30 76 | DATASET: UBFC 77 | DO_PREPROCESS: False # if first time, should be true 78 | DATA_FORMAT: NDCHW 79 | DATA_PATH: "/data1/acsp/toolbox_data/UBFC/RawData/" # Raw dataset path, need to be updated 80 | CACHED_PATH: "/data/rPPG_dataset/processed_dataset" # Processed dataset save path, need to be updated 81 | EXP_DATA_NAME: "" 82 | BEGIN: 0.0 83 | END: 1.0 84 | PREPROCESS : 85 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 86 | LABEL_TYPE: DiffNormalized 87 | DO_CHUNK: True 88 | CHUNK_LENGTH: 180 89 | DYNAMIC_DETECTION: False 90 | DYNAMIC_DETECTION_FREQUENCY : 180 91 | CROP_FACE: True 92 | LARGE_FACE_BOX: True 93 | LARGE_BOX_COEF: 1.5 94 | H: 72 95 | W: 72 96 | DEVICE: cuda:0 97 | NUM_OF_GPU_TRAIN: 1 98 | LOG: 99 | PATH: runs/exp 100 | MODEL: 101 | DROP_RATE: 0.2 102 | NAME: Tscan 103 | TSCAN: 104 | FRAME_DEPTH: 10 105 | INFERENCE: 106 | BATCH_SIZE: 4 107 | EVALUATION_METHOD: "FFT" 
# "FFT" or "peak detection" 108 | MODEL_PATH: "" 109 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/train_configs/PURE_PURE_MMPD_TSCAN_BASIC.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "only_test" # "train_and_test" or "only_test" 3 | TRAIN: 4 | BATCH_SIZE: 4 5 | EPOCHS: 30 6 | LR: 9e-3 7 | MODEL_FILE_NAME: PURE_PURE_MMPD_tscan 8 | DATA: 9 | FS: 30 10 | DATASET: PURE 11 | DO_PREPROCESS: False # if first time, should be true 12 | DATA_FORMAT: NDCHW 13 | DATA_PATH: "/data1/acsp/toolbox_data/PURE/RawData" # Raw dataset path, need to be updated 14 | CACHED_PATH: "/data/rPPG_dataset/processed_dataset_makeup" # Processed dataset save path, need to be updated 15 | EXP_DATA_NAME: "" 16 | BEGIN: 0.0 17 | END: 0.8 18 | PREPROCESS : 19 | DATA_TYPE: ['DiffNormalized','Standardized'] 20 | LABEL_TYPE: DiffNormalized 21 | DO_CHUNK: True 22 | CHUNK_LENGTH: 180 23 | DYNAMIC_DETECTION: False 24 | DYNAMIC_DETECTION_FREQUENCY : 180 25 | CROP_FACE: True 26 | LARGE_FACE_BOX: True 27 | LARGE_BOX_COEF: 1.5 28 | H: 72 29 | W: 72 30 | VALID: 31 | DATA: 32 | FS: 30 33 | DATASET: PURE 34 | DO_PREPROCESS: False # if first time, should be true 35 | DATA_FORMAT: NDCHW 36 | DATA_PATH: "/data1/acsp/toolbox_data/PURE/RawData" # Raw dataset path, need to be updated 37 | CACHED_PATH: "/data/rPPG_dataset/processed_dataset_makeup" # Processed dataset save path, need to be updated 38 | EXP_DATA_NAME: "" 39 | BEGIN: 0.8 40 | END: 1.0 41 | PREPROCESS: 42 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 43 | LABEL_TYPE: DiffNormalized 44 | DO_CHUNK: True 45 | CHUNK_LENGTH: 180 46 | DYNAMIC_DETECTION: False 47 | DYNAMIC_DETECTION_FREQUENCY : 180 48 | CROP_FACE: True 49 | LARGE_FACE_BOX: True 50 | LARGE_BOX_COEF: 1.5 51 | H: 72 52 | W: 72 53 | TEST: 54 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 55 | USE_LAST_EPOCH: True # to use provided validation dataset to find the 
best epoch, should be false 56 | DATA: 57 | INFO: 58 | LIGHT: [1, 2, 3] # 'LED-low','LED-high','Incandescent','Nature' 59 | MOTION: [1] # 'Stationary', 'Rotation', 'Talking', 'Walking' 60 | EXERCISE: [2] # True, False 61 | SKIN_COLOR: [3] # 3, 4, 5, 6 62 | GENDER: [1, 2] # 'male','female' 63 | GLASSER: [1, 2] # True, False 64 | HAIR_COVER: [1, 2] # True, False 65 | MAKEUP: [1, 2] # True, False 66 | FS: 30 67 | DATASET: MMPD 68 | DO_PREPROCESS: False # if first time, should be true 69 | DATA_FORMAT: NDCHW 70 | DATA_PATH: "/data/rPPG_dataset/mat_dataset" # Raw dataset path, need to be updated 71 | CACHED_PATH: "/data/rPPG_dataset/processed_dataset" # Processed dataset save path, need to be updated 72 | EXP_DATA_NAME: "" 73 | BEGIN: 0.0 74 | END: 1.0 75 | PREPROCESS: 76 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 77 | LABEL_TYPE: DiffNormalized 78 | DO_CHUNK: True 79 | CHUNK_LENGTH: 180 80 | DYNAMIC_DETECTION: False 81 | DYNAMIC_DETECTION_FREQUENCY : 180 82 | CROP_FACE: True 83 | LARGE_FACE_BOX: True 84 | LARGE_BOX_COEF: 1.5 85 | H: 72 86 | W: 72 87 | DEVICE: cuda:0 88 | NUM_OF_GPU_TRAIN: 1 89 | LOG: 90 | PATH: runs/exp 91 | MODEL: 92 | DROP_RATE: 0.2 93 | NAME: Tscan 94 | TSCAN: 95 | FRAME_DEPTH: 10 96 | INFERENCE: 97 | BATCH_SIZE: 4 98 | EVALUATION_METHOD: "FFT" # "FFT" or "peak detection" 99 | MODEL_PATH: "/data/rPPG-Toolbox_MMPD/final_model_release/PURE_TSCAN.pth" 100 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/train_configs/PURE_PURE_UBFC_DEEPPHYS_BASIC.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "train_and_test" # "train_and_test" or "only_test" 3 | TRAIN: 4 | BATCH_SIZE: 4 5 | EPOCHS: 30 6 | LR: 9e-3 7 | MODEL_FILE_NAME: PURE_PURE_UBFC_deepphys 8 | DATA: 9 | FS: 30 10 | DATASET: PURE 11 | DO_PREPROCESS: False # if first time, should be true 12 | DATA_FORMAT: NDCHW 13 | DATA_PATH: 
"/gscratch/ubicomp/xliu0/data3/mnt/Datasets/PURE/RawData" # Raw dataset path, need to be updated 14 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 15 | EXP_DATA_NAME: "" 16 | BEGIN: 0.0 17 | END: 0.8 18 | PREPROCESS : 19 | DATA_TYPE: ['DiffNormalized','Standardized'] 20 | LABEL_TYPE: DiffNormalized 21 | DO_CHUNK: True 22 | CHUNK_LENGTH: 180 23 | DYNAMIC_DETECTION: False 24 | DYNAMIC_DETECTION_FREQUENCY : 180 25 | CROP_FACE: True 26 | LARGE_FACE_BOX: True 27 | LARGE_BOX_COEF: 1.5 28 | H: 72 29 | W: 72 30 | VALID: 31 | DATA: 32 | FS: 30 33 | DATASET: PURE 34 | DO_PREPROCESS: False # if first time, should be true 35 | DATA_FORMAT: NDCHW 36 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/PURE/RawData" # Raw dataset path, need to be updated 37 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 38 | EXP_DATA_NAME: "" 39 | BEGIN: 0.8 40 | END: 1.0 41 | PREPROCESS: 42 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 43 | LABEL_TYPE: DiffNormalized 44 | DO_CHUNK: True 45 | CHUNK_LENGTH: 180 46 | DYNAMIC_DETECTION: False 47 | DYNAMIC_DETECTION_FREQUENCY : 180 48 | CROP_FACE: True 49 | LARGE_FACE_BOX: True 50 | LARGE_BOX_COEF: 1.5 51 | H: 72 52 | W: 72 53 | TEST: 54 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 55 | USE_LAST_EPOCH: True # to use provided validation dataset to find the best epoch, should be false 56 | DATA: 57 | FS: 30 58 | DATASET: UBFC 59 | DO_PREPROCESS: False # if first time, should be true 60 | DATA_FORMAT: NDCHW 61 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/UBFC/RawData" # need to be updated 62 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 63 | EXP_DATA_NAME: "" 64 | BEGIN: 0.0 65 | END: 1.0 66 | PREPROCESS: 67 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 68 | 
LABEL_TYPE: DiffNormalized 69 | DO_CHUNK: True 70 | CHUNK_LENGTH: 180 71 | DYNAMIC_DETECTION: False 72 | DYNAMIC_DETECTION_FREQUENCY : 180 73 | CROP_FACE: True 74 | LARGE_FACE_BOX: True 75 | LARGE_BOX_COEF: 1.5 76 | H: 72 77 | W: 72 78 | DEVICE: cuda:0 79 | NUM_OF_GPU_TRAIN: 1 80 | LOG: 81 | PATH: runs/exp 82 | MODEL: 83 | DROP_RATE: 0.2 84 | NAME: DeepPhys 85 | INFERENCE: 86 | BATCH_SIZE: 4 87 | EVALUATION_METHOD: "FFT" # "FFT" or "peak detection" 88 | MODEL_PATH: "" 89 | 90 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/train_configs/PURE_PURE_UBFC_EFFICIENTPHYS.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "train_and_test" # "train_and_test" or "only_test" 3 | TRAIN: 4 | BATCH_SIZE: 4 5 | EPOCHS: 30 6 | LR: 9e-3 7 | MODEL_FILE_NAME: PURE_PURE_UBFC_efficientphys 8 | DATA: 9 | FS: 30 10 | DATASET: PURE 11 | DO_PREPROCESS: False # if first time, should be true 12 | DATA_FORMAT: NDCHW 13 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/PURE/RawData" # Raw dataset path, need to be updated 14 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 15 | EXP_DATA_NAME: "" 16 | BEGIN: 0.0 17 | END: 0.8 18 | PREPROCESS : 19 | DATA_TYPE: ['Standardized'] 20 | LABEL_TYPE: DiffNormalized 21 | DO_CHUNK: True 22 | CHUNK_LENGTH: 180 23 | DYNAMIC_DETECTION: False 24 | DYNAMIC_DETECTION_FREQUENCY : 180 25 | CROP_FACE: True 26 | LARGE_FACE_BOX: True 27 | LARGE_BOX_COEF: 1.5 28 | H: 72 29 | W: 72 30 | VALID: 31 | DATA: 32 | FS: 30 33 | DATASET: PURE 34 | DO_PREPROCESS: False # if first time, should be true 35 | DATA_FORMAT: NDCHW 36 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/PURE/RawData" # Raw dataset path, need to be updated 37 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save 
path, need to be updated 38 | EXP_DATA_NAME: "" 39 | BEGIN: 0.8 40 | END: 1.0 41 | PREPROCESS: 42 | DATA_TYPE: ['Standardized' ] 43 | LABEL_TYPE: DiffNormalized 44 | DO_CHUNK: True 45 | CHUNK_LENGTH: 180 46 | DYNAMIC_DETECTION: False 47 | DYNAMIC_DETECTION_FREQUENCY : 180 48 | CROP_FACE: True 49 | LARGE_FACE_BOX: True 50 | LARGE_BOX_COEF: 1.5 51 | H: 72 52 | W: 72 53 | TEST: 54 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 55 | USE_LAST_EPOCH: True # to use provided validation dataset to find the best epoch, should be false 56 | DATA: 57 | FS: 30 58 | DATASET: UBFC 59 | DO_PREPROCESS: False # if first time, should be true 60 | DATA_FORMAT: NDCHW 61 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/UBFC/RawData" # Raw dataset path, need to be updated 62 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 63 | EXP_DATA_NAME: "" 64 | BEGIN: 0.0 65 | END: 1.0 66 | PREPROCESS: 67 | DATA_TYPE: ['Standardized' ] 68 | LABEL_TYPE: DiffNormalized 69 | DO_CHUNK: True 70 | CHUNK_LENGTH: 180 71 | DYNAMIC_DETECTION: False 72 | DYNAMIC_DETECTION_FREQUENCY : 180 73 | CROP_FACE: True 74 | LARGE_FACE_BOX: True 75 | LARGE_BOX_COEF: 1.5 76 | H: 72 77 | W: 72 78 | DEVICE: cuda:0 79 | NUM_OF_GPU_TRAIN: 1 80 | LOG: 81 | PATH: runs/exp 82 | MODEL: 83 | DROP_RATE: 0.2 84 | NAME: EfficientPhys 85 | EFFICIENTPHYS: 86 | FRAME_DEPTH: 10 87 | INFERENCE: 88 | BATCH_SIZE: 4 89 | EVALUATION_METHOD: "FFT" # "FFT" or "peak detection" 90 | MODEL_PATH: "" 91 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/train_configs/PURE_PURE_UBFC_PHYSNET_BASIC.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "train_and_test" # "train_and_test" or "only_test" 3 | TRAIN: 4 | BATCH_SIZE: 4 5 | EPOCHS: 30 6 | LR: 9e-3 7 | MODEL_FILE_NAME: PURE_PURE_UBFC_physnet_diffnormalized 8 | DATA: 9 | FS: 30 
10 | DATASET: PURE 11 | DO_PREPROCESS: False # if first time, should be true 12 | DATA_FORMAT: NCDHW 13 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/PURE/RawData" # Raw dataset path, need to be updated 14 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 15 | EXP_DATA_NAME: "" 16 | BEGIN: 0.0 17 | END: 0.8 18 | PREPROCESS : 19 | DATA_TYPE: ['DiffNormalized'] #if use physnet, should be DiffNormalized 20 | LABEL_TYPE: DiffNormalized 21 | DO_CHUNK: True 22 | CHUNK_LENGTH: 128 #only support for factor of 512 23 | DYNAMIC_DETECTION: False 24 | DYNAMIC_DETECTION_FREQUENCY : 128 25 | CROP_FACE: True 26 | LARGE_FACE_BOX: True 27 | LARGE_BOX_COEF: 1.5 28 | H: 72 29 | W: 72 30 | VALID: 31 | DATA: 32 | FS: 30 33 | DATASET: PURE 34 | DO_PREPROCESS: False # if first time, should be true 35 | DATA_FORMAT: NCDHW 36 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/PURE/RawData" # Raw dataset path, need to be updated 37 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 38 | EXP_DATA_NAME: "" 39 | BEGIN: 0.8 40 | END: 1.0 41 | PREPROCESS : 42 | DATA_TYPE: ['DiffNormalized'] #if use physnet, should be DiffNormalized 43 | LABEL_TYPE: DiffNormalized 44 | DO_CHUNK: True 45 | CHUNK_LENGTH: 128 #only support for factor of 512 46 | DYNAMIC_DETECTION: False 47 | DYNAMIC_DETECTION_FREQUENCY : 128 48 | CROP_FACE: True 49 | LARGE_FACE_BOX: True 50 | LARGE_BOX_COEF: 1.5 51 | H: 72 52 | W: 72 53 | TEST: 54 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 55 | USE_LAST_EPOCH: True # to use provided validation dataset to find the best epoch, should be false 56 | DATA: 57 | FS: 30 58 | DATASET: UBFC 59 | DO_PREPROCESS: False # if first time, should be true 60 | DATA_FORMAT: NCDHW 61 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/PURE/RawData" # Raw dataset path, need to be updated 62 | CACHED_PATH: 
"/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 63 | EXP_DATA_NAME: "" 64 | BEGIN: 0.0 65 | END: 1.0 66 | PREPROCESS : 67 | DATA_TYPE: ['DiffNormalized'] #if use physnet, should be DiffNormalized 68 | LABEL_TYPE: DiffNormalized 69 | DO_CHUNK: True 70 | CHUNK_LENGTH: 128 #only support for factor of 512 71 | DYNAMIC_DETECTION: False 72 | DYNAMIC_DETECTION_FREQUENCY : 128 73 | CROP_FACE: True 74 | LARGE_FACE_BOX: True 75 | LARGE_BOX_COEF: 1.5 76 | H: 72 77 | W: 72 78 | DEVICE: cuda:0 79 | NUM_OF_GPU_TRAIN: 1 80 | LOG: 81 | PATH: runs/exp 82 | MODEL: 83 | DROP_RATE: 0.2 84 | NAME: Physnet 85 | PHYSNET: 86 | FRAME_NUM: 128 87 | INFERENCE: 88 | BATCH_SIZE: 4 89 | EVALUATION_METHOD: "FFT" # "FFT" or "peak detection" 90 | MODEL_PATH: "" 91 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/train_configs/PURE_PURE_UBFC_TSCAN_BASIC.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "train_and_test" # "train_and_test" or "only_test" 3 | TRAIN: 4 | BATCH_SIZE: 4 5 | EPOCHS: 30 6 | LR: 9e-3 7 | MODEL_FILE_NAME: PURE_PURE_UBFC_tscan 8 | DATA: 9 | FS: 30 10 | DATASET: PURE 11 | DO_PREPROCESS: True # if first time, should be true 12 | DATA_FORMAT: NDCHW 13 | DATA_PATH: "/data1/acsp/toolbox_data/PURE/RawData" # Raw dataset path, need to be updated 14 | CACHED_PATH: "/data/rPPG_dataset/processed_dataset_makeup" # Processed dataset save path, need to be updated 15 | EXP_DATA_NAME: "" 16 | BEGIN: 0.0 17 | END: 0.8 18 | PREPROCESS : 19 | DATA_TYPE: ['DiffNormalized','Standardized'] 20 | LABEL_TYPE: DiffNormalized 21 | DO_CHUNK: True 22 | CHUNK_LENGTH: 180 23 | DYNAMIC_DETECTION: False 24 | DYNAMIC_DETECTION_FREQUENCY : 180 25 | CROP_FACE: True 26 | LARGE_FACE_BOX: True 27 | LARGE_BOX_COEF: 1.5 28 | H: 72 29 | W: 72 30 | VALID: 31 | DATA: 32 | FS: 30 33 | DATASET: PURE 34 | 
DO_PREPROCESS: True # if first time, should be true 35 | DATA_FORMAT: NDCHW 36 | DATA_PATH: "/data1/acsp/toolbox_data/PURE/RawData" # Raw dataset path, need to be updated 37 | CACHED_PATH: "/data/rPPG_dataset/processed_dataset_makeup" # Processed dataset save path, need to be updated 38 | EXP_DATA_NAME: "" 39 | BEGIN: 0.8 40 | END: 1.0 41 | PREPROCESS: 42 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 43 | LABEL_TYPE: DiffNormalized 44 | DO_CHUNK: True 45 | CHUNK_LENGTH: 180 46 | DYNAMIC_DETECTION: False 47 | DYNAMIC_DETECTION_FREQUENCY : 180 48 | CROP_FACE: True 49 | LARGE_FACE_BOX: True 50 | LARGE_BOX_COEF: 1.5 51 | H: 72 52 | W: 72 53 | TEST: 54 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 55 | USE_LAST_EPOCH: True # to use provided validation dataset to find the best epoch, should be false 56 | DATA: 57 | FS: 30 58 | DATASET: UBFC 59 | DO_PREPROCESS: True # if first time, should be true 60 | DATA_FORMAT: NDCHW 61 | DATA_PATH: "/data1/acsp/toolbox_data/UBFC/RawData/" # need to be updated 62 | CACHED_PATH: "/data/rPPG_dataset/processed_dataset_makeup" # Processed dataset save path, need to be updated 63 | EXP_DATA_NAME: "" 64 | BEGIN: 0.0 65 | END: 1.0 66 | PREPROCESS: 67 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 68 | LABEL_TYPE: DiffNormalized 69 | DO_CHUNK: True 70 | CHUNK_LENGTH: 180 71 | DYNAMIC_DETECTION: False 72 | DYNAMIC_DETECTION_FREQUENCY : 180 73 | CROP_FACE: True 74 | LARGE_FACE_BOX: True 75 | LARGE_BOX_COEF: 1.5 76 | H: 72 77 | W: 72 78 | DEVICE: cuda:0 79 | NUM_OF_GPU_TRAIN: 1 80 | LOG: 81 | PATH: runs/exp 82 | MODEL: 83 | DROP_RATE: 0.2 84 | NAME: Tscan 85 | TSCAN: 86 | FRAME_DEPTH: 10 87 | INFERENCE: 88 | BATCH_SIZE: 4 89 | EVALUATION_METHOD: "FFT" # "FFT" or "peak detection" 90 | MODEL_PATH: "" 91 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/train_configs/SCAMPS_SCAMPS_MMPD_TSCAN_BASIC.yaml: -------------------------------------------------------------------------------- 1 | 
BASE: [''] 2 | TOOLBOX_MODE: "train_and_test" # "train_and_test" or "only_test" 3 | TRAIN: 4 | BATCH_SIZE: 4 5 | EPOCHS: 30 6 | LR: 9e-3 7 | MODEL_FILE_NAME: SCAMPS_SCAMPS_MMPD_tscan 8 | DATA: 9 | FS: 30 10 | DATASET: SCAMPS 11 | DO_PREPROCESS: False # if first time, should be true 12 | DATA_FORMAT: NDCHW 13 | DATA_PATH: "/data1/acsp/toolbox_data/scamps/RawData/Train/" # Raw dataset path, need to be updated 14 | CACHED_PATH: "/data1/acsp/finalpreprocess/" # Processed dataset save path, need to be updated 15 | EXP_DATA_NAME: "" 16 | BEGIN: 0.0 17 | END: 0.8 18 | PREPROCESS : 19 | DATA_TYPE: ['DiffNormalized','Standardized'] 20 | LABEL_TYPE: DiffNormalized 21 | DO_CHUNK: True 22 | CHUNK_LENGTH: 180 23 | DYNAMIC_DETECTION: False 24 | DYNAMIC_DETECTION_FREQUENCY : 180 25 | CROP_FACE: True 26 | LARGE_FACE_BOX: True 27 | LARGE_BOX_COEF: 1.5 28 | H: 72 29 | W: 72 30 | VALID: 31 | DATA: 32 | FS: 30 33 | DATASET: SCAMPS 34 | DO_PREPROCESS: False # if first time, should be true 35 | DATA_FORMAT: NDCHW 36 | DATA_PATH: "/data1/acsp/toolbox_data/scamps/RawData/Train/" # Raw dataset path, need to be updated 37 | CACHED_PATH: "/data1/acsp/finalpreprocess/" # Processed dataset save path, need to be updated 38 | EXP_DATA_NAME: "" 39 | BEGIN: 0.8 40 | END: 1.0 41 | PREPROCESS: 42 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 43 | LABEL_TYPE: DiffNormalized 44 | DO_CHUNK: True 45 | CHUNK_LENGTH: 180 46 | DYNAMIC_DETECTION: False 47 | DYNAMIC_DETECTION_FREQUENCY : 180 48 | CROP_FACE: True 49 | LARGE_FACE_BOX: True 50 | LARGE_BOX_COEF: 1.5 51 | H: 72 52 | W: 72 53 | TEST: 54 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 55 | USE_LAST_EPOCH: True # to use provided validation dataset to find the best epoch, should be false 56 | DATA: 57 | INFO: 58 | LIGHT: [1, 2, 3, 4] # 'LED-low','LED-high','Incandescent','Nature' 59 | MOTION: [1, 2, 3, 4] # 'Stationary', 'Rotation', 'Talking', 'Walking' 60 | EXERCISE: [1, 2] # True, False 61 | SKIN_COLOR: [3, 4, 5, 6] # 3, 4, 5, 6 62 | GENDER: [1, 2] # 
'male','female' 63 | GLASSER: [1, 2] # True, False 64 | HAIR_COVER: [1, 2] # True, False 65 | MAKEUP: [1, 2] # True, False 66 | FS: 30 67 | DATASET: MMPD 68 | DO_PREPROCESS: True # if first time, should be true 69 | DATA_FORMAT: NDCHW 70 | DATA_PATH: "/data/rPPG_dataset/mat_dataset" # Raw dataset path, need to be updated 71 | CACHED_PATH: "/data/rPPG_dataset/processed_dataset" # Processed dataset save path, need to be updated 72 | EXP_DATA_NAME: "" 73 | BEGIN: 0.0 74 | END: 1.0 75 | PREPROCESS: 76 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 77 | LABEL_TYPE: DiffNormalized 78 | DO_CHUNK: True 79 | CHUNK_LENGTH: 180 80 | DYNAMIC_DETECTION: False 81 | DYNAMIC_DETECTION_FREQUENCY : 180 82 | CROP_FACE: True 83 | LARGE_FACE_BOX: True 84 | LARGE_BOX_COEF: 1.5 85 | H: 72 86 | W: 72 87 | DEVICE: cuda:0 88 | NUM_OF_GPU_TRAIN: 1 89 | LOG: 90 | PATH: runs/exp 91 | MODEL: 92 | DROP_RATE: 0.2 93 | NAME: Tscan 94 | TSCAN: 95 | FRAME_DEPTH: 10 96 | INFERENCE: 97 | BATCH_SIZE: 4 98 | EVALUATION_METHOD: "FFT" # "FFT" or "peak detection" 99 | MODEL_PATH: "/data/rPPG-Toolbox_MMPD/final_model_release/SCAMPS_TSCAN.pth" 100 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/train_configs/SCAMPS_SCAMPS_PURE_DEEPPHYS_BASIC.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "train_and_test" # "train_and_test" or "only_test" 3 | TRAIN: 4 | BATCH_SIZE: 4 5 | EPOCHS: 30 6 | LR: 9e-3 7 | MODEL_FILE_NAME: SCAMPS_SCAMPS_PURE_deepphys 8 | DATA: 9 | FS: 30 10 | DATASET: SCAMPS 11 | DO_PREPROCESS: False # if first time, should be true 12 | DATA_FORMAT: NDCHW 13 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/SCAMPS/RawData/Train" # Raw dataset path, need to be updated 14 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 15 | EXP_DATA_NAME: "" 16 | BEGIN: 0.0 17 | END: 1.0 
18 | PREPROCESS : 19 | DATA_TYPE: ['DiffNormalized','Standardized'] 20 | LABEL_TYPE: DiffNormalized 21 | DO_CHUNK: True 22 | CHUNK_LENGTH: 180 23 | DYNAMIC_DETECTION: False 24 | DYNAMIC_DETECTION_FREQUENCY : 180 25 | CROP_FACE: True 26 | LARGE_FACE_BOX: True 27 | LARGE_BOX_COEF: 1.5 28 | H: 72 29 | W: 72 30 | VALID: 31 | DATA: 32 | FS: 30 33 | DATASET: SCAMPS 34 | DO_PREPROCESS: False # if first time, should be true 35 | DATA_FORMAT: NDCHW 36 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/SCAMPS/RawData/Val" # Raw dataset path, need to be updated 37 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 38 | EXP_DATA_NAME: "" 39 | BEGIN: 0.0 40 | END: 1.0 41 | PREPROCESS: 42 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 43 | LABEL_TYPE: DiffNormalized 44 | DO_CHUNK: True 45 | CHUNK_LENGTH: 180 46 | DYNAMIC_DETECTION: False 47 | DYNAMIC_DETECTION_FREQUENCY : 180 48 | CROP_FACE: True 49 | LARGE_FACE_BOX: True 50 | LARGE_BOX_COEF: 1.5 51 | H: 72 52 | W: 72 53 | TEST: 54 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 55 | USE_LAST_EPOCH: True # to use provided validation dataset to find the best epoch, should be false 56 | DATA: 57 | FS: 30 58 | DATASET: PURE 59 | DO_PREPROCESS: False # if first time, should be true 60 | DATA_FORMAT: NDCHW 61 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/PURE/RawData" # Raw dataset path, need to be updated 62 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 63 | EXP_DATA_NAME: "" 64 | BEGIN: 0.0 65 | END: 1.0 66 | PREPROCESS: 67 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 68 | LABEL_TYPE: DiffNormalized 69 | DO_CHUNK: True 70 | CHUNK_LENGTH: 180 71 | DYNAMIC_DETECTION: False 72 | DYNAMIC_DETECTION_FREQUENCY : 180 73 | CROP_FACE: True 74 | LARGE_FACE_BOX: True 75 | LARGE_BOX_COEF: 1.5 76 | H: 72 77 | W: 72 78 | DEVICE: cuda:0 79 | 
NUM_OF_GPU_TRAIN: 1 80 | LOG: 81 | PATH: runs/exp 82 | MODEL: 83 | DROP_RATE: 0.2 84 | NAME: DeepPhys 85 | INFERENCE: 86 | BATCH_SIZE: 4 87 | EVALUATION_METHOD: "FFT" # "FFT" or "peak detection" 88 | MODEL_PATH: "" 89 | 90 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/train_configs/SCAMPS_SCAMPS_PURE_PHYSNET_BASIC.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "train_and_test" # "train_and_test" or "only_test" 3 | TRAIN: 4 | BATCH_SIZE: 4 5 | EPOCHS: 30 6 | LR: 9e-3 7 | MODEL_FILE_NAME: SCAMPS_SCAMPS_PURE_physnet 8 | DATA: 9 | FS: 30 10 | DATASET: SCAMPS 11 | DO_PREPROCESS: False # if first time, should be true 12 | DATA_FORMAT: NCDHW 13 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/SCAMPS/RawData/Train" # Raw dataset path, need to be updated 14 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 15 | EXP_DATA_NAME: "" 16 | BEGIN: 0.0 17 | END: 1.0 18 | PREPROCESS : 19 | DATA_TYPE: ['DiffNormalized'] #if use physnet, should be DiffNormalized 20 | LABEL_TYPE: DiffNormalized 21 | DO_CHUNK: True 22 | CHUNK_LENGTH: 128 #only support for factor of 512 23 | DYNAMIC_DETECTION: False 24 | DYNAMIC_DETECTION_FREQUENCY : 128 25 | CROP_FACE: True 26 | LARGE_FACE_BOX: True 27 | LARGE_BOX_COEF: 1.5 28 | H: 72 29 | W: 72 30 | VALID: 31 | DATA: 32 | FS: 30 33 | DATASET: SCAMPS 34 | DO_PREPROCESS: False # if first time, should be true 35 | DATA_FORMAT: NCDHW 36 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/SCAMPS/RawData/Val" # Raw dataset path, need to be updated 37 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 38 | EXP_DATA_NAME: "" 39 | BEGIN: 0.0 40 | END: 1.0 41 | PREPROCESS : 42 | DATA_TYPE: ['DiffNormalized'] #if use physnet, should be 
DiffNormalized 43 | LABEL_TYPE: DiffNormalized 44 | DO_CHUNK: True 45 | CHUNK_LENGTH: 128 #only support for factor of 512 46 | DYNAMIC_DETECTION: False 47 | DYNAMIC_DETECTION_FREQUENCY : 128 48 | CROP_FACE: True 49 | LARGE_FACE_BOX: True 50 | LARGE_BOX_COEF: 1.5 51 | H: 72 52 | W: 72 53 | TEST: 54 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 55 | USE_LAST_EPOCH: True # to use provided validation dataset to find the best epoch, should be false 56 | DATA: 57 | FS: 30 58 | DATASET: PURE 59 | DO_PREPROCESS: False # if first time, should be true 60 | DATA_FORMAT: NCDHW 61 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/PURE/RawData" # Raw dataset path, need to be updated 62 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 63 | EXP_DATA_NAME: "" 64 | BEGIN: 0.0 65 | END: 1.0 66 | PREPROCESS : 67 | DATA_TYPE: ['DiffNormalized'] #if use physnet, should be DiffNormalized 68 | LABEL_TYPE: DiffNormalized 69 | DO_CHUNK: True 70 | CHUNK_LENGTH: 128 #only support for factor of 512 71 | DYNAMIC_DETECTION: False 72 | DYNAMIC_DETECTION_FREQUENCY : 128 73 | CROP_FACE: True 74 | LARGE_FACE_BOX: True 75 | LARGE_BOX_COEF: 1.5 76 | H: 72 77 | W: 72 78 | DEVICE: cuda:0 79 | NUM_OF_GPU_TRAIN: 1 80 | LOG: 81 | PATH: runs/exp 82 | MODEL: 83 | DROP_RATE: 0.2 84 | NAME: Physnet 85 | PHYSNET: 86 | FRAME_NUM: 128 87 | INFERENCE: 88 | BATCH_SIZE: 4 89 | EVALUATION_METHOD: "FFT" # "FFT" or "peak detection" 90 | MODEL_PATH: "" 91 | 92 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/train_configs/SCAMPS_SCAMPS_PURE_TSCAN_BASIC.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "train_and_test" # "train_and_test" or "only_test" 3 | TRAIN: 4 | BATCH_SIZE: 4 5 | EPOCHS: 30 6 | LR: 9e-3 7 | MODEL_FILE_NAME: SCAMPS_SCAMPS_PURE_tscan 8 | DATA: 9 | FS: 30 10 | DATASET: SCAMPS 11 | 
DO_PREPROCESS: False # if first time, should be true 12 | DATA_FORMAT: NDCHW 13 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/SCAMPS/RawData/Train" # Raw dataset path, need to be updated 14 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 15 | EXP_DATA_NAME: "" 16 | BEGIN: 0.0 17 | END: 1.0 18 | PREPROCESS : 19 | DATA_TYPE: ['DiffNormalized','Standardized'] 20 | LABEL_TYPE: DiffNormalized 21 | DO_CHUNK: True 22 | CHUNK_LENGTH: 180 23 | DYNAMIC_DETECTION: False 24 | DYNAMIC_DETECTION_FREQUENCY : 180 25 | CROP_FACE: True 26 | LARGE_FACE_BOX: True 27 | LARGE_BOX_COEF: 1.5 28 | H: 72 29 | W: 72 30 | VALID: 31 | DATA: 32 | FS: 30 33 | DATASET: SCAMPS 34 | DO_PREPROCESS: False # if first time, should be true 35 | DATA_FORMAT: NDCHW 36 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/SCAMPS/RawData/Val" # Raw dataset path, need to be updated 37 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 38 | EXP_DATA_NAME: "" 39 | BEGIN: 0.0 40 | END: 1.0 41 | PREPROCESS: 42 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 43 | LABEL_TYPE: DiffNormalized 44 | DO_CHUNK: True 45 | CHUNK_LENGTH: 180 46 | DYNAMIC_DETECTION: False 47 | DYNAMIC_DETECTION_FREQUENCY : 180 48 | CROP_FACE: True 49 | LARGE_FACE_BOX: True 50 | LARGE_BOX_COEF: 1.5 51 | H: 72 52 | W: 72 53 | TEST: 54 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 55 | USE_LAST_EPOCH: True # to use provided validation dataset to find the best epoch, should be false 56 | DATA: 57 | FS: 30 58 | DATASET: PURE 59 | DO_PREPROCESS: False # if first time, should be true 60 | DATA_FORMAT: NDCHW 61 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/PURE/RawData" # Raw dataset path, need to be updated 62 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 
63 | EXP_DATA_NAME: "" 64 | BEGIN: 0.0 65 | END: 1.0 66 | PREPROCESS: 67 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 68 | LABEL_TYPE: DiffNormalized 69 | DO_CHUNK: True 70 | CHUNK_LENGTH: 180 71 | DYNAMIC_DETECTION: False 72 | DYNAMIC_DETECTION_FREQUENCY : 180 73 | CROP_FACE: True 74 | LARGE_FACE_BOX: True 75 | LARGE_BOX_COEF: 1.5 76 | H: 72 77 | W: 72 78 | DEVICE: cuda:0 79 | NUM_OF_GPU_TRAIN: 1 80 | LOG: 81 | PATH: runs/exp 82 | MODEL: 83 | DROP_RATE: 0.2 84 | NAME: Tscan 85 | TSCAN: 86 | FRAME_DEPTH: 10 87 | INFERENCE: 88 | BATCH_SIZE: 4 89 | EVALUATION_METHOD: "FFT" # "FFT" or "peak detection" 90 | MODEL_PATH: "" 91 | 92 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/train_configs/SCAMPS_SCAMPS_UBFC_DEEPPHYS_BASIC.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "train_and_test" # "train_and_test" or "only_test" 3 | TRAIN: 4 | BATCH_SIZE: 4 5 | EPOCHS: 30 6 | LR: 9e-3 7 | MODEL_FILE_NAME: SCAMPS_SCAMPS_UBFC_deepphys 8 | DATA: 9 | FS: 30 10 | DATASET: SCAMPS 11 | DO_PREPROCESS: False # if first time, should be true 12 | DATA_FORMAT: NDCHW 13 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/SCAMPS/RawData/Train" # Raw dataset path, need to be updated 14 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 15 | EXP_DATA_NAME: "" 16 | BEGIN: 0.0 17 | END: 1.0 18 | PREPROCESS : 19 | DATA_TYPE: ['DiffNormalized','Standardized'] 20 | LABEL_TYPE: DiffNormalized 21 | DO_CHUNK: True 22 | CHUNK_LENGTH: 180 23 | DYNAMIC_DETECTION: False 24 | DYNAMIC_DETECTION_FREQUENCY : 180 25 | CROP_FACE: True 26 | LARGE_FACE_BOX: True 27 | LARGE_BOX_COEF: 1.5 28 | H: 72 29 | W: 72 30 | VALID: 31 | DATA: 32 | FS: 30 33 | DATASET: SCAMPS 34 | DO_PREPROCESS: False # if first time, should be true 35 | DATA_FORMAT: NDCHW 36 | DATA_PATH: 
"/gscratch/ubicomp/xliu0/data3/mnt/Datasets/SCAMPS/RawData/Val" # Raw dataset path, need to be updated 37 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 38 | EXP_DATA_NAME: "" 39 | BEGIN: 0.0 40 | END: 1.0 41 | PREPROCESS: 42 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 43 | LABEL_TYPE: DiffNormalized 44 | DO_CHUNK: True 45 | CHUNK_LENGTH: 180 46 | DYNAMIC_DETECTION: False 47 | DYNAMIC_DETECTION_FREQUENCY : 180 48 | CROP_FACE: True 49 | LARGE_FACE_BOX: True 50 | LARGE_BOX_COEF: 1.5 51 | H: 72 52 | W: 72 53 | TEST: 54 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 55 | USE_LAST_EPOCH: True # to use provided validation dataset to find the best epoch, should be false 56 | DATA: 57 | FS: 30 58 | DATASET: UBFC 59 | DO_PREPROCESS: False # if first time, should be true 60 | DATA_FORMAT: NDCHW 61 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/UBFC/RawData" # Raw dataset path, need to be updated 62 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 63 | EXP_DATA_NAME: "" 64 | BEGIN: 0.0 65 | END: 1.0 66 | PREPROCESS: 67 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 68 | LABEL_TYPE: DiffNormalized 69 | DO_CHUNK: True 70 | CHUNK_LENGTH: 180 71 | DYNAMIC_DETECTION: False 72 | DYNAMIC_DETECTION_FREQUENCY : 180 73 | CROP_FACE: True 74 | LARGE_FACE_BOX: True 75 | LARGE_BOX_COEF: 1.5 76 | H: 72 77 | W: 72 78 | DEVICE: cuda:0 79 | NUM_OF_GPU_TRAIN: 1 80 | LOG: 81 | PATH: runs/exp 82 | MODEL: 83 | DROP_RATE: 0.2 84 | NAME: DeepPhys 85 | INFERENCE: 86 | BATCH_SIZE: 4 87 | EVALUATION_METHOD: "FFT" # "FFT" or "peak detection" 88 | MODEL_PATH: "" 89 | 90 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/train_configs/SCAMPS_SCAMPS_UBFC_EFFICIENTPHYS.yaml: -------------------------------------------------------------------------------- 1 
| BASE: [''] 2 | TOOLBOX_MODE: "train_and_test" # "train_and_test" or "only_test" 3 | TRAIN: 4 | BATCH_SIZE: 4 5 | EPOCHS: 30 6 | LR: 9e-3 7 | MODEL_FILE_NAME: SCAMPS_SCAMPS_PURE_efficientphys 8 | DATA: 9 | FS: 30 10 | DATASET: SCAMPS 11 | DO_PREPROCESS: False # if first time, should be true 12 | DATA_FORMAT: NDCHW 13 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/SCAMPS/RawData/Train" # Raw dataset path, need to be updated 14 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 15 | EXP_DATA_NAME: "" 16 | BEGIN: 0.0 17 | END: 1.0 18 | PREPROCESS : 19 | DATA_TYPE: ['Standardized'] 20 | LABEL_TYPE: DiffNormalized 21 | DO_CHUNK: True 22 | CHUNK_LENGTH: 180 23 | DYNAMIC_DETECTION: False 24 | DYNAMIC_DETECTION_FREQUENCY : 180 25 | CROP_FACE: True 26 | LARGE_FACE_BOX: True 27 | LARGE_BOX_COEF: 1.5 28 | H: 72 29 | W: 72 30 | VALID: 31 | DATA: 32 | FS: 30 33 | DATASET: SCAMPS 34 | DO_PREPROCESS: False # if first time, should be true 35 | DATA_FORMAT: NDCHW 36 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/SCAMPS/RawData/Val" # Raw dataset path, need to be updated 37 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 38 | EXP_DATA_NAME: "" 39 | BEGIN: 0.0 40 | END: 1.0 41 | PREPROCESS: 42 | DATA_TYPE: ['Standardized'] 43 | LABEL_TYPE: DiffNormalized 44 | DO_CHUNK: True 45 | CHUNK_LENGTH: 180 46 | DYNAMIC_DETECTION: False 47 | DYNAMIC_DETECTION_FREQUENCY : 180 48 | CROP_FACE: True 49 | LARGE_FACE_BOX: True 50 | LARGE_BOX_COEF: 1.5 51 | H: 72 52 | W: 72 53 | TEST: 54 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 55 | USE_LAST_EPOCH: True 56 | DATA: 57 | FS: 30 58 | DATASET: UBFC 59 | DO_PREPROCESS: False # if first time, should be true 60 | DATA_FORMAT: NDCHW 61 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/UBFC/RawData" # Raw dataset path, need to be updated 62 | 
CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 63 | EXP_DATA_NAME: "" 64 | BEGIN: 0.0 65 | END: 1.0 66 | PREPROCESS: 67 | DATA_TYPE: ['Standardized'] 68 | LABEL_TYPE: DiffNormalized 69 | DO_CHUNK: True 70 | CHUNK_LENGTH: 180 71 | DYNAMIC_DETECTION: False 72 | DYNAMIC_DETECTION_FREQUENCY : 180 73 | CROP_FACE: True 74 | LARGE_FACE_BOX: True 75 | LARGE_BOX_COEF: 1.5 76 | H: 72 77 | W: 72 78 | DEVICE: cuda:0 79 | NUM_OF_GPU_TRAIN: 1 80 | LOG: 81 | PATH: runs/exp 82 | MODEL: 83 | DROP_RATE: 0.2 84 | NAME: EfficientPhys 85 | EFFICIENTPHYS: 86 | FRAME_DEPTH: 10 87 | INFERENCE: 88 | BATCH_SIZE: 4 89 | EVALUATION_METHOD: FFT # "FFT" or "peak detection" 90 | MODEL_PATH: "" 91 | 92 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/train_configs/SCAMPS_SCAMPS_UBFC_PHYSNET_BASIC.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "train_and_test" # "train_and_test" or "only_test" 3 | TRAIN: 4 | BATCH_SIZE: 4 5 | EPOCHS: 30 6 | LR: 9e-3 7 | MODEL_FILE_NAME: SCAMPS_SCAMPS_UBFC_physnet 8 | DATA: 9 | FS: 30 10 | DATASET: SCAMPS 11 | DO_PREPROCESS: False # if first time, should be true 12 | DATA_FORMAT: NCDHW 13 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/SCAMPS/RawData/Train" # Raw dataset path, need to be updated 14 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 15 | EXP_DATA_NAME: "" 16 | BEGIN: 0.0 17 | END: 1.0 18 | PREPROCESS : 19 | DATA_TYPE: ['DiffNormalized'] #if use physnet, should be DiffNormalized 20 | LABEL_TYPE: DiffNormalized 21 | DO_CHUNK: True 22 | CHUNK_LENGTH: 128 #only support for factor of 512 23 | DYNAMIC_DETECTION: False 24 | DYNAMIC_DETECTION_FREQUENCY : 128 25 | CROP_FACE: True 26 | LARGE_FACE_BOX: True 27 | LARGE_BOX_COEF: 1.5 28 | H: 72 
29 | W: 72 30 | VALID: 31 | DATA: 32 | FS: 30 33 | DATASET: SCAMPS 34 | DO_PREPROCESS: False # if first time, should be true 35 | DATA_FORMAT: NCDHW 36 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/SCAMPS/RawData/Val" # Raw dataset path, need to be updated 37 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 38 | EXP_DATA_NAME: "" 39 | BEGIN: 0.0 40 | END: 1.0 41 | PREPROCESS : 42 | DATA_TYPE: ['DiffNormalized'] #if use physnet, should be DiffNormalized 43 | LABEL_TYPE: DiffNormalized 44 | DO_CHUNK: True 45 | CHUNK_LENGTH: 128 #only support for factor of 512 46 | DYNAMIC_DETECTION: False 47 | DYNAMIC_DETECTION_FREQUENCY : 128 48 | CROP_FACE: True 49 | LARGE_FACE_BOX: True 50 | LARGE_BOX_COEF: 1.5 51 | H: 72 52 | W: 72 53 | TEST: 54 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 55 | USE_LAST_EPOCH: True # to use provided validation dataset to find the best epoch, should be false 56 | DATA: 57 | FS: 30 58 | DATASET: UBFC 59 | DO_PREPROCESS: False # if first time, should be true 60 | DATA_FORMAT: NCDHW 61 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/UBFC/RawData" # Raw dataset path, need to be updated 62 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 63 | EXP_DATA_NAME: "" 64 | BEGIN: 0.0 65 | END: 1.0 66 | PREPROCESS : 67 | DATA_TYPE: ['DiffNormalized'] #if use physnet, should be DiffNormalized 68 | LABEL_TYPE: DiffNormalized 69 | DO_CHUNK: True 70 | CHUNK_LENGTH: 128 #only support for factor of 512 71 | DYNAMIC_DETECTION: False 72 | DYNAMIC_DETECTION_FREQUENCY : 128 73 | CROP_FACE: True 74 | LARGE_FACE_BOX: True 75 | LARGE_BOX_COEF: 1.5 76 | H: 72 77 | W: 72 78 | DEVICE: cuda:0 79 | NUM_OF_GPU_TRAIN: 1 80 | LOG: 81 | PATH: runs/exp 82 | MODEL: 83 | DROP_RATE: 0.2 84 | NAME: Physnet 85 | PHYSNET: 86 | FRAME_NUM: 128 87 | INFERENCE: 88 | BATCH_SIZE: 4 89 | 
EVALUATION_METHOD: "FFT" # "FFT" or "peak detection" 90 | MODEL_PATH: "" 91 | 92 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/train_configs/SCAMPS_SCAMPS_UBFC_TSCAN_BASIC.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "train_and_test" # "train_and_test" or "only_test" 3 | TRAIN: 4 | BATCH_SIZE: 4 5 | EPOCHS: 30 6 | LR: 9e-3 7 | MODEL_FILE_NAME: SCAMPS_SCAMPS_UBFC_tscan 8 | DATA: 9 | FS: 30 10 | DATASET: SCAMPS 11 | DO_PREPROCESS: False # if first time, should be true 12 | DATA_FORMAT: NDCHW 13 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/SCAMPS/RawData/Train" # Raw dataset path, need to be updated 14 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 15 | EXP_DATA_NAME: "" 16 | BEGIN: 0.0 17 | END: 1.0 18 | PREPROCESS : 19 | DATA_TYPE: ['DiffNormalized','Standardized'] 20 | LABEL_TYPE: DiffNormalized 21 | DO_CHUNK: True 22 | CHUNK_LENGTH: 180 23 | DYNAMIC_DETECTION: False 24 | DYNAMIC_DETECTION_FREQUENCY : 180 25 | CROP_FACE: True 26 | LARGE_FACE_BOX: True 27 | LARGE_BOX_COEF: 1.5 28 | H: 72 29 | W: 72 30 | VALID: 31 | DATA: 32 | FS: 30 33 | DATASET: SCAMPS 34 | DO_PREPROCESS: False # if first time, should be true 35 | DATA_FORMAT: NDCHW 36 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/SCAMPS/RawData/Val" # Raw dataset path, need to be updated 37 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 38 | EXP_DATA_NAME: "" 39 | BEGIN: 0.0 40 | END: 1.0 41 | PREPROCESS: 42 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 43 | LABEL_TYPE: DiffNormalized 44 | DO_CHUNK: True 45 | CHUNK_LENGTH: 180 46 | DYNAMIC_DETECTION: False 47 | DYNAMIC_DETECTION_FREQUENCY : 180 48 | CROP_FACE: True 49 | LARGE_FACE_BOX: True 50 | LARGE_BOX_COEF: 1.5 51 | 
H: 72 52 | W: 72 53 | TEST: 54 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 55 | USE_LAST_EPOCH: True # to use provided validation dataset to find the best epoch, should be false 56 | DATA: 57 | FS: 30 58 | DATASET: UBFC 59 | DO_PREPROCESS: False # if first time, should be true 60 | DATA_FORMAT: NDCHW 61 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/UBFC/RawData" # Raw dataset path, need to be updated 62 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 63 | EXP_DATA_NAME: "" 64 | BEGIN: 0.0 65 | END: 1.0 66 | PREPROCESS: 67 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 68 | LABEL_TYPE: DiffNormalized 69 | DO_CHUNK: True 70 | CHUNK_LENGTH: 180 71 | DYNAMIC_DETECTION: False 72 | DYNAMIC_DETECTION_FREQUENCY : 180 73 | CROP_FACE: True 74 | LARGE_FACE_BOX: True 75 | LARGE_BOX_COEF: 1.5 76 | H: 72 77 | W: 72 78 | DEVICE: cuda:0 79 | NUM_OF_GPU_TRAIN: 1 80 | LOG: 81 | PATH: runs/exp 82 | MODEL: 83 | DROP_RATE: 0.2 84 | NAME: Tscan 85 | TSCAN: 86 | FRAME_DEPTH: 10 87 | INFERENCE: 88 | BATCH_SIZE: 4 89 | EVALUATION_METHOD: "FFT" # "FFT" or "peak detection" 90 | MODEL_PATH: "" 91 | 92 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/train_configs/UBFC_UBFC_MMPD_TSCAN_BASIC.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "only_test" # "train_and_test" or "only_test" 3 | TRAIN: 4 | BATCH_SIZE: 4 5 | EPOCHS: 30 6 | LR: 9e-3 7 | MODEL_FILE_NAME: UBFC_UBFC_MMPD_tscan 8 | DATA: 9 | FS: 30 10 | DATASET: UBFC 11 | DO_PREPROCESS: True # if first time, should be true 12 | DATA_FORMAT: NDCHW 13 | DATA_PATH: "/data1/acsp/toolbox_data/UBFC/RawData/" # need to be updated 14 | CACHED_PATH: "/data/rPPG_dataset/processed_dataset_makeup" # Processed dataset save path, need to be updated 15 | EXP_DATA_NAME: "" 16 | BEGIN: 0.0 17 | END: 0.8 18 | PREPROCESS : 19 | 
DATA_TYPE: ['DiffNormalized','Standardized'] 20 | LABEL_TYPE: DiffNormalized 21 | DO_CHUNK: True 22 | CHUNK_LENGTH: 180 23 | DYNAMIC_DETECTION: False 24 | DYNAMIC_DETECTION_FREQUENCY : 180 25 | CROP_FACE: True 26 | LARGE_FACE_BOX: True 27 | LARGE_BOX_COEF: 1.5 28 | H: 72 29 | W: 72 30 | VALID: 31 | DATA: 32 | FS: 30 33 | DATASET: UBFC 34 | DO_PREPROCESS: True # if first time, should be true 35 | DATA_FORMAT: NDCHW 36 | DATA_PATH: "/data1/acsp/toolbox_data/UBFC/RawData/" # need to be updated 37 | CACHED_PATH: "/data/rPPG_dataset/processed_dataset_makeup" # Processed dataset save path, need to be updated 38 | EXP_DATA_NAME: "" 39 | BEGIN: 0.8 40 | END: 1.0 41 | PREPROCESS: 42 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 43 | LABEL_TYPE: DiffNormalized 44 | DO_CHUNK: True 45 | CHUNK_LENGTH: 180 46 | DYNAMIC_DETECTION: False 47 | DYNAMIC_DETECTION_FREQUENCY : 180 48 | CROP_FACE: True 49 | LARGE_FACE_BOX: True 50 | LARGE_BOX_COEF: 1.5 51 | H: 72 52 | W: 72 53 | TEST: 54 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 55 | USE_LAST_EPOCH: True # to use provided validation dataset to find the best epoch, should be false 56 | DATA: 57 | INFO: 58 | LIGHT: [1, 2, 3, 4] # 'LED-low','LED-high','Incandescent','Nature' 59 | MOTION: [1, 2, 3, 4] # 'Stationary', 'Rotation', 'Talking', 'Walking' 60 | EXERCISE: [1, 2] # True, False 61 | SKIN_COLOR: [3, 4, 5, 6] # 3, 4, 5, 6 62 | GENDER: [1, 2] # 'male','female' 63 | GLASSER: [1, 2] # True, False 64 | HAIR_COVER: [1, 2] # True, False 65 | MAKEUP: [1, 2] # True, False 66 | FS: 30 67 | DATASET: MMPD 68 | DO_PREPROCESS: True # if first time, should be true 69 | DATA_FORMAT: NDCHW 70 | DATA_PATH: "/data/rPPG_dataset/mat_dataset" # Raw dataset path, need to be updated 71 | CACHED_PATH: "/data/rPPG_dataset/processed_dataset" # Processed dataset save path, need to be updated 72 | EXP_DATA_NAME: "" 73 | BEGIN: 0.0 74 | END: 1.0 75 | PREPROCESS: 76 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 77 | LABEL_TYPE: DiffNormalized 78 | DO_CHUNK: 
True 79 | CHUNK_LENGTH: 180 80 | DYNAMIC_DETECTION: False 81 | DYNAMIC_DETECTION_FREQUENCY : 180 82 | CROP_FACE: True 83 | LARGE_FACE_BOX: True 84 | LARGE_BOX_COEF: 1.5 85 | H: 72 86 | W: 72 87 | DEVICE: cuda:0 88 | NUM_OF_GPU_TRAIN: 1 89 | LOG: 90 | PATH: runs/exp 91 | MODEL: 92 | DROP_RATE: 0.2 93 | NAME: Tscan 94 | TSCAN: 95 | FRAME_DEPTH: 10 96 | INFERENCE: 97 | BATCH_SIZE: 4 98 | EVALUATION_METHOD: "FFT" # "FFT" or "peak detection" 99 | MODEL_PATH: "/data/rPPG-Toolbox_MMPD/final_model_release/UBFC_TSCAN.pth" 100 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/train_configs/UBFC_UBFC_PURE_DEEPPHYS_BASIC.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "train_and_test" # "train_and_test" or "only_test" 3 | TRAIN: 4 | BATCH_SIZE: 4 5 | EPOCHS: 30 6 | LR: 9e-3 7 | MODEL_FILE_NAME: UBFC_UBFC_PURE_deepphys 8 | DATA: 9 | FS: 30 10 | DATASET: UBFC 11 | DO_PREPROCESS: False # if first time, should be true 12 | DATA_FORMAT: NDCHW 13 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/UBFC/RawData" # Raw dataset path, need to be updated 14 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 15 | EXP_DATA_NAME: "" 16 | BEGIN: 0.0 17 | END: 0.8 18 | PREPROCESS : 19 | DATA_TYPE: ['DiffNormalized','Standardized'] 20 | LABEL_TYPE: DiffNormalized 21 | DO_CHUNK: True 22 | CHUNK_LENGTH: 180 23 | DYNAMIC_DETECTION: False 24 | DYNAMIC_DETECTION_FREQUENCY : 180 25 | CROP_FACE: True 26 | LARGE_FACE_BOX: True 27 | LARGE_BOX_COEF: 1.5 28 | H: 72 29 | W: 72 30 | VALID: 31 | DATA: 32 | FS: 30 33 | DATASET: UBFC 34 | DO_PREPROCESS: False # if first time, should be true 35 | DATA_FORMAT: NDCHW 36 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/UBFC/RawData" # Raw dataset path, need to be updated 37 | CACHED_PATH: 
"/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 38 | EXP_DATA_NAME: "" 39 | BEGIN: 0.8 40 | END: 1.0 41 | PREPROCESS: 42 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 43 | LABEL_TYPE: DiffNormalized 44 | DO_CHUNK: True 45 | CHUNK_LENGTH: 180 46 | DYNAMIC_DETECTION: False 47 | DYNAMIC_DETECTION_FREQUENCY : 180 48 | CROP_FACE: True 49 | LARGE_FACE_BOX: True 50 | LARGE_BOX_COEF: 1.5 51 | H: 72 52 | W: 72 53 | TEST: 54 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 55 | USE_LAST_EPOCH: True # to use provided validation dataset to find the best epoch, should be false 56 | DATA: 57 | FS: 30 58 | DATASET: PURE 59 | DO_PREPROCESS: False # if first time, should be true 60 | DATA_FORMAT: NDCHW 61 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/PURE/RawData" # Raw dataset path, need to be updated 62 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 63 | EXP_DATA_NAME: "" 64 | BEGIN: 0.0 65 | END: 1.0 66 | PREPROCESS: 67 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 68 | LABEL_TYPE: DiffNormalized 69 | DO_CHUNK: True 70 | CHUNK_LENGTH: 180 71 | DYNAMIC_DETECTION: False 72 | DYNAMIC_DETECTION_FREQUENCY : 180 73 | CROP_FACE: True 74 | LARGE_FACE_BOX: True 75 | LARGE_BOX_COEF: 1.5 76 | H: 72 77 | W: 72 78 | DEVICE: cuda:0 79 | NUM_OF_GPU_TRAIN: 1 80 | LOG: 81 | PATH: runs/exp 82 | MODEL: 83 | DROP_RATE: 0.2 84 | NAME: DeepPhys 85 | INFERENCE: 86 | BATCH_SIZE: 4 87 | EVALUATION_METHOD: "FFT" # "FFT" or "peak detection" 88 | MODEL_PATH: "" 89 | 90 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/train_configs/UBFC_UBFC_PURE_EFFICIENTPHYS.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "train_and_test" # "train_and_test" or "only_test" 3 | TRAIN: 4 | BATCH_SIZE: 4 5 | EPOCHS: 
30 6 | LR: 9e-3 7 | MODEL_FILE_NAME: UBFC_UBFC_PURE_efficientphys 8 | DATA: 9 | FS: 30 10 | DATASET: UBFC 11 | DO_PREPROCESS: False # if first time, should be true 12 | DATA_FORMAT: NDCHW 13 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/UBFC/RawData" # Raw dataset path, need to be updated 14 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 15 | EXP_DATA_NAME: "" 16 | BEGIN: 0.0 17 | END: 0.8 18 | PREPROCESS : 19 | DATA_TYPE: ['Standardized'] 20 | LABEL_TYPE: DiffNormalized 21 | DO_CHUNK: True 22 | CHUNK_LENGTH: 180 23 | DYNAMIC_DETECTION: False 24 | DYNAMIC_DETECTION_FREQUENCY : 180 25 | CROP_FACE: True 26 | LARGE_FACE_BOX: True 27 | LARGE_BOX_COEF: 1.5 28 | H: 72 29 | W: 72 30 | VALID: 31 | DATA: 32 | FS: 30 33 | DATASET: UBFC 34 | DO_PREPROCESS: False # if first time, should be true 35 | DATA_FORMAT: NDCHW 36 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/UBFC/RawData" # Raw dataset path, need to be updated 37 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 38 | EXP_DATA_NAME: "" 39 | BEGIN: 0.8 40 | END: 1.0 41 | PREPROCESS: 42 | DATA_TYPE: ['Standardized'] 43 | LABEL_TYPE: DiffNormalized 44 | DO_CHUNK: True 45 | CHUNK_LENGTH: 180 46 | DYNAMIC_DETECTION: False 47 | DYNAMIC_DETECTION_FREQUENCY : 180 48 | CROP_FACE: True 49 | LARGE_FACE_BOX: True 50 | LARGE_BOX_COEF: 1.5 51 | H: 72 52 | W: 72 53 | TEST: 54 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 55 | USE_LAST_EPOCH: True 56 | DATA: 57 | FS: 30 58 | DATASET: PURE 59 | DO_PREPROCESS: False # if first time, should be true 60 | DATA_FORMAT: NDCHW 61 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/PURE/RawData" # Raw dataset path, need to be updated 62 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 63 | 
EXP_DATA_NAME: "" 64 | BEGIN: 0.0 65 | END: 1.0 66 | PREPROCESS: 67 | DATA_TYPE: ['Standardized'] 68 | LABEL_TYPE: DiffNormalized 69 | DO_CHUNK: True 70 | CHUNK_LENGTH: 180 71 | DYNAMIC_DETECTION: False 72 | DYNAMIC_DETECTION_FREQUENCY : 180 73 | CROP_FACE: True 74 | LARGE_FACE_BOX: True 75 | LARGE_BOX_COEF: 1.5 76 | H: 72 77 | W: 72 78 | DEVICE: cuda:0 79 | NUM_OF_GPU_TRAIN: 1 80 | LOG: 81 | PATH: runs/exp 82 | MODEL: 83 | DROP_RATE: 0.2 84 | NAME: EfficientPhys 85 | EFFICIENTPHYS: 86 | FRAME_DEPTH: 10 87 | INFERENCE: 88 | BATCH_SIZE: 4 89 | EVALUATION_METHOD: FFT # "FFT" or "peak detection" 90 | MODEL_PATH: "" 91 | 92 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/train_configs/UBFC_UBFC_PURE_PHYSNET_BASIC.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "train_and_test" # "train_and_test" or "only_test" 3 | TRAIN: 4 | BATCH_SIZE: 4 5 | EPOCHS: 30 6 | LR: 9e-3 7 | MODEL_FILE_NAME: UBFC_UBFC_PURE_physnet_stan 8 | DATA: 9 | FS: 30 10 | DATASET: UBFC 11 | DO_PREPROCESS: False # if first time, should be true 12 | DATA_FORMAT: NCDHW 13 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/UBFC/RawData" # Raw dataset path, need to be updated 14 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 15 | FILE_LIST_PATH: "/data2/rppg_datasets/DataFileLists/" # Path to store file lists, needs to be updated 16 | EXP_DATA_NAME: "" 17 | BEGIN: 0.0 18 | END: 0.8 19 | PREPROCESS : 20 | DATA_TYPE: ['DiffNormalized'] #if use physnet, should be DiffNormalized 21 | LABEL_TYPE: DiffNormalized 22 | DO_CHUNK: True 23 | CHUNK_LENGTH: 128 #only support for factor of 512 24 | DYNAMIC_DETECTION: False 25 | DYNAMIC_DETECTION_FREQUENCY : 128 26 | CROP_FACE: True 27 | LARGE_FACE_BOX: True 28 | LARGE_BOX_COEF: 1.5 29 | H: 72 30 | W: 72 31 | VALID: 32 | DATA: 33 | FS: 30 34 
| DATASET: UBFC 35 | DO_PREPROCESS: False # if first time, should be true 36 | DATA_FORMAT: NCDHW 37 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/UBFC/RawData" # Raw dataset path, need to be updated 38 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 39 | FILE_LIST_PATH: "/data2/rppg_datasets/DataFileLists/" # Path to store file lists, needs to be updated 40 | EXP_DATA_NAME: "" 41 | BEGIN: 0.8 42 | END: 1.0 43 | PREPROCESS : 44 | DATA_TYPE: ['DiffNormalized'] #if use physnet, should be DiffNormalized 45 | LABEL_TYPE: DiffNormalized 46 | DO_CHUNK: True 47 | CHUNK_LENGTH: 128 #only support for factor of 512 48 | DYNAMIC_DETECTION: False 49 | DYNAMIC_DETECTION_FREQUENCY : 128 50 | CROP_FACE: True 51 | LARGE_FACE_BOX: True 52 | LARGE_BOX_COEF: 1.5 53 | H: 72 54 | W: 72 55 | TEST: 56 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 57 | USE_LAST_EPOCH: True # to use provided validation dataset to find the best epoch, should be false 58 | DATA: 59 | FS: 30 60 | DATASET: PURE 61 | DO_PREPROCESS: False # if first time, should be true 62 | DATA_FORMAT: NCDHW 63 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/PURE/RawData" # Raw dataset path, need to be updated 64 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 65 | FILE_LIST_PATH: "/data2/rppg_datasets/DataFileLists/" # Path to store file lists, needs to be updated 66 | EXP_DATA_NAME: "" 67 | BEGIN: 0.0 68 | END: 1.0 69 | PREPROCESS : 70 | DATA_TYPE: ['DiffNormalized'] #if use physnet, should be DiffNormalized 71 | LABEL_TYPE: DiffNormalized 72 | DO_CHUNK: True 73 | CHUNK_LENGTH: 128 #only support for factor of 512 74 | DYNAMIC_DETECTION: False 75 | DYNAMIC_DETECTION_FREQUENCY : 128 76 | CROP_FACE: True 77 | LARGE_FACE_BOX: True 78 | LARGE_BOX_COEF: 1.5 79 | H: 72 80 | W: 72 81 | DEVICE: cuda:0 82 | NUM_OF_GPU_TRAIN: 1 83 | LOG: 
84 | PATH: runs/exp 85 | MODEL: 86 | DROP_RATE: 0.2 87 | NAME: Physnet 88 | PHYSNET: 89 | FRAME_NUM: 128 90 | INFERENCE: 91 | BATCH_SIZE: 4 92 | EVALUATION_METHOD: "FFT" # "FFT" or "peak detection" 93 | MODEL_PATH: "" 94 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/configs/train_configs/UBFC_UBFC_PURE_TSCAN_BASIC.yaml: -------------------------------------------------------------------------------- 1 | BASE: [''] 2 | TOOLBOX_MODE: "train_and_test" # "train_and_test" or "only_test" 3 | TRAIN: 4 | BATCH_SIZE: 4 5 | EPOCHS: 30 6 | LR: 9e-3 7 | MODEL_FILE_NAME: UBFC_UBFC_PURE_tscan 8 | DATA: 9 | FS: 30 10 | DATASET: UBFC 11 | DO_PREPROCESS: False # if first time, should be true 12 | DATA_FORMAT: NDCHW 13 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/UBFC/RawData" # Raw dataset path, need to be updated 14 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 15 | EXP_DATA_NAME: "" 16 | BEGIN: 0.0 17 | END: 0.8 18 | PREPROCESS : 19 | DATA_TYPE: ['DiffNormalized','Standardized'] 20 | LABEL_TYPE: DiffNormalized 21 | DO_CHUNK: True 22 | CHUNK_LENGTH: 180 23 | DYNAMIC_DETECTION: False 24 | DYNAMIC_DETECTION_FREQUENCY : 180 25 | CROP_FACE: True 26 | LARGE_FACE_BOX: True 27 | LARGE_BOX_COEF: 1.5 28 | H: 72 29 | W: 72 30 | VALID: 31 | DATA: 32 | FS: 30 33 | DATASET: UBFC 34 | DO_PREPROCESS: False # if first time, should be true 35 | DATA_FORMAT: NDCHW 36 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/UBFC/RawData" # Raw dataset path, need to be updated 37 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 38 | EXP_DATA_NAME: "" 39 | BEGIN: 0.8 40 | END: 1.0 41 | PREPROCESS: 42 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 43 | LABEL_TYPE: DiffNormalized 44 | DO_CHUNK: True 45 | CHUNK_LENGTH: 180 46 | DYNAMIC_DETECTION: False 
47 | DYNAMIC_DETECTION_FREQUENCY : 180 48 | CROP_FACE: True 49 | LARGE_FACE_BOX: True 50 | LARGE_BOX_COEF: 1.5 51 | H: 72 52 | W: 72 53 | TEST: 54 | METRICS: ['MAE','RMSE','MAPE','Pearson'] 55 | USE_LAST_EPOCH: True # to use provided validation dataset to find the best epoch, should be false 56 | DATA: 57 | FS: 30 58 | DATASET: PURE 59 | DO_PREPROCESS: False # if first time, should be true 60 | DATA_FORMAT: NDCHW 61 | DATA_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/PURE/RawData" # Raw dataset path, need to be updated 62 | CACHED_PATH: "/gscratch/ubicomp/xliu0/data3/mnt/Datasets/rppg_toolbox/PreprocessedData" # Processed dataset save path, need to be updated 63 | EXP_DATA_NAME: "" 64 | BEGIN: 0.0 65 | END: 1.0 66 | PREPROCESS: 67 | DATA_TYPE: [ 'DiffNormalized','Standardized' ] 68 | LABEL_TYPE: DiffNormalized 69 | DO_CHUNK: True 70 | CHUNK_LENGTH: 180 71 | DYNAMIC_DETECTION: False 72 | DYNAMIC_DETECTION_FREQUENCY : 180 73 | CROP_FACE: True 74 | LARGE_FACE_BOX: True 75 | LARGE_BOX_COEF: 1.5 76 | H: 72 77 | W: 72 78 | DEVICE: cuda:0 79 | NUM_OF_GPU_TRAIN: 1 80 | LOG: 81 | PATH: runs/exp 82 | MODEL: 83 | DROP_RATE: 0.2 84 | NAME: Tscan 85 | TSCAN: 86 | FRAME_DEPTH: 10 87 | INFERENCE: 88 | BATCH_SIZE: 4 89 | EVALUATION_METHOD: "FFT" # "FFT" or "peak detection" 90 | MODEL_PATH: "" 91 | 92 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/dataset/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/THU-CS-PI/MMPD_rPPG_dataset/b88d2e248cf4d0b362c90492d189d7bd68ae85e7/rppg-Toolbox_MMPD/dataset/__init__.py -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/dataset/data_loader/COHFACELoader.py: -------------------------------------------------------------------------------- 1 | """The dataloader for COHFACE datasets. 
2 | 3 | Details for the COHFACE Dataset see https://www.idiap.ch/en/dataset/cohface 4 | If you use this dataset, please cite the following publication: 5 | Guillaume Heusch, André Anjos, Sébastien Marcel, “A reproducible study on remote heart rate measurement”, arXiv, 2016. 6 | http://publications.idiap.ch/index.php/publications/show/3688 7 | """ 8 | import glob 9 | import os 10 | import re 11 | 12 | import cv2 13 | import h5py 14 | import numpy as np 15 | from dataset.data_loader.BaseLoader import BaseLoader 16 | 17 | 18 | class COHFACELoader(BaseLoader): 19 | """The data loader for the COHFACE dataset.""" 20 | 21 | def __init__(self, name, data_path, config_data): 22 | """Initializes an COHFACE dataloader. 23 | Args: 24 | data_path(str): path of a folder which stores raw video and bvp data. 25 | e.g. data_path should be "RawData" for below dataset structure: 26 | ----------------- 27 | RawData/ 28 | | |-- 1/ 29 | | |-- 0/ 30 | | |-- data.avi 31 | | |-- data.hdf5 32 | | |... 33 | | |-- 3/ 34 | | |-- data.avi 35 | | |-- data.hdf5 36 | |... 37 | | |-- n/ 38 | | |-- 0/ 39 | | |-- data.avi 40 | | |-- data.hdf5 41 | | |... 42 | | |-- 3/ 43 | | |-- data.avi 44 | | |-- data.hdf5 45 | ----------------- 46 | name(str): name of the dataloader. 47 | config_data(CfgNode): data settings(ref:config.py). 
48 | """ 49 | super().__init__(name, data_path, config_data) 50 | 51 | def get_raw_data(self, data_path): 52 | """Returns data directories under the path(For COHFACE dataset).""" 53 | data_dirs = glob.glob(data_path + os.sep + "*") 54 | if not data_dirs: 55 | raise ValueError(self.dataset_name + " data paths empty!") 56 | dirs = list() 57 | for data_dir in data_dirs: 58 | for i in range(4): 59 | subject = os.path.split(data_dir)[-1] 60 | dirs.append({"index": int('{0}0{1}'.format(subject, i)), 61 | "path": os.path.join(data_dir, str(i))}) 62 | return dirs 63 | 64 | def preprocess_dataset(self, data_dirs, config_preprocess): 65 | """Preprocesses the raw data.""" 66 | file_num = len(data_dirs) 67 | for i in range(file_num): 68 | frames = self.read_video( 69 | os.path.join( 70 | data_dirs[i]["path"], 71 | "data.avi")) 72 | bvps = self.read_wave( 73 | os.path.join( 74 | data_dirs[i]["path"], 75 | "data.hdf5")) 76 | target_length = frames.shape[0] 77 | bvps = BaseLoader.resample_ppg(bvps, target_length) 78 | frames_clips, bvps_clips = self.preprocess(frames, bvps, config_preprocess) 79 | self.preprocessed_data_len += self.save(frames_clips, bvps_clips, data_dirs[i]["index"]) 80 | 81 | @staticmethod 82 | def read_video(video_file): 83 | """Reads a video file, returns frames(T,H,W,3) """ 84 | VidObj = cv2.VideoCapture(video_file) 85 | VidObj.set(cv2.CAP_PROP_POS_MSEC, 0) 86 | success, frame = VidObj.read() 87 | frames = list() 88 | while (success): 89 | frame = cv2.cvtColor(np.array(frame), cv2.COLOR_BGR2RGB) 90 | frame = np.asarray(frame) 91 | frame[np.isnan(frame)] = 0 # TODO: maybe change into avg 92 | frames.append(frame) 93 | success, frame = VidObj.read() 94 | 95 | return np.asarray(frames) 96 | 97 | @staticmethod 98 | def read_wave(bvp_file): 99 | """Reads a bvp signal file.""" 100 | f = h5py.File(bvp_file, 'r') 101 | pulse = f["pulse"][:] 102 | return pulse 103 | -------------------------------------------------------------------------------- 
/rppg-Toolbox_MMPD/dataset/data_loader/PURELoader.py: -------------------------------------------------------------------------------- 1 | """The dataloader for PURE datasets. 2 | 3 | Details for the PURE Dataset see https://www.tu-ilmenau.de/universitaet/fakultaeten/fakultaet-informatik-und-automatisierung/profil/institute-und-fachgebiete/institut-fuer-technische-informatik-und-ingenieurinformatik/fachgebiet-neuroinformatik-und-kognitive-robotik/data-sets-code/pulse-rate-detection-dataset-pure 4 | If you use this dataset, please cite the following publication: 5 | Stricker, R., Müller, S., Gross, H.-M. 6 | Non-contact Video-based Pulse Rate Measurement on a Mobile Service Robot 7 | in: Proc. 23st IEEE Int. Symposium on Robot and Human Interactive Communication (Ro-Man 2014), Edinburgh, Scotland, UK, pp. 1056 - 1062, IEEE 2014 8 | """ 9 | import glob 10 | import glob 11 | import json 12 | import os 13 | import re 14 | 15 | import cv2 16 | import numpy as np 17 | from dataset.data_loader.BaseLoader import BaseLoader 18 | from tqdm import tqdm 19 | 20 | 21 | class PURELoader(BaseLoader): 22 | """The data loader for the PURE dataset.""" 23 | 24 | def __init__(self, name, data_path, config_data): 25 | """Initializes an PURE dataloader. 26 | Args: 27 | data_path(str): path of a folder which stores raw video and bvp data. 28 | e.g. data_path should be "RawData" for below dataset structure: 29 | ----------------- 30 | RawData/ 31 | | |-- 01-01/ 32 | | |-- 01-01/ 33 | | |-- 01-01.json 34 | | |-- 01-02/ 35 | | |-- 01-02/ 36 | | |-- 01-02.json 37 | |... 38 | | |-- ii-jj/ 39 | | |-- ii-jj/ 40 | | |-- ii-jj.json 41 | ----------------- 42 | name(str): name of the dataloader. 43 | config_data(CfgNode): data settings(ref:config.py). 
44 | """ 45 | super().__init__(name, data_path, config_data) 46 | 47 | def get_raw_data(self, data_path): 48 | """Returns data directories under the path(For PURE dataset).""" 49 | 50 | data_dirs = glob.glob(data_path + os.sep + "*-*") 51 | if not data_dirs: 52 | raise ValueError(self.dataset_name + " data paths empty!") 53 | dirs = list() 54 | for data_dir in data_dirs: 55 | subject_trail_val = os.path.split(data_dir)[-1].replace('-', '') 56 | index = int(subject_trail_val) 57 | subject = int(subject_trail_val[0:2]) 58 | dirs.append({"index": index, "path": data_dir, "subject": subject}) 59 | return dirs 60 | 61 | def split_raw_data(self, data_dirs, begin, end): 62 | """Returns a subset of data dirs, split with begin and end values, 63 | and ensures no overlapping subjects between splits""" 64 | 65 | # return the full directory 66 | if begin == 0 and end == 1: 67 | return data_dirs 68 | 69 | # get info about the dataset: subject list and num vids per subject 70 | data_info = dict() 71 | for data in data_dirs: 72 | subject = data['subject'] 73 | data_dir = data['path'] 74 | index = data['index'] 75 | # creates a dictionary of data_dirs indexed by subject number 76 | if subject not in data_info: # if subject not in the data info dictionary 77 | data_info[subject] = [] # make an emplty list for that subject 78 | # append a tuple of the filename, subject num, trial num, and chunk num 79 | data_info[subject].append({"index": index, "path": data_dir, "subject": subject}) 80 | 81 | subj_list = list(data_info.keys()) # all subjects by number ID (1-27) 82 | subj_list = sorted(subj_list) 83 | num_subjs = len(subj_list) # number of unique subjects 84 | 85 | # get split of data set (depending on start / end) 86 | subj_range = list(range(0, num_subjs)) 87 | if begin != 0 or end != 1: 88 | subj_range = list(range(int(begin * num_subjs), int(end * num_subjs))) 89 | 90 | # compile file list 91 | data_dirs_new = [] 92 | for i in subj_range: 93 | subj_num = subj_list[i] 94 | 
subj_files = data_info[subj_num] 95 | data_dirs_new += subj_files # add file information to file_list (tuple of fname, subj ID, trial num, 96 | # chunk num) 97 | 98 | return data_dirs_new 99 | 100 | def preprocess_dataset_subprocess(self, data_dirs, config_preprocess, i, file_list_dict): 101 | """ Invoked by preprocess_dataset for multi_process. """ 102 | filename = os.path.split(data_dirs[i]['path'])[-1] 103 | saved_filename = data_dirs[i]['index'] 104 | 105 | frames = self.read_video( 106 | os.path.join(data_dirs[i]['path'], filename, "")) 107 | bvps = self.read_wave( 108 | os.path.join(data_dirs[i]['path'], "{0}.json".format(filename))) 109 | target_length = frames.shape[0] 110 | bvps = BaseLoader.resample_ppg(bvps, target_length) 111 | frames_clips, bvps_clips = self.preprocess(frames, bvps, config_preprocess) 112 | input_name_list, label_name_list = self.save_multi_process(frames_clips, bvps_clips, saved_filename) 113 | file_list_dict[i] = input_name_list 114 | 115 | @staticmethod 116 | def read_video(video_file): 117 | """Reads a video file, returns frames(T, H, W, 3) """ 118 | frames = list() 119 | all_png = sorted(glob.glob(video_file + '*.png')) 120 | for png_path in all_png: 121 | img = cv2.imread(png_path) 122 | img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) 123 | frames.append(img) 124 | return np.asarray(frames) 125 | 126 | @staticmethod 127 | def read_wave(bvp_file): 128 | """Reads a bvp signal file.""" 129 | with open(bvp_file, "r") as f: 130 | labels = json.load(f) 131 | waves = [label["Value"]["waveform"] 132 | for label in labels["/FullPackage"]] 133 | return np.asarray(waves) 134 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/dataset/data_loader/SCAMPSLoader.py: -------------------------------------------------------------------------------- 1 | """The dataloader for SCAMPS datasets. 
2 | 3 | Details for the SCAMPS Dataset see https://github.com/danmcduff/scampsdataset 4 | If you use this dataset, please cite the following publication: 5 | McDuff, Daniel and Wander, Miah and Liu, Xin and Hill, Brian L and Hernandez, Javier and Lester, Jonathan and Baltrusaitis, Tadas 6 | SCAMPS: Synthetics for Camera Measurement of Physiological Signals 7 | in: Conference on Neural Information Processing Systems' 2022 8 | """ 9 | import glob 10 | import json 11 | import os 12 | import re 13 | from multiprocessing import Pool, Process, Value, Array, Manager 14 | 15 | import cv2 16 | import mat73 17 | import matplotlib.pyplot as plt 18 | import numpy as np 19 | import scipy.io 20 | from dataset.data_loader.BaseLoader import BaseLoader 21 | from tqdm import tqdm 22 | 23 | 24 | class SCAMPSLoader(BaseLoader): 25 | """The data loader for the SCAMPS Processed dataset.""" 26 | 27 | def __init__(self, name, data_path, config_data): 28 | """Initializes a SCAMPS Processed dataloader. 29 | Args: 30 | data_path(string): path of a folder which stores raw video and ground truth biosignal in mat files. 31 | Each mat file contains a video sequence of resolution of 72x72 and various ground truth signals. 32 | e.g., dXsub -> raw/diffnormalized data; d_ppg -> pulse signal, d_br -> resp signal 33 | ----------------- 34 | ProcessedData/ 35 | | |-- P000001.mat/ 36 | | |-- P000002.mat/ 37 | | |-- P000003.mat/ 38 | ... 39 | ----------------- 40 | name(str): name of the dataloader. 41 | config_data(CfgNode): data settings(ref:config.py). 
42 | """ 43 | super().__init__(name, data_path, config_data) 44 | self.cached_path = config_data.CACHED_PATH + "_" + self.dataset_name 45 | self.file_list_path = config_data.FILE_LIST_PATH.split('.')[0] + "_" + self.dataset_name \ 46 | + os.path.basename(config_data.FILE_LIST_PATH) # append split name before .csv ext 47 | 48 | def get_raw_data(self, data_path): 49 | """Returns data directories under the path(For COHFACE dataset).""" 50 | data_dirs = glob.glob(data_path + os.sep + "*.mat") 51 | if not data_dirs: 52 | raise ValueError(self.dataset_name + " data paths empty!") 53 | dirs = list() 54 | for data_dir in data_dirs: 55 | subject = os.path.split(data_dir)[-1] 56 | dirs.append({"index": subject, "path": data_dir}) 57 | return dirs 58 | 59 | def split_raw_data(self, data_dirs, begin, end): 60 | """Returns a subset of data dirs, split with begin and end values""" 61 | if begin == 0 and end == 1: # return the full directory if begin == 0 and end == 1 62 | return data_dirs 63 | file_num = len(data_dirs) 64 | choose_range = range(int(begin * file_num), int(end * file_num)) 65 | data_dirs_new = [] 66 | for i in choose_range: 67 | data_dirs_new.append(data_dirs[i]) 68 | return data_dirs_new 69 | 70 | def preprocess_dataset_subprocess(self, data_dirs, config_preprocess, i, file_list_dict): 71 | """ Invoked by preprocess_dataset() for multi_process. 
""" 72 | matfile_path = data_dirs[i]['path'] 73 | saved_filename = data_dirs[i]['index'] 74 | frames = self.read_video(matfile_path) 75 | frames = (np.round(frames * 255)).astype(np.uint8) 76 | bvps = self.read_wave(matfile_path) 77 | frames_clips, bvps_clips = self.preprocess( 78 | frames, bvps, config_preprocess) 79 | input_name_list, label_name_list = self.save_multi_process(frames_clips, bvps_clips, saved_filename) 80 | file_list_dict[i] = input_name_list 81 | 82 | def preprocess_dataset_backup(self, data_dirs, config_preprocess): 83 | """Preprocesses the raw data.""" 84 | file_num = len(data_dirs) 85 | pbar = tqdm(list(range(file_num))) 86 | for i in pbar: 87 | matfile_path = data_dirs[i]['path'] 88 | pbar.set_description("Processing %s" % matfile_path) 89 | frames = self.read_video(matfile_path) 90 | bvps = self.read_wave(matfile_path) 91 | frames_clips, bvps_clips = self.preprocess(frames, bvps, config_preprocess) 92 | self.preprocessed_data_len += self.save(frames_clips, bvps_clips, data_dirs[i]['index']) 93 | 94 | @staticmethod 95 | def read_video(video_file): 96 | """Reads a video file, returns frames(T, H, W, 3). """ 97 | mat = mat73.loadmat(video_file) 98 | frames = mat['Xsub'] # load raw frames 99 | return np.asarray(frames) 100 | 101 | @staticmethod 102 | def read_wave(wave_file): 103 | """Reads a bvp signal file.""" 104 | mat = mat73.loadmat(wave_file) 105 | ppg = mat['d_ppg'] # load raw frames 106 | return np.asarray(ppg) 107 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/dataset/data_loader/UBFCLoader.py: -------------------------------------------------------------------------------- 1 | """The dataloader for UBFC datasets. 2 | 3 | Details for the UBFC-RPPG Dataset see https://sites.google.com/view/ybenezeth/ubfcrppg. 4 | If you use this dataset, please cite this paper: 5 | S. Bobbia, R. Macwan, Y. Benezeth, A. Mansouri, J. 
Dubois, "Unsupervised skin tissue segmentation for remote photoplethysmography", Pattern Recognition Letters, 2017. 6 | """ 7 | import glob 8 | import os 9 | import re 10 | from multiprocessing import Pool, Process, Value, Array, Manager 11 | 12 | import cv2 13 | import numpy as np 14 | from dataset.data_loader.BaseLoader import BaseLoader 15 | from tqdm import tqdm 16 | 17 | 18 | class UBFCLoader(BaseLoader): 19 | """The data loader for the UBFC dataset.""" 20 | 21 | def __init__(self, name, data_path, config_data): 22 | """Initializes an UBFC dataloader. 23 | Args: 24 | data_path(str): path of a folder which stores raw video and bvp data. 25 | e.g. data_path should be "RawData" for below dataset structure: 26 | ----------------- 27 | RawData/ 28 | | |-- subject1/ 29 | | |-- vid.avi 30 | | |-- ground_truth.txt 31 | | |-- subject2/ 32 | | |-- vid.avi 33 | | |-- ground_truth.txt 34 | |... 35 | | |-- subjectn/ 36 | | |-- vid.avi 37 | | |-- ground_truth.txt 38 | ----------------- 39 | name(string): name of the dataloader. 40 | config_data(CfgNode): data settings(ref:config.py). 
41 | """ 42 | super().__init__(name, data_path, config_data) 43 | 44 | def get_raw_data(self, data_path): 45 | """Returns data directories under the path(For UBFC dataset).""" 46 | data_dirs = glob.glob(data_path + os.sep + "subject*") 47 | if not data_dirs: 48 | raise ValueError(self.dataset_name + " data paths empty!") 49 | dirs = [{"index": re.search( 50 | 'subject(\d+)', data_dir).group(0), "path": data_dir} for data_dir in data_dirs] 51 | return dirs 52 | 53 | def split_raw_data(self, data_dirs, begin, end): 54 | """Returns a subset of data dirs, split with begin and end values.""" 55 | if begin == 0 and end == 1: # return the full directory if begin == 0 and end == 1 56 | return data_dirs 57 | 58 | file_num = len(data_dirs) 59 | choose_range = range(int(begin * file_num), int(end * file_num)) 60 | data_dirs_new = [] 61 | 62 | for i in choose_range: 63 | data_dirs_new.append(data_dirs[i]) 64 | 65 | return data_dirs_new 66 | 67 | def preprocess_dataset_subprocess(self, data_dirs, config_preprocess, i, file_list_dict): 68 | """ invoked by preprocess_dataset for multi_process.""" 69 | filename = os.path.split(data_dirs[i]['path'])[-1] 70 | saved_filename = data_dirs[i]['index'] 71 | 72 | frames = self.read_video( 73 | os.path.join(data_dirs[i]['path'],"vid.avi")) 74 | bvps = self.read_wave( 75 | os.path.join(data_dirs[i]['path'],"ground_truth.txt")) 76 | 77 | frames_clips, bvps_clips = self.preprocess(frames, bvps, config_preprocess) 78 | input_name_list, label_name_list = self.save_multi_process(frames_clips, bvps_clips, saved_filename) 79 | file_list_dict[i] = input_name_list 80 | 81 | @staticmethod 82 | def read_video(video_file): 83 | """Reads a video file, returns frames(T, H, W, 3) """ 84 | VidObj = cv2.VideoCapture(video_file) 85 | VidObj.set(cv2.CAP_PROP_POS_MSEC, 0) 86 | success, frame = VidObj.read() 87 | frames = list() 88 | while success: 89 | frame = cv2.cvtColor(np.array(frame), cv2.COLOR_BGR2RGB) 90 | frame = np.asarray(frame) 91 | 
frames.append(frame) 92 | success, frame = VidObj.read() 93 | return np.asarray(frames) 94 | 95 | @staticmethod 96 | def read_wave(bvp_file): 97 | """Reads a bvp signal file.""" 98 | with open(bvp_file, "r") as f: 99 | str1 = f.read() 100 | str1 = str1.split("\n") 101 | bvp = [float(x) for x in str1[0].split()] 102 | return np.asarray(bvp) 103 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/dataset/data_loader/__init__.py: -------------------------------------------------------------------------------- 1 | import dataset.data_loader.BaseLoader 2 | import dataset.data_loader.COHFACELoader 3 | import dataset.data_loader.UBFCLoader 4 | import dataset.data_loader.PURELoader 5 | import dataset.data_loader.SCAMPSLoader 6 | import dataset.data_loader.MMPDLoader 7 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/evaluation/metrics.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | import torch 4 | from evaluation.post_process import * 5 | 6 | 7 | def read_label(dataset): 8 | """Read manually corrected labels.""" 9 | df = pd.read_csv("label/{0}_Comparison.csv".format(dataset)) 10 | out_dict = df.to_dict(orient='index') 11 | out_dict = {str(value['VideoID']): value for key, value in out_dict.items()} 12 | return out_dict 13 | 14 | 15 | def read_hr_label(feed_dict, index): 16 | """Read manually corrected UBFC labels.""" 17 | # For UBFC only 18 | if index[:7] == 'subject': 19 | index = index[7:] 20 | video_dict = feed_dict[index] 21 | if video_dict['Preferred'] == 'Peak Detection': 22 | hr = video_dict['Peak Detection'] 23 | elif video_dict['Preferred'] == 'FFT': 24 | hr = video_dict['FFT'] 25 | else: 26 | hr = video_dict['Peak Detection'] 27 | return index, hr 28 | 29 | 30 | def _reform_data_from_dict(data): 31 | """Helper func for calculate metrics: reformat predictions and labels from 
dicts. """ 32 | sort_data = sorted(data.items(), key=lambda x: x[0]) 33 | sort_data = [i[1] for i in sort_data] 34 | sort_data = torch.cat(sort_data, dim=0) 35 | return np.reshape(sort_data.cpu(), (-1)) 36 | 37 | 38 | def calculate_metrics(predictions, labels, config): 39 | """Calculate rPPG Metrics (MAE, RMSE, MAPE, Pearson Coef.).""" 40 | predict_hr_fft_all = list() 41 | gt_hr_fft_all = list() 42 | predict_hr_peak_all = list() 43 | gt_hr_peak_all = list() 44 | for index in predictions.keys(): 45 | prediction = _reform_data_from_dict(predictions[index]) 46 | label = _reform_data_from_dict(labels[index]) 47 | 48 | if config.TEST.DATA.PREPROCESS.LABEL_TYPE == "Standardized" or \ 49 | config.TEST.DATA.PREPROCESS.LABEL_TYPE == "Raw": 50 | diff_flag_test = False 51 | elif config.TEST.DATA.PREPROCESS.LABEL_TYPE == "DiffNormalized": 52 | diff_flag_test = True 53 | else: 54 | raise ValueError("Not supported label type in testing!") 55 | gt_hr_fft, pred_hr_fft = calculate_metric_per_video( 56 | prediction, label, diff_flag=diff_flag_test, fs=config.TEST.DATA.FS, hr_method='FFT') 57 | gt_hr_peak, pred_hr_peak = calculate_metric_per_video( 58 | prediction, label, diff_flag=diff_flag_test, fs=config.TEST.DATA.FS, hr_method='Peak') 59 | gt_hr_fft_all.append(gt_hr_fft) 60 | predict_hr_fft_all.append(pred_hr_fft) 61 | predict_hr_peak_all.append(pred_hr_peak) 62 | gt_hr_peak_all.append(gt_hr_peak) 63 | predict_hr_peak_all = np.array(predict_hr_peak_all) 64 | predict_hr_fft_all = np.array(predict_hr_fft_all) 65 | gt_hr_peak_all = np.array(gt_hr_peak_all) 66 | gt_hr_fft_all = np.array(gt_hr_fft_all) 67 | for metric in config.TEST.METRICS: 68 | if metric == "MAE": 69 | if config.INFERENCE.EVALUATION_METHOD == "FFT": 70 | MAE_FFT = np.mean(np.abs(predict_hr_fft_all - gt_hr_fft_all)) 71 | print("FFT MAE (FFT Label):{0}".format(MAE_FFT)) 72 | elif config.INFERENCE.EVALUATION_METHOD == "peak detection": 73 | MAE_PEAK = np.mean(np.abs(predict_hr_peak_all - gt_hr_peak_all)) 74 | 
print("Peak MAE (Peak Label):{0}".format(MAE_PEAK)) 75 | else: 76 | raise ValueError("Your evaluation method is not supported yet! Support FFT and peak detection now ") 77 | 78 | elif metric == "RMSE": 79 | if config.INFERENCE.EVALUATION_METHOD == "FFT": 80 | RMSE_FFT = np.sqrt(np.mean(np.square(predict_hr_fft_all - gt_hr_fft_all))) 81 | print("FFT RMSE (FFT Label):{0}".format(RMSE_FFT)) 82 | elif config.INFERENCE.EVALUATION_METHOD == "peak detection": 83 | RMSE_PEAK = np.sqrt(np.mean(np.square(predict_hr_peak_all - gt_hr_peak_all))) 84 | print("PEAK RMSE (Peak Label):{0}".format(RMSE_PEAK)) 85 | else: 86 | raise ValueError("Your evaluation method is not supported yet! Support FFT and peak detection now ") 87 | 88 | elif metric == "MAPE": 89 | if config.INFERENCE.EVALUATION_METHOD == "FFT": 90 | MAPE_FFT = np.mean(np.abs((predict_hr_fft_all - gt_hr_fft_all) / gt_hr_fft_all)) * 100 91 | print("FFT MAPE (FFT Label):{0}".format(MAPE_FFT)) 92 | elif config.INFERENCE.EVALUATION_METHOD == "peak detection": 93 | MAPE_PEAK = np.mean(np.abs((predict_hr_peak_all - gt_hr_peak_all) / gt_hr_peak_all)) * 100 94 | print("PEAK MAPE (Peak Label):{0}".format(MAPE_PEAK)) 95 | else: 96 | raise ValueError("Your evaluation method is not supported yet! Support FFT and peak detection now ") 97 | 98 | elif metric == "Pearson": 99 | if config.INFERENCE.EVALUATION_METHOD == "FFT": 100 | Pearson_FFT = np.corrcoef(predict_hr_fft_all, gt_hr_fft_all) 101 | print("FFT Pearson (FFT Label):{0}".format(Pearson_FFT[0][1])) 102 | elif config.INFERENCE.EVALUATION_METHOD == "peak detection": 103 | Pearson_PEAK = np.corrcoef(predict_hr_peak_all, gt_hr_peak_all) 104 | print("PEAK Pearson (Peak Label):{0}".format(Pearson_PEAK[0][1])) 105 | else: 106 | raise ValueError("Your evaluation method is not supported yet! 
Support FFT and peak detection now ") 107 | 108 | else: 109 | raise ValueError("Wrong Test Metric Type") 110 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/evaluation/post_process.py: -------------------------------------------------------------------------------- 1 | """The post processing files for caluclating heart rate using FFT or peak detection. 2 | The file also includes helper funcs such as detrend, mag2db etc. 3 | """ 4 | 5 | import numpy as np 6 | import scipy 7 | import scipy.io 8 | from scipy.signal import butter 9 | from scipy.sparse import spdiags 10 | 11 | 12 | def _next_power_of_2(x): 13 | """Calculate the nearest power of 2.""" 14 | return 1 if x == 0 else 2 ** (x - 1).bit_length() 15 | 16 | 17 | def _detrend(input_signal, lambda_value): 18 | """Detrend PPG signal.""" 19 | signal_length = input_signal.shape[0] 20 | # observation matrix 21 | H = np.identity(signal_length) 22 | ones = np.ones(signal_length) 23 | minus_twos = -2 * np.ones(signal_length) 24 | diags_data = np.array([ones, minus_twos, ones]) 25 | diags_index = np.array([0, 1, 2]) 26 | D = spdiags(diags_data, diags_index, 27 | (signal_length - 2), signal_length).toarray() 28 | detrended_signal = np.dot( 29 | (H - np.linalg.inv(H + (lambda_value ** 2) * np.dot(D.T, D))), input_signal) 30 | return detrended_signal 31 | 32 | 33 | def mag2db(mag): 34 | """Convert magnitude to db.""" 35 | return 20. 
* np.log10(mag) 36 | 37 | 38 | def _calculate_fft_hr(ppg_signal, fs=60, low_pass=0.75, high_pass=2.5): 39 | """Calculate heart rate based on PPG using Fast Fourier transform (FFT).""" 40 | ppg_signal = np.expand_dims(ppg_signal, 0) 41 | N = _next_power_of_2(ppg_signal.shape[1]) 42 | f_ppg, pxx_ppg = scipy.signal.periodogram(ppg_signal, fs=fs, nfft=N, detrend=False) 43 | fmask_ppg = np.argwhere((f_ppg >= low_pass) & (f_ppg <= high_pass)) 44 | mask_ppg = np.take(f_ppg, fmask_ppg) 45 | mask_pxx = np.take(pxx_ppg, fmask_ppg) 46 | fft_hr = np.take(mask_ppg, np.argmax(mask_pxx, 0))[0] * 60 47 | return fft_hr 48 | 49 | 50 | def _calculate_peak_hr(ppg_signal, fs): 51 | """Calculate heart rate based on PPG using peak detection.""" 52 | ppg_peaks, _ = scipy.signal.find_peaks(ppg_signal) 53 | hr_peak = 60 / (np.mean(np.diff(ppg_peaks)) / fs) 54 | return hr_peak 55 | 56 | 57 | def calculate_metric_per_video(predictions, labels, fs=30, diff_flag=True, use_bandpass=True, hr_method='FFT'): 58 | """Calculate video-level HR""" 59 | if diff_flag: # if the predictions and labels are 1st derivative of PPG signal. 
60 | predictions = _detrend(np.cumsum(predictions), 100) 61 | labels = _detrend(np.cumsum(labels), 100) 62 | else: 63 | predictions = _detrend(predictions, 100) 64 | labels = _detrend(labels, 100) 65 | if use_bandpass: 66 | # bandpass filter between [0.75, 2.5] Hz 67 | # equals [45, 150] beats per min 68 | [b, a] = butter(1, [0.75 / fs * 2, 2.5 / fs * 2], btype='bandpass') 69 | predictions = scipy.signal.filtfilt(b, a, np.double(predictions)) 70 | labels = scipy.signal.filtfilt(b, a, np.double(labels)) 71 | if hr_method == 'FFT': 72 | hr_pred = _calculate_fft_hr(predictions, fs=fs) 73 | hr_label = _calculate_fft_hr(labels, fs=fs) 74 | elif hr_method == 'Peak': 75 | hr_pred = _calculate_peak_hr(predictions, fs=fs) 76 | hr_label = _calculate_peak_hr(labels, fs=fs) 77 | else: 78 | raise ValueError('Please use FFT or Peak to calculate your HR.') 79 | return hr_label, hr_pred 80 | 81 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/final_model_release/PURE_DeepPhys.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/THU-CS-PI/MMPD_rPPG_dataset/b88d2e248cf4d0b362c90492d189d7bd68ae85e7/rppg-Toolbox_MMPD/final_model_release/PURE_DeepPhys.pth -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/final_model_release/PURE_EfficientPhys.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/THU-CS-PI/MMPD_rPPG_dataset/b88d2e248cf4d0b362c90492d189d7bd68ae85e7/rppg-Toolbox_MMPD/final_model_release/PURE_EfficientPhys.pth -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/final_model_release/PURE_PhysNet_DiffNormalized.pth: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/THU-CS-PI/MMPD_rPPG_dataset/b88d2e248cf4d0b362c90492d189d7bd68ae85e7/rppg-Toolbox_MMPD/final_model_release/PURE_PhysNet_DiffNormalized.pth -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/final_model_release/PURE_TSCAN.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/THU-CS-PI/MMPD_rPPG_dataset/b88d2e248cf4d0b362c90492d189d7bd68ae85e7/rppg-Toolbox_MMPD/final_model_release/PURE_TSCAN.pth -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/final_model_release/SCAMPS_DeepPhys.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/THU-CS-PI/MMPD_rPPG_dataset/b88d2e248cf4d0b362c90492d189d7bd68ae85e7/rppg-Toolbox_MMPD/final_model_release/SCAMPS_DeepPhys.pth -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/final_model_release/SCAMPS_EfficientPhys.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/THU-CS-PI/MMPD_rPPG_dataset/b88d2e248cf4d0b362c90492d189d7bd68ae85e7/rppg-Toolbox_MMPD/final_model_release/SCAMPS_EfficientPhys.pth -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/final_model_release/SCAMPS_PhysNet_DiffNormalied.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/THU-CS-PI/MMPD_rPPG_dataset/b88d2e248cf4d0b362c90492d189d7bd68ae85e7/rppg-Toolbox_MMPD/final_model_release/SCAMPS_PhysNet_DiffNormalied.pth -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/final_model_release/SCAMPS_TSCAN.pth: -------------------------------------------------------------------------------- 
class Neg_Pearson(nn.Module):
    """Negative Pearson correlation loss.

    Computes 1 - Pearson(preds, labels) along dim 0 by taking the cosine
    similarity of the mean-centered signals (centering makes cosine
    similarity equal to Pearson correlation), then averages the result.
    A perfect positive correlation yields a loss of 0; a perfect negative
    correlation yields 2.
    """

    def __init__(self):
        super().__init__()

    def forward(self, preds, labels):
        # Center both signals along dim 0; cosine similarity of centered
        # vectors is exactly the Pearson correlation coefficient.
        centered_pred = preds - preds.mean(dim=0, keepdim=True)
        centered_label = labels - labels.mean(dim=0, keepdim=True)
        correlation = nn.functional.cosine_similarity(
            centered_pred, centered_label, dim=0, eps=1e-6)
        return torch.mean(1 - correlation)
class Neg_Pearson(nn.Module):
    """
    The Neg_Pearson Module is from the orignal author of Physnet.
    Code of 'Remote Photoplethysmograph Signal Measurement from Facial Videos Using Spatio-Temporal Networks'
    source: https://github.com/ZitongYu/PhysNet/blob/master/NegPearsonLoss.py

    For each sample in the batch, the Pearson correlation between the
    predicted and ground-truth signals is computed with the classic
    sum-based formula, and the per-sample (1 - r) values are averaged.
    """

    def __init__(self):
        super().__init__()

    def forward(self, preds, labels):
        batch = preds.shape[0]
        n = preds.shape[1]  # number of time steps per sample
        total = 0
        # Iterate sample-by-sample; zip over dim 0 is equivalent to indexing.
        for x, y in zip(preds, labels):
            sx = torch.sum(x)
            sy = torch.sum(y)
            sxy = torch.sum(x * y)
            sxx = torch.sum(x * x)
            syy = torch.sum(y * y)
            # Pearson r = (n*Σxy - Σx·Σy) / sqrt((n*Σx² - (Σx)²)(n*Σy² - (Σy)²))
            numerator = n * sxy - sx * sy
            denominator = torch.sqrt((n * sxx - sx * sx) * (n * syy - sy * sy))
            total += 1 - numerator / denominator
        return total / batch
"""DeepPhys - 2D Convolutional Attention Network.
DeepPhys: Video-Based Physiological Measurement Using Convolutional Attention Networks
ECCV, 2018
Weixuan Chen, Daniel McDuff
"""

import torch
import torch.nn as nn


class Attention_mask(nn.Module):
    """Normalizes a soft spatial attention map.

    Divides by the per-channel spatial sum and rescales by H*W*0.5 so the
    mean attention value stays at 0.5 regardless of spatial size.
    """

    def __init__(self):
        super(Attention_mask, self).__init__()

    def forward(self, x):
        # Per-channel spatial sum over H (dim 2) then W (dim 3).
        xsum = torch.sum(x, dim=2, keepdim=True)
        xsum = torch.sum(xsum, dim=3, keepdim=True)
        xshape = tuple(x.size())
        return x / xsum * xshape[2] * xshape[3] * 0.5

    def get_config(self):
        """Return the layer configuration (kept for API compatibility).

        Bug fix: the previous implementation called ``super().get_config()``,
        a Keras leftover -- ``torch.nn.Module`` has no ``get_config`` method,
        so any call raised ``AttributeError``. This layer has no
        hyper-parameters, so an empty dict is the correct configuration.
        """
        return {}


class DeepPhys(nn.Module):
    """Two-branch (motion + appearance) attention network for rPPG.

    The input is expected to stack the diff-normalized frame (channels 0-2)
    and the raw frame (channels 3-5) along the channel axis.
    """

    def __init__(self, in_channels=3, nb_filters1=32, nb_filters2=64, kernel_size=3, dropout_rate1=0.25,
                 dropout_rate2=0.5, pool_size=(2, 2), nb_dense=128, img_size=36):
        """Definition of DeepPhys.
        Args:
          in_channels: the number of input channel. Default: 3
          nb_filters1: filters in the first conv stage. Default: 32
          nb_filters2: filters in the second conv stage. Default: 64
          kernel_size: conv kernel size. Default: 3
          dropout_rate1: dropout after each pooling stage. Default: 0.25
          dropout_rate2: dropout before the final dense layer. Default: 0.5
          pool_size: average-pooling window. Default: (2, 2)
          nb_dense: hidden units in the first dense layer. Default: 128
          img_size: height/width of each frame (36, 72 or 96). Default: 36.
        Returns:
          DeepPhys model.
        Raises:
          Exception: if img_size is not one of the supported sizes.
        """
        super(DeepPhys, self).__init__()
        self.in_channels = in_channels
        self.kernel_size = kernel_size
        self.dropout_rate1 = dropout_rate1
        self.dropout_rate2 = dropout_rate2
        self.pool_size = pool_size
        self.nb_filters1 = nb_filters1
        self.nb_filters2 = nb_filters2
        self.nb_dense = nb_dense
        # Motion branch convs (conv2/conv4 intentionally un-padded, shrinking H/W by 2)
        self.motion_conv1 = nn.Conv2d(self.in_channels, self.nb_filters1, kernel_size=self.kernel_size, padding=(1, 1),
                                      bias=True)
        self.motion_conv2 = nn.Conv2d(self.nb_filters1, self.nb_filters1, kernel_size=self.kernel_size, bias=True)
        self.motion_conv3 = nn.Conv2d(self.nb_filters1, self.nb_filters2, kernel_size=self.kernel_size, padding=(1, 1),
                                      bias=True)
        self.motion_conv4 = nn.Conv2d(self.nb_filters2, self.nb_filters2, kernel_size=self.kernel_size, bias=True)
        # Apperance branch convs
        self.apperance_conv1 = nn.Conv2d(self.in_channels, self.nb_filters1, kernel_size=self.kernel_size,
                                         padding=(1, 1), bias=True)
        self.apperance_conv2 = nn.Conv2d(self.nb_filters1, self.nb_filters1, kernel_size=self.kernel_size, bias=True)
        self.apperance_conv3 = nn.Conv2d(self.nb_filters1, self.nb_filters2, kernel_size=self.kernel_size,
                                         padding=(1, 1), bias=True)
        self.apperance_conv4 = nn.Conv2d(self.nb_filters2, self.nb_filters2, kernel_size=self.kernel_size, bias=True)
        # Attention layers (1x1 conv -> sigmoid -> normalization mask)
        self.apperance_att_conv1 = nn.Conv2d(self.nb_filters1, 1, kernel_size=1, padding=(0, 0), bias=True)
        self.attn_mask_1 = Attention_mask()
        self.apperance_att_conv2 = nn.Conv2d(self.nb_filters2, 1, kernel_size=1, padding=(0, 0), bias=True)
        self.attn_mask_2 = Attention_mask()
        # Avg pooling
        self.avg_pooling_1 = nn.AvgPool2d(self.pool_size)
        self.avg_pooling_2 = nn.AvgPool2d(self.pool_size)
        self.avg_pooling_3 = nn.AvgPool2d(self.pool_size)
        # Dropout layers
        self.dropout_1 = nn.Dropout(self.dropout_rate1)
        self.dropout_2 = nn.Dropout(self.dropout_rate1)
        self.dropout_3 = nn.Dropout(self.dropout_rate1)
        self.dropout_4 = nn.Dropout(self.dropout_rate2)
        # Dense layers: flattened size depends on the input resolution.
        if img_size == 36:
            self.final_dense_1 = nn.Linear(3136, self.nb_dense, bias=True)
        elif img_size == 72:
            self.final_dense_1 = nn.Linear(16384, self.nb_dense, bias=True)
        elif img_size == 96:
            self.final_dense_1 = nn.Linear(30976, self.nb_dense, bias=True)
        else:
            raise Exception('Unsupported image size')
        self.final_dense_2 = nn.Linear(self.nb_dense, 1, bias=True)

    def forward(self, inputs, params=None):
        """Run the two-branch network.

        Args:
            inputs: tensor of shape (N, 6, H, W); channels 0-2 are the
                diff-normalized frame, channels 3-5 the raw frame.
            params: unused, kept for interface compatibility.
        Returns:
            Tensor of shape (N, 1) with one rPPG value per frame.
        """
        diff_input = inputs[:, :3, :, :]
        raw_input = inputs[:, 3:, :, :]

        d1 = torch.tanh(self.motion_conv1(diff_input))
        d2 = torch.tanh(self.motion_conv2(d1))

        r1 = torch.tanh(self.apperance_conv1(raw_input))
        r2 = torch.tanh(self.apperance_conv2(r1))

        # Appearance features gate the motion features (soft attention).
        g1 = torch.sigmoid(self.apperance_att_conv1(r2))
        g1 = self.attn_mask_1(g1)
        gated1 = d2 * g1

        d3 = self.avg_pooling_1(gated1)
        d4 = self.dropout_1(d3)

        r3 = self.avg_pooling_2(r2)
        r4 = self.dropout_2(r3)

        d5 = torch.tanh(self.motion_conv3(d4))
        d6 = torch.tanh(self.motion_conv4(d5))

        r5 = torch.tanh(self.apperance_conv3(r4))
        r6 = torch.tanh(self.apperance_conv4(r5))

        g2 = torch.sigmoid(self.apperance_att_conv2(r6))
        g2 = self.attn_mask_2(g2)
        gated2 = d6 * g2

        d7 = self.avg_pooling_3(gated2)
        d8 = self.dropout_3(d7)
        d9 = d8.view(d8.size(0), -1)  # flatten per frame
        d10 = torch.tanh(self.final_dense_1(d9))
        d11 = self.dropout_4(d10)
        out = self.final_dense_2(d11)

        return out
"""EfficientPhys: Enabling Simple, Fast and Accurate Camera-Based Vitals Measurement
Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision (WACV 2023)
Xin Liu, Brial Hill, Ziheng Jiang, Shwetak Patel, Daniel McDuff
"""

import torch
import torch.nn as nn


class Attention_mask(nn.Module):
    """Normalizes a soft spatial attention map (sum-normalized, mean 0.5)."""

    def __init__(self):
        super(Attention_mask, self).__init__()

    def forward(self, x):
        # Per-channel spatial sum over H (dim 2) then W (dim 3).
        xsum = torch.sum(x, dim=2, keepdim=True)
        xsum = torch.sum(xsum, dim=3, keepdim=True)
        xshape = tuple(x.size())
        return x / xsum * xshape[2] * xshape[3] * 0.5

    def get_config(self):
        """Return the layer configuration (kept for API compatibility).

        Bug fix: the previous implementation called ``super().get_config()``,
        a Keras leftover -- ``torch.nn.Module`` has no ``get_config`` method,
        so any call raised ``AttributeError``. This layer has no
        hyper-parameters, so an empty dict is the correct configuration.
        """
        return {}


class TSM(nn.Module):
    """Temporal Shift Module: shifts a third of the channels one frame
    forward, a third one frame back, and leaves the rest in place, giving
    2D convs temporal context at zero parameter cost."""

    def __init__(self, n_segment=10, fold_div=3):
        super(TSM, self).__init__()
        self.n_segment = n_segment
        self.fold_div = fold_div

    def forward(self, x):
        nt, c, h, w = x.size()
        # nt must be divisible by n_segment (frames are grouped into clips).
        n_batch = nt // self.n_segment
        x = x.view(n_batch, self.n_segment, c, h, w)
        fold = c // self.fold_div
        out = torch.zeros_like(x)
        out[:, :-1, :fold] = x[:, 1:, :fold]  # shift left
        out[:, 1:, fold: 2 * fold] = x[:, :-1, fold: 2 * fold]  # shift right
        out[:, :, 2 * fold:] = x[:, :, 2 * fold:]  # not shift
        return out.view(nt, c, h, w)


class EfficientPhys(nn.Module):
    """Single-branch TSM attention network operating on raw frames.

    Frame differencing and normalization are done inside ``forward``
    (via ``torch.diff`` + BatchNorm), so the model consumes raw frames
    directly instead of precomputed diff-normalized input.
    """

    def __init__(self, in_channels=3, nb_filters1=32, nb_filters2=64, kernel_size=3, dropout_rate1=0.25,
                 dropout_rate2=0.5, pool_size=(2, 2), nb_dense=128, frame_depth=20, img_size=36, channel='raw'):
        """Args:
          in_channels: number of input channels. Default: 3.
          frame_depth: temporal segment length for the TSM layers; the
              (differenced) batch size must be divisible by it.
          img_size: height/width of each frame (36, 72 or 96). Default: 36.
          channel: input channel mode tag (stored, not used here).
        Raises:
          Exception: if img_size is not one of the supported sizes.
        """
        super(EfficientPhys, self).__init__()
        self.in_channels = in_channels
        self.kernel_size = kernel_size
        self.dropout_rate1 = dropout_rate1
        self.dropout_rate2 = dropout_rate2
        self.pool_size = pool_size
        self.nb_filters1 = nb_filters1
        self.nb_filters2 = nb_filters2
        self.nb_dense = nb_dense
        # TSM layers (one before each conv so every conv sees shifted features)
        self.TSM_1 = TSM(n_segment=frame_depth)
        self.TSM_2 = TSM(n_segment=frame_depth)
        self.TSM_3 = TSM(n_segment=frame_depth)
        self.TSM_4 = TSM(n_segment=frame_depth)
        # Motion branch convs (conv2/conv4 intentionally un-padded)
        self.motion_conv1 = nn.Conv2d(self.in_channels, self.nb_filters1, kernel_size=self.kernel_size, padding=(1, 1),
                                      bias=True)
        self.motion_conv2 = nn.Conv2d(self.nb_filters1, self.nb_filters1, kernel_size=self.kernel_size, bias=True)
        self.motion_conv3 = nn.Conv2d(self.nb_filters1, self.nb_filters2, kernel_size=self.kernel_size, padding=(1, 1),
                                      bias=True)
        self.motion_conv4 = nn.Conv2d(self.nb_filters2, self.nb_filters2, kernel_size=self.kernel_size, bias=True)
        # Attention layers
        self.apperance_att_conv1 = nn.Conv2d(self.nb_filters1, 1, kernel_size=1, padding=(0, 0), bias=True)
        self.attn_mask_1 = Attention_mask()
        self.apperance_att_conv2 = nn.Conv2d(self.nb_filters2, 1, kernel_size=1, padding=(0, 0), bias=True)
        self.attn_mask_2 = Attention_mask()
        # Avg pooling
        self.avg_pooling_1 = nn.AvgPool2d(self.pool_size)
        self.avg_pooling_2 = nn.AvgPool2d(self.pool_size)
        self.avg_pooling_3 = nn.AvgPool2d(self.pool_size)
        # Dropout layers
        self.dropout_1 = nn.Dropout(self.dropout_rate1)
        self.dropout_2 = nn.Dropout(self.dropout_rate1)
        self.dropout_3 = nn.Dropout(self.dropout_rate1)
        self.dropout_4 = nn.Dropout(self.dropout_rate2)
        # Dense layers: flattened size depends on the input resolution.
        if img_size == 36:
            self.final_dense_1 = nn.Linear(3136, self.nb_dense, bias=True)
        elif img_size == 72:
            self.final_dense_1 = nn.Linear(16384, self.nb_dense, bias=True)
        elif img_size == 96:
            self.final_dense_1 = nn.Linear(30976, self.nb_dense, bias=True)
        else:
            raise Exception('Unsupported image size')
        self.final_dense_2 = nn.Linear(self.nb_dense, 1, bias=True)
        self.batch_norm = nn.BatchNorm2d(3)
        self.channel = channel

    def forward(self, inputs, params=None):
        """Run the network on a stack of raw frames.

        Args:
            inputs: tensor of shape (N, 3, H, W); N-1 must be divisible by
                ``frame_depth`` because differencing drops one frame.
            params: unused, kept for interface compatibility.
        Returns:
            Tensor of shape (N-1, 1) with one rPPG value per frame diff.
        """
        # In-network diff-normalization: frame difference + batch norm.
        inputs = torch.diff(inputs, dim=0)
        inputs = self.batch_norm(inputs)

        network_input = self.TSM_1(inputs)
        d1 = torch.tanh(self.motion_conv1(network_input))
        d1 = self.TSM_2(d1)
        d2 = torch.tanh(self.motion_conv2(d1))

        # Self-attention gating (no separate appearance branch here).
        g1 = torch.sigmoid(self.apperance_att_conv1(d2))
        g1 = self.attn_mask_1(g1)
        gated1 = d2 * g1

        d3 = self.avg_pooling_1(gated1)
        d4 = self.dropout_1(d3)

        d4 = self.TSM_3(d4)
        d5 = torch.tanh(self.motion_conv3(d4))
        d5 = self.TSM_4(d5)
        d6 = torch.tanh(self.motion_conv4(d5))

        g2 = torch.sigmoid(self.apperance_att_conv2(d6))
        g2 = self.attn_mask_2(g2)
        gated2 = d6 * g2

        d7 = self.avg_pooling_3(gated2)
        d8 = self.dropout_3(d7)
        d9 = d8.view(d8.size(0), -1)  # flatten per frame
        d10 = torch.tanh(self.final_dense_1(d9))
        d11 = self.dropout_4(d10)
        out = self.final_dense_2(d11)

        return out
import math
import pdb

import torch
import torch.nn as nn
from torch.nn.modules.utils import _triple


class PhysNet_padding_Encoder_Decoder_MAX(nn.Module):
    """3D-CNN encoder-decoder (PhysNet) producing a per-frame rPPG signal.

    The encoder downsamples spatially (and temporally twice); two transposed
    convolutions restore the original temporal length, and adaptive average
    pooling collapses the spatial dimensions to 1x1. The exact layer names
    and ordering must not change: released checkpoints key on them.
    """

    def __init__(self, frames=128):
        # frames: temporal length T of the input clip; used by the final
        # adaptive pooling so the output signal has exactly T samples.
        super(PhysNet_padding_Encoder_Decoder_MAX, self).__init__()

        # Stem: spatial-only 5x5 conv (kernel 1 in time).
        self.ConvBlock1 = nn.Sequential(
            nn.Conv3d(3, 16, [1, 5, 5], stride=1, padding=[0, 2, 2]),
            nn.BatchNorm3d(16),
            nn.ReLU(inplace=True),
        )

        self.ConvBlock2 = nn.Sequential(
            nn.Conv3d(16, 32, [3, 3, 3], stride=1, padding=1),
            nn.BatchNorm3d(32),
            nn.ReLU(inplace=True),
        )
        self.ConvBlock3 = nn.Sequential(
            nn.Conv3d(32, 64, [3, 3, 3], stride=1, padding=1),
            nn.BatchNorm3d(64),
            nn.ReLU(inplace=True),
        )

        self.ConvBlock4 = nn.Sequential(
            nn.Conv3d(64, 64, [3, 3, 3], stride=1, padding=1),
            nn.BatchNorm3d(64),
            nn.ReLU(inplace=True),
        )
        self.ConvBlock5 = nn.Sequential(
            nn.Conv3d(64, 64, [3, 3, 3], stride=1, padding=1),
            nn.BatchNorm3d(64),
            nn.ReLU(inplace=True),
        )
        self.ConvBlock6 = nn.Sequential(
            nn.Conv3d(64, 64, [3, 3, 3], stride=1, padding=1),
            nn.BatchNorm3d(64),
            nn.ReLU(inplace=True),
        )
        self.ConvBlock7 = nn.Sequential(
            nn.Conv3d(64, 64, [3, 3, 3], stride=1, padding=1),
            nn.BatchNorm3d(64),
            nn.ReLU(inplace=True),
        )
        self.ConvBlock8 = nn.Sequential(
            nn.Conv3d(64, 64, [3, 3, 3], stride=1, padding=1),
            nn.BatchNorm3d(64),
            nn.ReLU(inplace=True),
        )
        self.ConvBlock9 = nn.Sequential(
            nn.Conv3d(64, 64, [3, 3, 3], stride=1, padding=1),
            nn.BatchNorm3d(64),
            nn.ReLU(inplace=True),
        )

        # Decoder: two stride-2 transposed convs double the temporal length
        # (T/4 -> T/2 -> T); spatial size is unchanged (kernel 1 in space).
        self.upsample = nn.Sequential(
            nn.ConvTranspose3d(in_channels=64, out_channels=64, kernel_size=[
                               4, 1, 1], stride=[2, 1, 1], padding=[1, 0, 0]),  # [1, 128, 32]
            nn.BatchNorm3d(64),
            nn.ELU(),
        )
        self.upsample2 = nn.Sequential(
            nn.ConvTranspose3d(in_channels=64, out_channels=64, kernel_size=[
                               4, 1, 1], stride=[2, 1, 1], padding=[1, 0, 0]),  # [1, 128, 32]
            nn.BatchNorm3d(64),
            nn.ELU(),
        )

        # 1x1x1 conv maps the 64 channels to the single rPPG channel.
        self.ConvBlock10 = nn.Conv3d(64, 1, [1, 1, 1], stride=1, padding=0)

        self.MaxpoolSpa = nn.MaxPool3d((1, 2, 2), stride=(1, 2, 2))
        self.MaxpoolSpaTem = nn.MaxPool3d((2, 2, 2), stride=2)

        # self.poolspa = nn.AdaptiveMaxPool3d((frames,1,1))    # pool only spatial space
        self.poolspa = nn.AdaptiveAvgPool3d((frames, 1, 1))

    def forward(self, x):  # Batch_size*[3, T, 128,128]
        """Return (rPPG, x_visual, x_visual3232, x_visual1616).

        rPPG has shape (batch, T); the three x_visual* tensors are
        intermediate feature maps exposed for visualization.
        """
        x_visual = x
        [batch, channel, length, width, height] = x.shape

        x = self.ConvBlock1(x)  # x [3, T, 128,128]
        x = self.MaxpoolSpa(x)  # x [16, T, 64,64]

        x = self.ConvBlock2(x)  # x [32, T, 64,64]
        x_visual6464 = self.ConvBlock3(x)  # x [32, T, 64,64]
        # x [32, T/2, 32,32]    Temporal halve
        x = self.MaxpoolSpaTem(x_visual6464)

        x = self.ConvBlock4(x)  # x [64, T/2, 32,32]
        x_visual3232 = self.ConvBlock5(x)  # x [64, T/2, 32,32]
        x = self.MaxpoolSpaTem(x_visual3232)  # x [64, T/4, 16,16]

        x = self.ConvBlock6(x)  # x [64, T/4, 16,16]
        x_visual1616 = self.ConvBlock7(x)  # x [64, T/4, 16,16]
        x = self.MaxpoolSpa(x_visual1616)  # x [64, T/4, 8,8]

        x = self.ConvBlock8(x)  # x [64, T/4, 8, 8]
        x = self.ConvBlock9(x)  # x [64, T/4, 8, 8]
        x = self.upsample(x)  # x [64, T/2, 8, 8]
        x = self.upsample2(x)  # x [64, T, 8, 8]

        # x [64, T, 1,1]    -->  groundtruth left and right - 7
        x = self.poolspa(x)
        x = self.ConvBlock10(x)  # x [1, T, 1,1]

        # Collapse channel/spatial singleton dims -> (batch, T) signal.
        rPPG = x.view(-1, length)

        return rPPG, x_visual, x_visual3232, x_visual1616
class BaseTrainer:
    """Abstract base class for all neural-method trainers.

    Subclasses override ``train``, ``valid`` and ``test``; the base versions
    are deliberate no-ops so partial implementations still run.
    """

    @staticmethod
    def add_trainer_args(parser):
        """Adds arguments to Parser for training process.

        Args:
            parser: an ``argparse.ArgumentParser`` to extend.
        Returns:
            The same parser, with ``--lr`` and ``--model_file_name`` added.
        """
        parser.add_argument('--lr', default=None, type=float)
        # Bug fix: the model file name is a string; the original declared
        # type=float, which made any real name crash argument parsing.
        parser.add_argument('--model_file_name', default=None, type=str)
        return parser

    def __init__(self):
        pass

    def train(self, data_loader):
        """Training routine; overridden by concrete trainers."""
        pass

    def valid(self, data_loader):
        """Validation routine; overridden by concrete trainers."""
        pass

    def test(self):
        """Testing routine; overridden by concrete trainers."""
        pass
class DeepPhysTrainer(BaseTrainer):
    """Trainer for the DeepPhys model: training, validation, testing and
    checkpointing driven entirely by the yacs ``config`` object."""

    def __init__(self, config, data_loader):
        """Inits parameters from args and the writer for TensorboardX.

        Args:
            config: toolbox configuration (device, epochs, paths, LR, ...).
            data_loader: dict of DataLoaders keyed by 'train'/'valid'/'test'.
        Raises:
            ValueError: if config.TOOLBOX_MODE is neither 'train_and_test'
                nor 'only_test'.
        """
        super().__init__()
        self.device = torch.device(config.DEVICE)
        self.max_epoch_num = config.TRAIN.EPOCHS
        self.model_dir = config.MODEL.MODEL_DIR
        self.model_file_name = config.TRAIN.MODEL_FILE_NAME
        self.batch_size = config.TRAIN.BATCH_SIZE
        # Frames per preprocessed chunk; used in test() to slice predictions.
        self.chunk_len = config.TRAIN.DATA.PREPROCESS.CHUNK_LENGTH
        self.config = config
        self.min_valid_loss = None
        self.best_epoch = 0

        if config.TOOLBOX_MODE == "train_and_test":
            self.model = DeepPhys(img_size=config.TRAIN.DATA.PREPROCESS.H).to(self.device)
            self.model = torch.nn.DataParallel(self.model, device_ids=list(range(config.NUM_OF_GPU_TRAIN)))

            self.num_train_batches = len(data_loader["train"])
            self.criterion = torch.nn.MSELoss()
            self.optimizer = optim.AdamW(
                self.model.parameters(), lr=config.TRAIN.LR, weight_decay=0)
            # See more details on the OneCycleLR scheduler here: https://pytorch.org/docs/stable/generated/torch.optim.lr_scheduler.OneCycleLR.html
            self.scheduler = torch.optim.lr_scheduler.OneCycleLR(
                self.optimizer, max_lr=config.TRAIN.LR, epochs=config.TRAIN.EPOCHS, steps_per_epoch=self.num_train_batches)
        elif config.TOOLBOX_MODE == "only_test":
            self.model = DeepPhys(img_size=config.TEST.DATA.PREPROCESS.H).to(self.device)
            self.model = torch.nn.DataParallel(self.model, device_ids=list(range(config.NUM_OF_GPU_TRAIN)))
        else:
            raise ValueError("DeepPhys trainer initialized in incorrect toolbox mode!")

    def train(self, data_loader):
        """Training routine for model"""
        if data_loader["train"] is None:
            raise ValueError("No data for train")

        for epoch in range(self.max_epoch_num):
            print('')
            print(f"====Training Epoch: {epoch}====")
            running_loss = 0.0
            train_loss = []
            self.model.train()
            # Model Training
            tbar = tqdm(data_loader["train"], ncols=80)
            for idx, batch in enumerate(tbar):
                tbar.set_description("Train epoch %s" % epoch)
                data, labels = batch[0].to(
                    self.device), batch[1].to(self.device)
                # Fold chunk dim into batch: DeepPhys consumes single frames.
                N, D, C, H, W = data.shape
                data = data.view(N * D, C, H, W)
                labels = labels.view(-1, 1)
                self.optimizer.zero_grad()
                pred_ppg = self.model(data)
                loss = self.criterion(pred_ppg, labels)
                loss.backward()
                self.optimizer.step()
                # OneCycleLR is stepped per batch, not per epoch.
                self.scheduler.step()
                running_loss += loss.item()
                if idx % 100 == 99:  # print every 100 mini-batches
                    print(
                        f'[{epoch}, {idx + 1:5d}] loss: {running_loss / 100:.3f}')
                    running_loss = 0.0
                train_loss.append(loss.item())
                tbar.set_postfix({"loss": loss.item(), "lr": self.optimizer.param_groups[0]["lr"]})
            # Checkpoint every epoch; best-epoch tracking below selects one.
            self.save_model(epoch)
            if not self.config.TEST.USE_LAST_EPOCH:
                valid_loss = self.valid(data_loader)
                print('validation loss: ', valid_loss)
                if self.min_valid_loss is None:
                    self.min_valid_loss = valid_loss
                    self.best_epoch = epoch
                    print("Update best model! Best epoch: {}".format(self.best_epoch))
                elif (valid_loss < self.min_valid_loss):
                    self.min_valid_loss = valid_loss
                    self.best_epoch = epoch
                    print("Update best model! Best epoch: {}".format(self.best_epoch))
        if not self.config.TEST.USE_LAST_EPOCH:
            print("best trained epoch: {}, min_val_loss: {}".format(self.best_epoch, self.min_valid_loss))

    def valid(self, data_loader):
        """ Model evaluation on the validation dataset.

        Returns:
            Mean MSE loss over all validation batches (float).
        """
        if data_loader["valid"] is None:
            raise ValueError("No data for valid")

        print('')
        print("===Validating===")
        valid_loss = []
        self.model.eval()
        valid_step = 0
        with torch.no_grad():
            vbar = tqdm(data_loader["valid"], ncols=80)
            for valid_idx, valid_batch in enumerate(vbar):
                vbar.set_description("Validation")
                data_valid, labels_valid = valid_batch[0].to(
                    self.device), valid_batch[1].to(self.device)
                N, D, C, H, W = data_valid.shape
                data_valid = data_valid.view(N * D, C, H, W)
                labels_valid = labels_valid.view(-1, 1)
                pred_ppg_valid = self.model(data_valid)
                loss = self.criterion(pred_ppg_valid, labels_valid)
                valid_loss.append(loss.item())
                valid_step += 1
                vbar.set_postfix(loss=loss.item())
            valid_loss = np.asarray(valid_loss)
        return np.mean(valid_loss)

    def test(self, data_loader):
        """ Model evaluation on the testing dataset.

        Loads the appropriate checkpoint (pretrained, last-epoch, or
        best-by-validation), runs inference, regroups predictions per
        subject/chunk, and reports metrics via ``calculate_metrics``.
        """
        if data_loader["test"] is None:
            raise ValueError("No data for test")
        config = self.config

        print('')
        print("===Testing===")
        predictions = dict()
        labels = dict()
        if self.config.TOOLBOX_MODE == "only_test":
            if not os.path.exists(self.config.INFERENCE.MODEL_PATH):
                raise ValueError("Inference model path error! Please check INFERENCE.MODEL_PATH in your yaml.")
            self.model.load_state_dict(torch.load(self.config.INFERENCE.MODEL_PATH))
            print("Testing uses pretrained model!")
        else:
            if self.config.TEST.USE_LAST_EPOCH:
                last_epoch_model_path = os.path.join(
                    self.model_dir, self.model_file_name + '_Epoch' + str(self.max_epoch_num - 1) + '.pth')
                print("Testing uses last epoch as non-pretrained model!")
                print(last_epoch_model_path)
                self.model.load_state_dict(torch.load(last_epoch_model_path))
            else:
                best_model_path = os.path.join(
                    self.model_dir, self.model_file_name + '_Epoch' + str(self.best_epoch) + '.pth')
                print("Testing uses best epoch selected using model selection as non-pretrained model!")
                print(best_model_path)
                self.model.load_state_dict(torch.load(best_model_path))

        self.model = self.model.to(self.config.DEVICE)
        self.model.eval()
        with torch.no_grad():
            for _, test_batch in enumerate(data_loader['test']):
                batch_size = test_batch[0].shape[0]
                data_test, labels_test = test_batch[0].to(
                    self.config.DEVICE), test_batch[1].to(self.config.DEVICE)
                N, D, C, H, W = data_test.shape
                data_test = data_test.view(N * D, C, H, W)
                labels_test = labels_test.view(-1, 1)
                pred_ppg_test = self.model(data_test)
                # Re-split flat per-frame predictions into per-chunk slices,
                # keyed by subject id and chunk sort index.
                for idx in range(batch_size):
                    subj_index = test_batch[2][idx]
                    sort_index = int(test_batch[3][idx])
                    if subj_index not in predictions.keys():
                        predictions[subj_index] = dict()
                        labels[subj_index] = dict()
                    predictions[subj_index][sort_index] = pred_ppg_test[idx * self.chunk_len:(idx + 1) * self.chunk_len]
                    labels[subj_index][sort_index] = labels_test[idx * self.chunk_len:(idx + 1) * self.chunk_len]

        print('')
        calculate_metrics(predictions, labels, self.config)

    def save_model(self, index):
        """Save the current model state as ``<name>_Epoch<index>.pth``."""
        if not os.path.exists(self.model_dir):
            os.makedirs(self.model_dir)
        model_path = os.path.join(
            self.model_dir, self.model_file_name + '_Epoch' + str(index) + '.pth')
        torch.save(self.model.state_dict(), model_path)
https://pytorch.org/docs/stable/generated/torch.optim.lr_scheduler.OneCycleLR.html 41 | self.scheduler = torch.optim.lr_scheduler.OneCycleLR( 42 | self.optimizer, max_lr=config.TRAIN.LR, epochs=config.TRAIN.EPOCHS, steps_per_epoch=self.num_train_batches) 43 | elif config.TOOLBOX_MODE == "only_test": 44 | pass 45 | else: 46 | raise ValueError("PhysNet trainer initialized in incorrect toolbox mode!") 47 | 48 | def train(self, data_loader): 49 | """Training routine for model""" 50 | if data_loader["train"] is None: 51 | raise ValueError("No data for train") 52 | 53 | for epoch in range(self.max_epoch_num): 54 | print('') 55 | print(f"====Training Epoch: {epoch}====") 56 | running_loss = 0.0 57 | train_loss = [] 58 | self.model.train() 59 | tbar = tqdm(data_loader["train"], ncols=80) 60 | for idx, batch in enumerate(tbar): 61 | tbar.set_description("Train epoch %s" % epoch) 62 | rPPG, x_visual, x_visual3232, x_visual1616 = self.model( 63 | batch[0].to(torch.float32).to(self.device)) 64 | BVP_label = batch[1].to( 65 | torch.float32).to(self.device) 66 | rPPG = (rPPG - torch.mean(rPPG)) / torch.std(rPPG) # normalize 67 | BVP_label = (BVP_label - torch.mean(BVP_label)) / \ 68 | torch.std(BVP_label) # normalize 69 | loss = self.loss_model(rPPG, BVP_label) 70 | loss.backward() 71 | running_loss += loss.item() 72 | if idx % 100 == 99: # print every 100 mini-batches 73 | print( 74 | f'[{epoch}, {idx + 1:5d}] loss: {running_loss / 100:.3f}') 75 | running_loss = 0.0 76 | train_loss.append(loss.item()) 77 | self.optimizer.step() 78 | self.scheduler.step() 79 | self.optimizer.zero_grad() 80 | tbar.set_postfix(loss=loss.item()) 81 | self.save_model(epoch) 82 | if not self.config.TEST.USE_LAST_EPOCH: 83 | valid_loss = self.valid(data_loader) 84 | print('validation loss: ', valid_loss) 85 | if self.min_valid_loss is None: 86 | self.min_valid_loss = valid_loss 87 | self.best_epoch = epoch 88 | print("Update best model! 
Best epoch: {}".format(self.best_epoch)) 89 | elif (valid_loss < self.min_valid_loss): 90 | self.min_valid_loss = valid_loss 91 | self.best_epoch = epoch 92 | print("Update best model! Best epoch: {}".format(self.best_epoch)) 93 | if not self.config.TEST.USE_LAST_EPOCH: 94 | print("best trained epoch: {}, min_val_loss: {}".format( 95 | self.best_epoch, self.min_valid_loss)) 96 | 97 | def valid(self, data_loader): 98 | """ Runs the model on valid sets.""" 99 | if data_loader["valid"] is None: 100 | raise ValueError("No data for valid") 101 | 102 | print('') 103 | print(" ====Validing===") 104 | valid_loss = [] 105 | self.model.eval() 106 | valid_step = 0 107 | with torch.no_grad(): 108 | vbar = tqdm(data_loader["valid"], ncols=80) 109 | for valid_idx, valid_batch in enumerate(vbar): 110 | vbar.set_description("Validation") 111 | BVP_label = valid_batch[1].to( 112 | torch.float32).to(self.device) 113 | rPPG, x_visual, x_visual3232, x_visual1616 = self.model( 114 | valid_batch[0].to(torch.float32).to(self.device)) 115 | rPPG = (rPPG - torch.mean(rPPG)) / torch.std(rPPG) # normalize 116 | BVP_label = (BVP_label - torch.mean(BVP_label)) / \ 117 | torch.std(BVP_label) # normalize 118 | loss_ecg = self.loss_model(rPPG, BVP_label) 119 | valid_loss.append(loss_ecg.item()) 120 | valid_step += 1 121 | vbar.set_postfix(loss=loss_ecg.item()) 122 | valid_loss = np.asarray(valid_loss) 123 | return np.mean(valid_loss) 124 | 125 | def test(self, data_loader): 126 | """ Runs the model on test sets.""" 127 | if data_loader["test"] is None: 128 | raise ValueError("No data for test") 129 | 130 | print('') 131 | print("===Testing===") 132 | predictions = dict() 133 | labels = dict() 134 | 135 | if self.config.TOOLBOX_MODE == "only_test": 136 | if not os.path.exists(self.config.INFERENCE.MODEL_PATH): 137 | raise ValueError("Inference model path error! 
def test(self, data_loader):
    """Runs the model on the test set and reports metrics.

    Selects which checkpoint to load depending on TOOLBOX_MODE:
    a pretrained path (only_test), the last saved epoch, or the best
    validation epoch. Predictions/labels are grouped per subject and
    per chunk index, then handed to calculate_metrics.

    Raises:
        ValueError: if there is no test data or the inference path is invalid.
    """
    if data_loader["test"] is None:
        raise ValueError("No data for test")

    print('')
    print("===Testing===")
    # subject id -> {chunk index -> tensor}, so metrics can re-order chunks.
    predictions = dict()
    labels = dict()

    if self.config.TOOLBOX_MODE == "only_test":
        if not os.path.exists(self.config.INFERENCE.MODEL_PATH):
            raise ValueError("Inference model path error! Please check INFERENCE.MODEL_PATH in your yaml.")
        self.model.load_state_dict(torch.load(self.config.INFERENCE.MODEL_PATH))
        print("Testing uses pretrained model!")
        print(self.config.INFERENCE.MODEL_PATH)
    else:
        if self.config.TEST.USE_LAST_EPOCH:
            # Checkpoint naming matches save_model: <name>_Epoch<k>.pth
            last_epoch_model_path = os.path.join(
                self.model_dir, self.model_file_name + '_Epoch' + str(self.max_epoch_num - 1) + '.pth')
            print("Testing uses last epoch as non-pretrained model!")
            print(last_epoch_model_path)
            self.model.load_state_dict(torch.load(last_epoch_model_path))
        else:
            # best_epoch was tracked during train() via validation loss.
            best_model_path = os.path.join(
                self.model_dir, self.model_file_name + '_Epoch' + str(self.best_epoch) + '.pth')
            print("Testing uses best epoch selected using model selection as non-pretrained model!")
            print(best_model_path)
            self.model.load_state_dict(torch.load(best_model_path))

    self.model = self.model.to(self.config.DEVICE)
    self.model.eval()
    with torch.no_grad():
        for _, test_batch in enumerate(data_loader['test']):
            batch_size = test_batch[0].shape[0]
            data, label = test_batch[0].to(
                self.config.DEVICE), test_batch[1].to(self.config.DEVICE)
            pred_ppg_test, _, _, _ = self.model(data)
            for idx in range(batch_size):
                # batch[2] = subject id, batch[3] = chunk sort index.
                subj_index = test_batch[2][idx]
                sort_index = int(test_batch[3][idx])
                if subj_index not in predictions.keys():
                    predictions[subj_index] = dict()
                    labels[subj_index] = dict()
                predictions[subj_index][sort_index] = pred_ppg_test[idx]
                labels[subj_index][sort_index] = label[idx]

    print('')
    calculate_metrics(predictions, labels, self.config)
def __init__(self, config, data_loader):
    """Inits parameters from args and the writer for TensorboardX.

    Builds the TS-CAN model (wrapped in DataParallel) plus the optimizer
    and OneCycleLR scheduler in train_and_test mode; in only_test mode it
    only builds the model (weights are loaded later in test()).

    Raises:
        ValueError: if config.TOOLBOX_MODE is neither mode.
    """
    super().__init__()
    self.device = torch.device(config.DEVICE)
    self.frame_depth = config.MODEL.TSCAN.FRAME_DEPTH
    self.max_epoch_num = config.TRAIN.EPOCHS
    self.model_dir = config.MODEL.MODEL_DIR
    self.model_file_name = config.TRAIN.MODEL_FILE_NAME
    self.batch_size = config.TRAIN.BATCH_SIZE
    self.num_of_gpu = config.NUM_OF_GPU_TRAIN
    # Frames fed per forward pass must be divisible by this (see train()).
    self.base_len = self.num_of_gpu * self.frame_depth
    self.chunk_len = config.TRAIN.DATA.PREPROCESS.CHUNK_LENGTH
    self.config = config
    self.min_valid_loss = None
    self.best_epoch = 0

    if config.TOOLBOX_MODE == "train_and_test":
        self.model = TSCAN(frame_depth=self.frame_depth, img_size=config.TRAIN.DATA.PREPROCESS.H).to(self.device)
        self.model = torch.nn.DataParallel(self.model, device_ids=list(range(config.NUM_OF_GPU_TRAIN)))

        self.num_train_batches = len(data_loader["train"])
        self.criterion = torch.nn.MSELoss()
        self.optimizer = optim.AdamW(
            self.model.parameters(), lr=config.TRAIN.LR, weight_decay=0)
        # See more details on the OneCycleLR scheduler here:
        # https://pytorch.org/docs/stable/generated/torch.optim.lr_scheduler.OneCycleLR.html
        self.scheduler = torch.optim.lr_scheduler.OneCycleLR(
            self.optimizer, max_lr=config.TRAIN.LR, epochs=config.TRAIN.EPOCHS, steps_per_epoch=self.num_train_batches)
    elif config.TOOLBOX_MODE == "only_test":
        self.model = TSCAN(frame_depth=self.frame_depth, img_size=config.TEST.DATA.PREPROCESS.H).to(self.device)
        self.model = torch.nn.DataParallel(self.model, device_ids=list(range(config.NUM_OF_GPU_TRAIN)))
    else:
        raise ValueError("TS-CAN trainer initialized in incorrect toolbox mode!")
def train(self, data_loader):
    """Training routine for TS-CAN.

    Flattens (N, D, C, H, W) batches into (N*D, C, H, W), trims them to a
    multiple of base_len, and optimizes MSE against the per-frame labels.
    Checkpoints each epoch; tracks the best validation epoch when enabled.

    Raises:
        ValueError: if data_loader["train"] is None.
    """
    if data_loader["train"] is None:
        raise ValueError("No data for train")

    for epoch in range(self.max_epoch_num):
        print('')
        print(f"====Training Epoch: {epoch}====")
        running_loss = 0.0
        epoch_losses = []
        self.model.train()
        # Model Training
        progress = tqdm(data_loader["train"], ncols=80)
        for batch_idx, batch in enumerate(progress):
            progress.set_description("Train epoch %s" % epoch)
            frames = batch[0].to(self.device)
            targets = batch[1].to(self.device)
            N, D, C, H, W = frames.shape
            frames = frames.view(N * D, C, H, W)
            targets = targets.view(-1, 1)
            # Trim so the length is a multiple of base_len (GPUs x frame_depth).
            usable = (N * D) // self.base_len * self.base_len
            frames = frames[:usable]
            targets = targets[:usable]
            self.optimizer.zero_grad()
            outputs = self.model(frames)
            loss = self.criterion(outputs, targets)
            loss.backward()
            self.optimizer.step()
            self.scheduler.step()
            running_loss += loss.item()
            if batch_idx % 100 == 99:  # print every 100 mini-batches
                print(f'[{epoch}, {batch_idx + 1:5d}] loss: {running_loss / 100:.3f}')
                running_loss = 0.0
            epoch_losses.append(loss.item())
            progress.set_postfix(loss=loss.item())
        self.save_model(epoch)
        if not self.config.TEST.USE_LAST_EPOCH:
            valid_loss = self.valid(data_loader)
            print('validation loss: ', valid_loss)
            if self.min_valid_loss is None:
                self.min_valid_loss = valid_loss
                self.best_epoch = epoch
                print("Update best model! Best epoch: {}".format(self.best_epoch))
            elif (valid_loss < self.min_valid_loss):
                self.min_valid_loss = valid_loss
                self.best_epoch = epoch
                print("Update best model! Best epoch: {}".format(self.best_epoch))
    if not self.config.TEST.USE_LAST_EPOCH:
        print("best trained epoch: {}, min_val_loss: {}".format(self.best_epoch, self.min_valid_loss))
def valid(self, data_loader):
    """Model evaluation on the validation dataset.

    Returns:
        float: mean MSE loss over all validation batches.
    Raises:
        ValueError: if data_loader["valid"] is None.
    """
    if data_loader["valid"] is None:
        raise ValueError("No data for valid")

    print('')
    print("===Validating===")
    losses = []
    self.model.eval()
    step_count = 0
    with torch.no_grad():
        progress = tqdm(data_loader["valid"], ncols=80)
        for _, batch in enumerate(progress):
            progress.set_description("Validation")
            frames = batch[0].to(self.device)
            targets = batch[1].to(self.device)
            N, D, C, H, W = frames.shape
            frames = frames.view(N * D, C, H, W)
            targets = targets.view(-1, 1)
            # Same trimming rule as in train().
            usable = (N * D) // self.base_len * self.base_len
            frames = frames[:usable]
            targets = targets[:usable]
            preds = self.model(frames)
            batch_loss = self.criterion(preds, targets)
            losses.append(batch_loss.item())
            step_count += 1
            progress.set_postfix(loss=batch_loss.item())
    return np.mean(np.asarray(losses))
def test(self, data_loader):
    """Model evaluation on the testing dataset.

    Loads the appropriate checkpoint (pretrained path, last epoch, or best
    validation epoch), runs inference, and groups predictions/labels per
    subject and per chunk before computing metrics.

    Raises:
        ValueError: if there is no test data or the inference path is invalid.
    """
    if data_loader["test"] is None:
        raise ValueError("No data for test")

    print('')
    print("===Testing===")
    # subject id -> {chunk index -> BVP slice}
    predictions = dict()
    labels = dict()

    if self.config.TOOLBOX_MODE == "only_test":
        if not os.path.exists(self.config.INFERENCE.MODEL_PATH):
            raise ValueError("Inference model path error! Please check INFERENCE.MODEL_PATH in your yaml.")
        self.model.load_state_dict(torch.load(self.config.INFERENCE.MODEL_PATH))
        print("Testing uses pretrained model!")
    else:
        if self.config.TEST.USE_LAST_EPOCH:
            last_epoch_model_path = os.path.join(
                self.model_dir, self.model_file_name + '_Epoch' + str(self.max_epoch_num - 1) + '.pth')
            print("Testing uses last epoch as non-pretrained model!")
            print(last_epoch_model_path)
            self.model.load_state_dict(torch.load(last_epoch_model_path))
        else:
            best_model_path = os.path.join(
                self.model_dir, self.model_file_name + '_Epoch' + str(self.best_epoch) + '.pth')
            print("Testing uses best epoch selected using model selection as non-pretrained model!")
            print(best_model_path)
            self.model.load_state_dict(torch.load(best_model_path))

    self.model = self.model.to(self.config.DEVICE)
    self.model.eval()
    with torch.no_grad():
        for _, test_batch in enumerate(data_loader['test']):
            batch_size = test_batch[0].shape[0]
            data_test, labels_test = test_batch[0].to(
                self.config.DEVICE), test_batch[1].to(self.config.DEVICE)
            N, D, C, H, W = data_test.shape
            data_test = data_test.view(N * D, C, H, W)
            labels_test = labels_test.view(-1, 1)
            # Trim to a multiple of base_len, mirroring train()/valid().
            data_test = data_test[:(N * D) // self.base_len * self.base_len]
            labels_test = labels_test[:(N * D) // self.base_len * self.base_len]
            pred_ppg_test = self.model(data_test)
            for idx in range(batch_size):
                # batch[2] = subject id, batch[3] = chunk sort index.
                subj_index = test_batch[2][idx]
                sort_index = int(test_batch[3][idx])
                if subj_index not in predictions.keys():
                    predictions[subj_index] = dict()
                    labels[subj_index] = dict()
                # Output is flat over frames; slice back out this sample's chunk.
                predictions[subj_index][sort_index] = pred_ppg_test[idx * self.chunk_len:(idx + 1) * self.chunk_len]
                labels[subj_index][sort_index] = labels_test[idx * self.chunk_len:(idx + 1) * self.chunk_len]

    print('')
    calculate_metrics(predictions, labels, self.config)
def save_model(self, index):
    """Save the model weights for the given epoch index.

    The checkpoint is written to <model_dir>/<model_file_name>_Epoch<index>.pth.

    Args:
        index: epoch number embedded in the checkpoint filename.
    """
    # FIX: exist_ok avoids the exists()/makedirs TOCTOU race when several
    # trainers (or DataParallel launches) share the same directory.
    os.makedirs(self.model_dir, exist_ok=True)
    model_path = os.path.join(
        self.model_dir, self.model_file_name + '_Epoch' + str(index) + '.pth')
    torch.save(self.model.state_dict(), model_path)
    print('Saved Model Path: ', model_path)
-------------------------------------------------------------------------------- 1 | from unsupervised_methods.methods.CHROME_DEHAAN import * 2 | from unsupervised_methods.methods.ICA_POH import * 3 | from unsupervised_methods.methods.POS_WANG import * 4 | from unsupervised_methods.methods.LGI import * 5 | from unsupervised_methods.methods.GREEN import * 6 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/unsupervised_methods/methods/CHROME_DEHAAN.py: -------------------------------------------------------------------------------- 1 | # The Chrominance Method from: De Haan, G., & Jeanne, V. (2013). 2 | # Robust pulse rate from chrominance-based rPPG. IEEE Transactions on Biomedical Engineering, 60(10), 2878-2886. 3 | # DOI: 10.1109/TBME.2013.2266196 4 | import numpy as np 5 | import math 6 | from scipy import signal 7 | 8 | import unsupervised_methods.utils as utils 9 | 10 | 11 | def CHROME_DEHAAN(frames,FS): 12 | LPF = 0.7 13 | HPF = 2.5 14 | WinSec = 1.6 15 | 16 | RGB = process_video(frames) 17 | FN = RGB.shape[0] 18 | NyquistF = 1/2*FS 19 | B, A = signal.butter(3, [LPF/NyquistF, HPF/NyquistF], 'bandpass') 20 | 21 | WinL = math.ceil(WinSec*FS) 22 | if(WinL % 2): 23 | WinL = WinL+1 24 | NWin = math.floor((FN-WinL//2)/(WinL//2)) 25 | S = np.zeros((NWin, 1)) 26 | WinS = 0 27 | WinM = int(WinS+WinL//2) 28 | WinE = WinS+WinL 29 | totallen = (WinL//2)*(NWin+1) 30 | S = np.zeros(totallen) 31 | 32 | for i in range(NWin): 33 | RGBBase = np.mean(RGB[WinS:WinE, :], axis=0) 34 | RGBNorm = np.zeros((WinE-WinS, 3)) 35 | for temp in range(WinS, WinE): 36 | RGBNorm[temp-WinS] = np.true_divide(RGB[temp], RGBBase)-1 37 | Xs = np.squeeze(3*RGBNorm[:, 0]-2*RGBNorm[:, 1]) 38 | Ys = np.squeeze(1.5*RGBNorm[:, 0]+RGBNorm[:, 1]-1.5*RGBNorm[:, 2]) 39 | Xf = signal.filtfilt(B, A, Xs, axis=0) 40 | Yf = signal.filtfilt(B, A, Ys) 41 | 42 | Alpha = np.std(Xf) / np.std(Yf) 43 | SWin = Xf-Alpha*Yf 44 | SWin = np.multiply(SWin, 
def GREEN(frames):
    """GREEN rPPG method (Verkruysse, Svaasand & Nelson, 2008).

    Uses the spatially averaged green channel directly as the BVP estimate.

    Args:
        frames: iterable of HxWx3 frames.
    Returns:
        1-D numpy array with one sample per frame.
    """
    channel_stack = utils.process_video(frames)   # shape (1, 3, num_frames)
    green_trace = channel_stack[:, 1, :]          # pick the green channel
    return green_trace.reshape(-1)
def ICA_POH(frames, FS):
    """ICA rPPG method (Poh, McDuff & Picard, 2010, DOI 10.1364/OE.18.010762).

    Detrends and z-scores each color channel, unmixes them with JADE ICA,
    picks the component with the strongest spectral peak, and band-passes it.

    Args:
        frames: iterable of HxWx3 frames.
        FS: sampling (frame) rate in Hz.
    Returns:
        1-D numpy array: the filtered BVP estimate.
    """
    # Cut off frequency.
    LPF = 0.7
    HPF = 2.5
    RGB = process_video(frames)

    NyquistF = 1 / 2 * FS
    BGRNorm = np.zeros(RGB.shape)
    Lambda = 100  # detrending smoothness prior
    for c in range(3):
        BGRDetrend = utils.detrend(RGB[:, c], Lambda)
        BGRNorm[:, c] = (BGRDetrend - np.mean(BGRDetrend)) / np.std(BGRDetrend)
    # ica expects channels x samples, hence the conjugate transpose.
    _, S = ica(np.mat(BGRNorm).H, 3)

    # select BVP Source: the component whose power spectrum is most peaked.
    MaxPx = np.zeros((1, 3))
    for c in range(3):
        FF = np.fft.fft(S[c, :])
        F = np.arange(0, FF.shape[1]) / FF.shape[1] * FS * 60
        FF = FF[:, 1:]   # drop the DC bin
        FF = FF[0]
        N = FF.shape[0]
        Px = np.abs(FF[:math.floor(N / 2)])
        Px = np.multiply(Px, Px)          # power
        Fx = np.arange(0, N / 2) / (N / 2) * NyquistF
        Px = Px / np.sum(Px, axis=0)      # normalize so max reflects peakedness
        MaxPx[0, c] = np.max(Px)
    MaxComp = np.argmax(MaxPx)
    BVP_I = S[MaxComp, :]
    B, A = signal.butter(3, [LPF / NyquistF, HPF / NyquistF], 'bandpass')
    BVP_F = signal.filtfilt(B, A, np.real(BVP_I).astype(np.double))

    BVP = BVP_F[0]
    return BVP
def process_video(frames):
    """Calculates the per-channel spatial average of each frame.

    Args:
        frames: iterable of HxWxC frames (numpy arrays).
    Returns:
        numpy array of shape (num_frames, C) with the mean value per channel.
    """
    means = []
    for frame in frames:
        # FIX: the original bound the builtin name `sum`; use a descriptive
        # local instead. Total per channel over both spatial axes.
        channel_total = np.sum(np.sum(frame, axis=0), axis=0)
        means.append(channel_total / (frame.shape[0] * frame.shape[1]))
    return np.asarray(means)
def jade(X, m, Wprev):
    """JADE blind source separation (joint approximate diagonalization).

    Whitens X, builds fourth-order cumulant matrices, and jointly
    diagonalizes them with Givens rotations until all rotation angles fall
    below a threshold.

    Args:
        X: channels x samples observation matrix (np.mat-compatible).
        m: number of sources to extract.
        Wprev: previous unmixing matrix, or 0 to start from identity.
    Returns:
        (A, S): estimated mixing matrix and separated sources.
    """
    n = X.shape[0]
    T = X.shape[1]
    nem = m
    seuil = 1 / math.sqrt(T) / 100  # rotation-angle convergence threshold
    if m < n:
        # Whitening by PCA when fewer sources than channels are requested.
        # NOTE(review): np.ones(m, 1) passes 1 as a dtype and would raise
        # TypeError; this branch appears untested (callers use m == n == 3).
        D, U = np.linalg.eig(np.matmul(X, np.mat(X).H) / T)
        Diag = D
        k = np.argsort(Diag)
        pu = Diag[k]
        ibl = np.sqrt(pu[n - m:n] - np.mean(pu[0:n - m]))
        bl = np.true_divide(np.ones(m, 1), ibl)
        W = np.matmul(np.diag(bl), np.transpose(U[0:n, k[n - m:n]]))
        IW = np.matmul(U[0:n, k[n - m:n]], np.diag(ibl))
    else:
        # Square case: whiten with the matrix square root of the covariance.
        IW = linalg.sqrtm(np.matmul(X, X.H) / T)
        W = np.linalg.inv(IW)

    Y = np.mat(np.matmul(W, X))
    R = np.matmul(Y, Y.H) / T   # covariance
    C = np.matmul(Y, Y.T) / T   # pseudo-covariance
    Q = np.zeros((m * m * m * m, 1))
    index = 0

    # Fourth-order cumulants of the whitened signals.
    for lx in range(m):
        Y1 = Y[lx, :]
        for kx in range(m):
            Yk1 = np.multiply(Y1, np.conj(Y[kx, :]))
            for jx in range(m):
                Yjk1 = np.multiply(Yk1, np.conj(Y[jx, :]))
                for ix in range(m):
                    Q[index] = np.matmul(Yjk1 / math.sqrt(T), Y[ix, :].T / math.sqrt(
                        T)) - R[ix, jx] * R[lx, kx] - R[ix, kx] * R[lx, jx] - C[ix, lx] * np.conj(C[jx, kx])
                    index += 1
    # Compute and Reshape the significant Eigen
    D, U = np.linalg.eig(Q.reshape(m * m, m * m))
    Diag = abs(D)
    K = np.argsort(Diag)
    la = Diag[K]
    M = np.zeros((m, nem * m), dtype=complex)
    Z = np.zeros(m)
    h = m * m - 1
    # Keep the nem most significant eigen-matrices, weighted by eigenvalue.
    for u in range(0, nem * m, m):
        Z = U[:, K[h]].reshape((m, m))
        M[:, u:u + m] = la[h] * Z
        h = h - 1
    # Approximate the Diagonalization of the Eigen Matrices:
    B = np.array([[1, 0, 0], [0, 1, 1], [0, 0 - 1j, 0 + 1j]])
    Bt = np.mat(B).H

    encore = 1
    if Wprev == 0:
        V = np.eye(m).astype(complex)
    else:
        V = np.linalg.inv(Wprev)
    # Main Loop: sweep Givens rotations until no angle exceeds the threshold.
    while encore:
        encore = 0
        for p in range(m - 1):
            for q in range(p + 1, m):
                Ip = np.arange(p, nem * m, m)
                Iq = np.arange(q, nem * m, m)
                g = np.mat([M[p, Ip] - M[q, Iq], M[p, Iq], M[q, Ip]])
                temp1 = np.matmul(g, g.H)
                temp2 = np.matmul(B, temp1)
                temp = np.matmul(temp2, Bt)
                D, vcp = np.linalg.eig(np.real(temp))
                K = np.argsort(D)
                la = D[K]
                angles = vcp[:, K[2]]  # eigenvector of the largest eigenvalue
                if angles[0, 0] < 0:
                    angles = -angles
                c = np.sqrt(0.5 + angles[0, 0] / 2)
                s = 0.5 * (angles[1, 0] - 1j * angles[2, 0]) / c

                if abs(s) > seuil:
                    encore = 1  # at least one significant rotation: keep sweeping
                    pair = [p, q]
                    G = np.mat([[c, -np.conj(s)], [s, c]])  # Givens Rotation
                    V[:, pair] = np.matmul(V[:, pair], G)
                    M[pair, :] = np.matmul(G.H, M[pair, :])
                    temp1 = c * M[:, Ip] + s * M[:, Iq]
                    temp2 = -np.conj(s) * M[:, Ip] + c * M[:, Iq]
                    temp = np.concatenate((temp1, temp2), axis=1)
                    M[:, Ip] = temp1
                    M[:, Iq] = temp2

    # Whiten the Matrix
    # Estimation of the Mixing Matrix and Signal Separation
    A = np.matmul(IW, V)
    S = np.matmul(np.mat(V).H, Y)
    return A, S
def LGI(frames):
    """LGI rPPG method (Pilz et al., CVPR Workshops 2018).

    Projects the averaged RGB traces onto the orthogonal complement of the
    dominant SVD direction and returns the green component of the result.

    Args:
        frames: iterable of HxWx3 frames.
    Returns:
        1-D numpy array with one BVP sample per frame.
    """
    observations = utils.process_video(frames)        # (1, 3, num_frames)
    U, _, _ = np.linalg.svd(observations)
    principal = np.expand_dims(U[:, :, 0], 2)         # dominant direction
    # Projector onto the complement of the principal direction: I - s s^T.
    outer = np.matmul(principal, np.swapaxes(principal, 1, 2))
    projector = np.tile(np.identity(3), (principal.shape[0], 1, 1)) - outer
    projected = np.matmul(projector, observations)
    return projected[:, 1, :].reshape(-1)
def PBV(frames):
    """PBV rPPG method (De Haan & Van Leest, 2014).

    Weights the normalized color channels by the blood-volume pulse
    signature and solves for the projection that isolates the pulse.

    Args:
        frames: iterable of HxWx3 frames.
    Returns:
        1-D numpy array with one BVP sample per frame.
    """
    rgb = utils.process_video(frames)                 # (1, 3, num_frames)
    channel_means = np.mean(rgb, axis=2)

    # Temporally normalize each channel by its mean.
    norm_r = rgb[:, 0, :] / np.expand_dims(channel_means[:, 0], axis=1)
    norm_g = rgb[:, 1, :] / np.expand_dims(channel_means[:, 1], axis=1)
    norm_b = rgb[:, 2, :] / np.expand_dims(channel_means[:, 2], axis=1)

    # PBV signature: per-channel std, normalized by total variance.
    pbv_n = np.array([np.std(norm_r, axis=1), np.std(norm_g, axis=1), np.std(norm_b, axis=1)])
    pbv_d = np.sqrt(np.var(norm_r, axis=1) + np.var(norm_g, axis=1) + np.var(norm_b, axis=1))
    pbv = pbv_n / pbv_d

    stacked = np.array([norm_r, norm_g, norm_b])
    C = np.swapaxes(stacked, 0, 1)
    Ct = np.swapaxes(np.swapaxes(np.transpose(C), 0, 2), 1, 2)
    Q = np.matmul(C, Ct)
    # Solve Q w = pbv for the projection weights.
    W = np.linalg.solve(Q, np.swapaxes(pbv, 0, 1))

    numerator = np.matmul(Ct, np.expand_dims(W, axis=2))
    denominator = np.matmul(np.swapaxes(np.expand_dims(pbv.T, axis=2), 1, 2),
                            np.expand_dims(W, axis=2))
    bvp = numerator / denominator
    return bvp.squeeze(axis=2).reshape(-1)
np.transpose(RGB_array, (1, 2, 0)) 51 | 52 | Q = np.matmul(C, Ct) 53 | W = np.linalg.solve(Q, np.swapaxes(PBV, 0, 1)) 54 | 55 | Numerator = np.matmul(Ct, np.expand_dims(W, axis=2)) 56 | Denominator = np.matmul(np.swapaxes(np.expand_dims(PBV.T, axis=2), 1, 2), np.expand_dims(W, axis=2)) 57 | BVP = Numerator / Denominator 58 | BVP = BVP.squeeze(axis=2).reshape(-1) 59 | return BVP 60 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/unsupervised_methods/methods/POS_WANG.py: -------------------------------------------------------------------------------- 1 | """POS 2 | Wang, W., den Brinker, A. C., Stuijk, S., & de Haan, G. (2017). 3 | Algorithmic principles of remote PPG. 4 | IEEE Transactions on Biomedical Engineering, 64(7), 1479-1491. 5 | """ 6 | 7 | import math 8 | 9 | import numpy as np 10 | from scipy import signal 11 | from unsupervised_methods import utils 12 | 13 | 14 | def _process_video(frames): 15 | """Calculates the average value of each frame.""" 16 | RGB = [] 17 | for frame in frames: 18 | summation = np.sum(np.sum(frame, axis=0), axis=0) 19 | RGB.append(summation / (frame.shape[0] * frame.shape[1])) 20 | return np.asarray(RGB) 21 | 22 | 23 | def POS_WANG(frames, fs): 24 | WinSec = 1.6 25 | RGB = _process_video(frames) 26 | N = RGB.shape[0] 27 | H = np.zeros((1, N)) 28 | l = math.ceil(WinSec * fs) 29 | 30 | for n in range(N): 31 | m = n - l 32 | if m >= 0: 33 | Cn = np.true_divide(RGB[m:n, :], np.mean(RGB[m:n, :], axis=0)) 34 | Cn = np.mat(Cn).H 35 | S = np.matmul(np.array([[0, 1, -1], [-2, 1, 1]]), Cn) 36 | h = S[0, :] + (np.std(S[0, :]) / np.std(S[1, :])) * S[1, :] 37 | mean_h = np.mean(h) 38 | for temp in range(h.shape[1]): 39 | h[0, temp] = h[0, temp] - mean_h 40 | H[0, m:n] = H[0, m:n] + (h[0]) 41 | 42 | BVP = H 43 | BVP = utils.detrend(np.mat(BVP).H, 100) 44 | BVP = np.asarray(np.transpose(BVP))[0] 45 | b, a = signal.butter(1, [0.75 / fs * 2, 3 / fs * 2], btype='bandpass') 46 | BVP = 
def POS_WANG(frames, fs):
    """POS rPPG method (Wang et al., 2017, IEEE TBME 64(7):1479-1491).

    Projects temporally normalized RGB windows onto the plane-orthogonal-
    to-skin directions, overlap-adds the windowed pulses, then detrends and
    band-passes the result.

    Args:
        frames: iterable of HxWx3 frames.
        fs: sampling (frame) rate in Hz.
    Returns:
        1-D numpy array: the filtered BVP estimate.
    """
    WinSec = 1.6
    RGB = _process_video(frames)
    N = RGB.shape[0]
    H = np.zeros((1, N))
    l = math.ceil(WinSec * fs)  # sliding-window length in frames

    for n in range(N):
        m = n - l
        if m >= 0:  # only once a full window is available
            # Temporal normalization of the window by its channel means.
            Cn = np.true_divide(RGB[m:n, :], np.mean(RGB[m:n, :], axis=0))
            Cn = np.mat(Cn).H
            # Fixed POS projection matrix.
            S = np.matmul(np.array([[0, 1, -1], [-2, 1, 1]]), Cn)
            # Alpha-tuned combination of the two projected signals.
            h = S[0, :] + (np.std(S[0, :]) / np.std(S[1, :])) * S[1, :]
            mean_h = np.mean(h)
            for temp in range(h.shape[1]):
                h[0, temp] = h[0, temp] - mean_h
            # Overlap-add into the running signal.
            H[0, m:n] = H[0, m:n] + (h[0])

    BVP = H
    BVP = utils.detrend(np.mat(BVP).H, 100)
    BVP = np.asarray(np.transpose(BVP))[0]
    b, a = signal.butter(1, [0.75 / fs * 2, 3 / fs * 2], btype='bandpass')
    BVP = signal.filtfilt(b, a, BVP.astype(np.double))
    return BVP
def unsupervised_predict(config, data_loader, method_name):
    """Model evaluation on the testing dataset.

    Runs the selected unsupervised rPPG method on every chunk, derives heart
    rate via peak detection or FFT, and prints the configured metrics.

    Args:
        config: toolbox config (uses UNSUPERVISED.DATA.FS, UNSUPERVISED.METRICS,
            INFERENCE.EVALUATION_METHOD).
        data_loader: dict with an "unsupervised" DataLoader.
        method_name: one of "POS", "CHROM", "ICA", "GREEN", "LGI", "PBV".
    Raises:
        ValueError: on missing data, unknown method, or unknown metric.
    """
    if data_loader["unsupervised"] is None:
        raise ValueError("No data for unsupervised method predicting")
    print("===Unsupervised Method ( " + method_name + " ) Predicting ===")
    predict_hr_peak_all = []
    gt_hr_peak_all = []
    predict_hr_fft_all = []
    gt_hr_fft_all = []
    sbar = tqdm(data_loader["unsupervised"], ncols=80)
    for _, test_batch in enumerate(sbar):
        batch_size = test_batch[0].shape[0]
        for idx in range(batch_size):
            data_input, labels_input = test_batch[0][idx].cpu().numpy(), test_batch[1][idx].cpu().numpy()
            # Dispatch to the requested unsupervised method.
            if method_name == "POS":
                BVP = POS_WANG(data_input, config.UNSUPERVISED.DATA.FS)
            elif method_name == "CHROM":
                BVP = CHROME_DEHAAN(data_input, config.UNSUPERVISED.DATA.FS)
            elif method_name == "ICA":
                BVP = ICA_POH(data_input, config.UNSUPERVISED.DATA.FS)
            elif method_name == "GREEN":
                BVP = GREEN(data_input)
            elif method_name == "LGI":
                BVP = LGI(data_input)
            elif method_name == "PBV":
                BVP = PBV(data_input)
            else:
                raise ValueError("unsupervised method name wrong!")

            # HR extraction: peak detection and/or FFT, per config.
            if config.INFERENCE.EVALUATION_METHOD == "peak detection":
                gt_hr, pre_hr = calculate_metric_per_video(BVP, labels_input, diff_flag=False,
                                                           fs=config.UNSUPERVISED.DATA.FS, hr_method='Peak')
                predict_hr_peak_all.append(pre_hr)
                gt_hr_peak_all.append(gt_hr)
            if config.INFERENCE.EVALUATION_METHOD == "FFT":
                gt_fft_hr, pre_fft_hr = calculate_metric_per_video(BVP, labels_input, diff_flag=False,
                                                                   fs=config.UNSUPERVISED.DATA.FS, hr_method='FFT')
                predict_hr_fft_all.append(pre_fft_hr)
                gt_hr_fft_all.append(gt_fft_hr)
    print("Used Unsupervised Method: " + method_name)
    if config.INFERENCE.EVALUATION_METHOD == "peak detection":
        predict_hr_peak_all = np.array(predict_hr_peak_all)
        gt_hr_peak_all = np.array(gt_hr_peak_all)
        for metric in config.UNSUPERVISED.METRICS:
            if metric == "MAE":
                MAE_PEAK = np.mean(np.abs(predict_hr_peak_all - gt_hr_peak_all))
                print("Peak MAE (Peak Label):{0}".format(MAE_PEAK))
            elif metric == "RMSE":
                RMSE_PEAK = np.sqrt(
                    np.mean(np.square(predict_hr_peak_all - gt_hr_peak_all)))
                print("PEAK RMSE (Peak Label):{0}".format(RMSE_PEAK))
            elif metric == "MAPE":
                MAPE_PEAK = np.mean(
                    np.abs((predict_hr_peak_all - gt_hr_peak_all) / gt_hr_peak_all)) * 100
                print("PEAK MAPE (Peak Label):{0}".format(MAPE_PEAK))
            elif metric == "Pearson":
                Pearson_PEAK = np.corrcoef(predict_hr_peak_all, gt_hr_peak_all)
                print("PEAK Pearson (Peak Label):{0}".format(Pearson_PEAK[0][1]))
            else:
                raise ValueError("Wrong Test Metric Type")
    if config.INFERENCE.EVALUATION_METHOD == "FFT":
        predict_hr_fft_all = np.array(predict_hr_fft_all)
        gt_hr_fft_all = np.array(gt_hr_fft_all)
        for metric in config.UNSUPERVISED.METRICS:
            if metric == "MAE":
                MAE_PEAK = np.mean(np.abs(predict_hr_fft_all - gt_hr_fft_all))
                print("FFT MAE (FFT Label):{0}".format(MAE_PEAK))
            elif metric == "RMSE":
                RMSE_PEAK = np.sqrt(
                    np.mean(np.square(predict_hr_fft_all - gt_hr_fft_all)))
                print("FFT RMSE (FFT Label):{0}".format(RMSE_PEAK))
            elif metric == "MAPE":
                MAPE_PEAK = np.mean(
                    np.abs((predict_hr_fft_all - gt_hr_fft_all) / gt_hr_fft_all)) * 100
                print("FFT MAPE (FFT Label):{0}".format(MAPE_PEAK))
            elif metric == "Pearson":
                Pearson_PEAK = np.corrcoef(predict_hr_fft_all, gt_hr_fft_all)
                print("FFT Pearson (FFT Label):{0}".format(Pearson_PEAK[0][1]))
            else:
                raise ValueError("Wrong Test Metric Type")
def detrend(input_signal, lambda_value):
    """Smoothness-priors detrending.

    Removes the low-frequency trend by subtracting the regularized
    least-squares fit: output = (I - (I + lambda^2 D^T D)^-1) x, where D is
    the second-difference operator.

    Args:
        input_signal: 1-D array of length N.
        lambda_value: regularization strength (larger = smoother trend).
    Returns:
        numpy array: the detrended signal.
    """
    signal_length = input_signal.shape[0]
    observation = np.identity(signal_length)
    # Second-difference operator rows: [1, -2, 1] on offsets 0, 1, 2.
    second_diff = np.array([np.ones(signal_length),
                            -2 * np.ones(signal_length),
                            np.ones(signal_length)])
    D = sparse.spdiags(second_diff, np.array([0, 1, 2]),
                       (signal_length - 2), signal_length).toarray()
    regularized_inverse = np.linalg.inv(observation + (lambda_value ** 2) * np.dot(D.T, D))
    return np.dot(observation - regularized_inverse, input_signal)
RGB.transpose(1, 0).reshape(1, 3, -1) 35 | return np.asarray(RGB) 36 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/wip/label/COHFACE_Comparison.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/THU-CS-PI/MMPD_rPPG_dataset/b88d2e248cf4d0b362c90492d189d7bd68ae85e7/rppg-Toolbox_MMPD/wip/label/COHFACE_Comparison.xlsx -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/wip/label/PURE_Comparison.csv: -------------------------------------------------------------------------------- 1 | VideoID,Peak Detection,FFT,Sensor,Preferred 2 | 101,68.21627084,65.91796875,70.12518666,Peak Detection 3 | 102,74.62082912,73.828125,77.59145427,Peak Detection 4 | 103,77.63401109,76.46484375,77.00811194,Peak Detection 5 | 104,74.75328947,73.38867188,75.1799273,Peak Detection 6 | 105,71.39588101,70.3125,72.6378837,Peak Detection 7 | 106,69.89409985,69.43359375,72.05473373,FFT 8 | 201,71.96401799,69.43359375,72.98443291,Peak Detection 9 | 202,79.34925216,80.859375,81.45601023,Peak Detection 10 | 203,72.11740042,72.0703125,73.93230486,Peak Detection 11 | 204,72.66055046,70.3125,72.33227777,Peak Detection 12 | 205,74.17714744,75.5859375,74.80115274,Peak Detection 13 | 206,70.51349921,68.5546875,72.51022521,Peak Detection 14 | 301,54.35761589,53.61328125,53.52918685,Peak Detection 15 | 302,57.25826811,57.12890625,56.98753428,FFT 16 | 303,57.51879699,58.0078125,58.43378471,Peak Detection 17 | 304,56.49306345,56.25,56.74741805,FFT 18 | 305,56.23510723,56.25,56.6887038,Peak Detection 19 | 306,58.18759936,55.37109375,59.29270186,Peak Detection 20 | 401,61.35239142,61.5234375,60.25468956,Peak Detection 21 | 402,65.61797753,65.91796875,66.65202283,Peak Detection 22 | 403,69.30917327,70.75195313,68.66889037,Peak Detection 23 | 404,70.36620984,71.63085938,70.18861681,FFT 24 | 405,65.47179758,67.67578125,65.68627451,FFT 25 | 
406,64.30235568,65.91796875,64.86323907,FFT 26 | 501,56.86172967,90.52734375,45.97156639,Peak Detection 27 | 502,59.43611887,55.37109375,51.35948859,Peak Detection 28 | 503,53.98388541,50.53710938,50.8158302,FFT 29 | 504,53.08310992,51.85546875,53.66353712,Peak Detection 30 | 505,52.83018868,50.09765625,48.89611725,Peak Detection 31 | 506,52.85638014,50.9765625,55.3379845,Peak Detection 32 | 601,65.35122336227309,64.16015625,64.97895277207392,FFT 33 | 603,70.68273092,68.5546875,74.58625277,FFT 34 | 604,73.35423197,73.828125,72.30286221,FFT 35 | 605,67.71159875,65.0390625,68.3076528,Peak Detection 36 | 606,68.4375,68.5546875,68.49642127,FFT 37 | 701,126.8805892,126.5625,125.2667185,Peak Detection 38 | 702,100.6075334,134.0332031,134.789348,Peak Detection 39 | 703,127.5622337,126.5625,127.0706958,FFT 40 | 704,126.5720081,125.2441406,126.0055679,Peak Detection 41 | 705,124.4984161,136.2304688,132.3242613,Peak Detection 42 | 706,127.5407292,133.59375,131.7253593,Peak Detection 43 | 801,52.22896333,46.58203125,49.49410559,Peak Detection 44 | 802,60.52009456,60.64453125,58.39284802,Peak Detection 45 | 803,50.06954103,50.53710938,51.43167077,Peak Detection 46 | 804,50.70422535,50.09765625,55.14686541,FFT 47 | 805,52.37068966,52.734375,52.52156965,Peak Detection 48 | 806,54.38916384,54.4921875,54.82616487,Peak Detection 49 | 901,46.42665234,90.52734375,46.53237778,Peak Detection 50 | 902,52.8440367,54.4921875,50.81521197,Peak Detection 51 | 903,48.25737265,47.90039063,48.31095951,Peak Detection 52 | 904,50.70422535,47.90039063,47.96425083,Peak Detection 53 | 905,48.14121423,47.4609375,46.95608629,Peak Detection 54 | 906,48.66129452,49.21875,48.34288613,Peak Detection 55 | 1001,83.17143607,79.1015625,83.48221649,Peak Detection 56 | 1002,97.44637385,93.1640625,98.88562753,Peak Detection 57 | 1003,73.76391982,71.63085938,75.07454506,Peak Detection 58 | 1004,75.11530859,75.5859375,78.36825054,FFT 59 | 1005,74.76340694,73.828125,77.30624839,FFT 60 | 
1006,78.19942423,79.1015625,78.47045161,FFT 61 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/wip/label/PURE_Comparison.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/THU-CS-PI/MMPD_rPPG_dataset/b88d2e248cf4d0b362c90492d189d7bd68ae85e7/rppg-Toolbox_MMPD/wip/label/PURE_Comparison.xlsx -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/wip/label/UBFC_Comparison.csv: -------------------------------------------------------------------------------- 1 | VideoID,Peak Detection,FFT,Sensor,Preferred,, 2 | 1,107.95,109.86,106.7,FFT,, 3 | 3,95.42,88.77,101.17,Peak Detection,, 4 | 4,107.44,112.5,112.27,Peak Detection,, 5 | 5,99.40828402,101.0742188,98.27691419,FFT,, 6 | 8,99.41,101.07,98.28,Peak Detection,, 7 | 9,108.9,107.23,107.23,Peak Detection,, 8 | 10,110.5341987,108.984375,109.8088735,Peak Detection,, 9 | 11,123.7388724,128.3203125,96.21050384,Peak Detection,, 10 | 12,68.80733945,69.43359375,65.96596229,Peak Detection,, 11 | 13,107.5697211,109.8632813,107.1013712,Peak Detection,, 12 | 14,77.54688292,70.3125,80.41242649,Peak Detection,, 13 | 15,114.9700599,112.5,115.7866832,Peak Detection,, 14 | 16,93.19526627,90.96679688,91.79846341,Peak Detection,, 15 | 17,86.59793814,88.76953125,85.40310313,Peak Detection,, 16 | 18,118.9349112,124.8046875,103.7622468,Peak Detection,, 17 | 20,127.027027,133.59375,60.22508929,FFT,, 18 | 22,102.6785714,106.3476563,101.7930556,Peak Detection,, 19 | 23,65.72769953,69.43359375,63.28856844,Peak Detection,, 20 | 24,106.3595393,87.01171875,84.59167297,Peak Detection,, 21 | 25,113.8290479,113.3789063,91.53287293,BOTH,, 22 | 26,98.83321894,95.80078125,73.5347138,Peak Detection,, 23 | 27,108.5553997,111.6210938,89.36670161,Peak Detection,, 24 | 30,98.31181728,102.8320313,92.71101085,Peak Detection,, 25 | 31,78.64945382,77.34375,78.23751603,Peak Detection,, 26 | 
32,105.5666004,96.6796875,107.328759,Peak Detection,, 27 | 33,117.2135285,113.3789063,117.1761381,BOTH,, 28 | 34,116.2444113,115.1367188,116.5466437,Peak Detection,, 29 | 35,104.0156709,116.4550781,102.3579532,Peak Detection,, 30 | 36,118.7592319,121.7285156,119.0246248,Peak Detection,, 31 | 37,60.23976024,55.37109375,62.97210433,Peak Detection,, 32 | 38,109.7345133,112.0605469,109.9967902,Peak Detection,, 33 | 39,84.82142857,86.1328125,85.02252479,Peak Detection,, 34 | 40,86.18453865,87.890625,86.96957544,Peak Detection,, 35 | 41,100.4444444,100.1953125,100.77938,Peak Detection,, 36 | 42,95.39375929,98.4375,93.86798531,Peak Detection,, 37 | 43,99.34771701,97.55859375,97.04197661,Peak Detection,, 38 | 44,82.84142071,79.1015625,82.21315469,Peak Detection,, 39 | 45,110.2040816,111.6210938,110.8200901,Peak Detection,, 40 | 46,97.12556732,91.40625,96.68196032,Peak Detection,, 41 | 47,109.2824887,105.46875,111.3776075,Peak Detection,, 42 | 48,90.97472924,91.40625,87.41854959,BOTH,, 43 | 49,87.91317218,86.1328125,87.56820452,BOTH,, 44 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/wip/label/UBFC_Comparison.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/THU-CS-PI/MMPD_rPPG_dataset/b88d2e248cf4d0b362c90492d189d7bd68ae85e7/rppg-Toolbox_MMPD/wip/label/UBFC_Comparison.xlsx -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/wip/label/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/THU-CS-PI/MMPD_rPPG_dataset/b88d2e248cf4d0b362c90492d189d7bd68ae85e7/rppg-Toolbox_MMPD/wip/label/__init__.py -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/wip/label/comparision_PURE.csv: -------------------------------------------------------------------------------- 1 | 
sample,PeakDetection,FFT,Sensor,Preference,PD-Sensor,FFT-Sensor 2 | 101,68.21627084,65.91796875,70.12518666,PeakDetection,PeakDetection,4.20721791 3 | 102,74.62082912,73.828125,77.59145427,PeakDetection,2.970625153,3.763329273 4 | 103,77.63401109,76.46484375,77.00811194,FFT,FFT,0.543268195 5 | 104,74.75328947,73.38867188,75.1799273,PeakDetection,0.426637828,1.791255427 6 | 105,71.39588101,70.3125,72.6378837,PeakDetection,1.242002697,2.325383704 7 | 106,69.89409985,69.43359375,72.05473373,PeakDetection,2.160633879,2.621139978 8 | 201,71.96401799,69.43359375,72.98443291,PeakDetection,1.020414922,3.550839163 9 | 202,79.34925216,80.859375,81.45601023,FFT,2.106758065,0.59663523 10 | 203,72.11740042,72.0703125,73.93230486,PeakDetection,1.814904439,1.861992358 11 | 204,72.66055046,70.3125,72.33227777,PeakDetection,0.328272694,2.019777765 12 | 205,74.17714744,75.5859375,74.80115274,PeakDetection,0.624005293,0.784784762 13 | 206,70.51349921,68.5546875,72.51022521,PeakDetection,1.996726008,3.955537714 14 | 301,54.35761589,53.61328125,53.52918685,FFT,0.828429047,0.084094403 15 | 302,57.25826811,57.12890625,56.98753428,FFT,0.270733833,0.141371969 16 | 303,57.51879699,58.0078125,58.43378471,FFT,0.914987722,0.425972214 17 | 304,56.49306345,56.25,56.74741805,PeakDetection,0.254354599,0.497418051 18 | 305,56.23510723,56.25,56.6887038,FFT,0.453596572,0.4387038 19 | 306,58.18759936,55.37109375,59.29270186,PeakDetection,1.105102499,3.921608113 20 | 401,61.35239142,61.5234375,60.25468956,PeakDetection,1.09770186,1.268747936 21 | 402,65.61797753,65.91796875,66.65202283,FFT,1.034045306,0.734054084 22 | 403,69.30917327,70.75195313,68.66889037,PeakDetection,0.640282902,2.083062754 23 | 404,70.36620984,71.63085938,70.18861681,PeakDetection,0.17759303,1.442242565 24 | 405,65.47179758,67.67578125,65.68627451,PeakDetection,0.214476935,1.98950674 25 | 406,64.30235568,65.91796875,64.86323907,PeakDetection,0.560883392,1.054729675 26 | 
501,56.86172967,90.52734375,45.97156639,PeakDetection,10.89016329,44.55577736 27 | 502,59.43611887,55.37109375,51.35948859,FFT,8.076630279,4.011605156 28 | 503,53.98388541,50.53710938,50.8158302,FFT,3.168055206,0.278720826 29 | 504,53.08310992,51.85546875,53.66353712,PeakDetection,0.580427198,1.808068368 30 | 505,52.83018868,50.09765625,48.89611725,FFT,3.934071425,1.201538996 31 | 506,52.85638014,50.9765625,55.3379845,PeakDetection,2.481604357,4.361421996 32 | 603,70.68273092,68.5546875,74.58625277,PeakDetection,3.903521848,6.031565272 33 | 604,73.35423197,73.828125,72.30286221,PeakDetection,1.051369761,1.525262786 34 | 605,67.71159875,65.0390625,68.3076528,PeakDetection,0.596054053,3.268590299 35 | 606,68.4375,68.5546875,68.49642127,FFT,0.058921268,0.058266232 36 | 701,126.8805892,126.5625,125.2667185,FFT,1.613870657,1.295781493 37 | 702,100.6075334,134.0332031,134.789348,FFT,34.18181457,0.756144857 38 | 703,127.5622337,126.5625,127.0706958,PeakDetection,0.491537877,0.508195807 39 | 704,126.5720081,125.2441406,126.0055679,PeakDetection,0.566440185,0.761427304 40 | 705,124.4984161,136.2304688,132.3242613,FFT,7.825845225,3.906207475 41 | 706,127.5407292,133.59375,131.7253593,FFT,4.184630095,1.868390657 42 | 801,52.22896333,46.58203125,49.49410559,PeakDetection,2.734857747,2.912074337 43 | 802,60.52009456,60.64453125,58.39284802,PeakDetection,2.127246542,2.25168323 44 | 803,50.06954103,50.53710938,51.43167077,FFT,1.362129738,0.894561392 45 | 804,50.70422535,50.09765625,55.14686541,PeakDetection,4.442640058,5.04920916 46 | 805,52.37068966,52.734375,52.52156965,PeakDetection,0.150879991,0.212805353 47 | 806,54.38916384,54.4921875,54.82616487,FFT,0.43700103,0.333977375 48 | 901,46.42665234,90.52734375,46.53237778,PeakDetection,0.105725446,43.99496597 49 | 902,52.8440367,54.4921875,50.81521197,PeakDetection,2.028824727,3.67697553 50 | 903,48.25737265,47.90039063,48.31095951,PeakDetection,0.053586853,0.410568882 51 | 
904,50.70422535,47.90039063,47.96425083,FFT,2.739974522,0.063860205 52 | 905,48.14121423,47.4609375,46.95608629,FFT,1.185127942,0.504851213 53 | 906,48.66129452,49.21875,48.34288613,PeakDetection,0.318408388,0.875863873 54 | 1001,83.17143607,79.1015625,83.48221649,PeakDetection,0.310780422,4.380653995 55 | 1002,97.44637385,93.1640625,98.88562753,PeakDetection,1.439253679,5.72156503 56 | 1003,73.76391982,71.63085938,75.07454506,PeakDetection,1.310625234,3.443685681 57 | 1004,75.11530859,75.5859375,78.36825054,FFT,3.252941952,2.78231304 58 | 1005,74.76340694,73.828125,77.30624839,PeakDetection,2.542841453,3.478123393 59 | 1006,78.19942423,79.1015625,78.47045161,PeakDetection,0.271027378,0.631110887 60 | -------------------------------------------------------------------------------- /rppg-Toolbox_MMPD/wip/label/comparison_UBFC.csv: -------------------------------------------------------------------------------- 1 | subject,PeakDetection,FFT,Sensor,Preferred,,,Sensor-PD,Sensor-FFT 2 | 1,107.95,109.86,106.7,FFT,,,1.25,3.16 3 | 3,95.42,88.77,101.17,PeakDetection,,,5.75,12.4 4 | 4,107.44,112.5,112.27,PeakDetection,,,4.83,0.23 5 | 5,99.40828402,101.0742188,98.27691419,FFT,,BOTH,1.13136983,2.797304556 6 | 8,99.41,101.07,98.28,PeakDetection,,BOTH,1.13,2.79 7 | 9,108.9,107.23,108.19,PeakDetection,,,0.71,0.96 8 | 10,110.5341987,108.984375,109.8088735,PeakDetection,,,0.725325184,0.824498518 9 | 11,123.7388724,128.3203125,96.21050384,PeakDetection,,,27.52836856,32.10980866 10 | 12,68.80733945,69.43359375,65.96596229,PeakDetection,,,2.841377157,3.467631457 11 | 13,107.5697211,109.8632813,107.1013712,PeakDetection,,,0.468349956,2.76191009 12 | 14,77.54688292,70.3125,80.41242649,PeakDetection,,,2.865543573,10.09992649 13 | 15,114.9700599,112.5,115.7866832,PeakDetection,,,0.816623288,3.286683168 14 | 16,93.19526627,90.96679688,91.79846341,PeakDetection,,,1.396802858,0.83166654 15 | 17,86.59793814,88.76953125,85.40310313,PeakDetection,,,1.194835016,3.366428122 16 | 
18,118.9349112,124.8046875,103.7622468,PeakDetection,,,15.17266444,21.0424407 17 | 20,127.027027,133.59375,60.22508929,FFT,,,66.80193774,73.36866071 18 | 22,102.6785714,106.3476563,101.7930556,PeakDetection,,,0.885515818,4.55460064 19 | 23,65.72769953,69.43359375,63.28856844,PeakDetection,,,2.439131093,6.145025313 20 | 24,106.3595393,87.01171875,84.59167297,PeakDetection,,,21.76786634,2.420045778 21 | 25,113.8290479,113.3789063,91.53287293,,,both,22.29617502,21.84603332 22 | 26,98.83321894,95.80078125,73.5347138,PeakDetection,,,25.29850514,22.26606745 23 | 27,108.5553997,111.6210938,89.36670161,PeakDetection,,,19.18869811,22.25439214 24 | 30,98.31181728,102.8320313,92.71101085,PeakDetection,,,5.600806431,10.1210204 25 | 31,78.64945382,77.34375,78.23751603,PeakDetection,,,0.41193779,0.893766034 26 | 32,105.5666004,96.6796875,107.328759,PeakDetection,,,1.762158579,10.64907148 27 | 33,117.2135285,113.3789063,117.1761381,,,,0.037390452,3.797231819 28 | 34,116.2444113,115.1367188,116.5466437,PeakDetection,,,0.302232408,1.409924985 29 | 35,104.0156709,116.4550781,102.3579532,PeakDetection,,,1.657717717,14.09712493 30 | 36,118.7592319,121.7285156,119.0246248,PeakDetection,,,0.265392851,2.703890869 31 | 37,60.23976024,55.37109375,62.97210433,PeakDetection,,,2.732344091,7.601010581 32 | 38,109.7345133,112.0605469,109.9967902,PeakDetection,,,0.26227697,2.063756631 33 | 39,84.82142857,86.1328125,85.02252479,PeakDetection,,,0.20109622,1.110287709 34 | 40,86.18453865,87.890625,86.96957544,PeakDetection,,,0.78503679,0.921049556 35 | 41,100.4444444,100.1953125,100.77938,PeakDetection,,,0.334935526,0.584067471 36 | 42,95.39375929,98.4375,93.86798531,PeakDetection,,,1.525773978,4.569514691 37 | 43,99.34771701,97.55859375,97.04197661,PeakDetection,,,2.305740404,0.516617145 38 | 44,82.84142071,79.1015625,82.21315469,PeakDetection,,,0.628266025,3.111592185 39 | 45,110.2040816,111.6210938,110.8200901,PeakDetection,,,0.616008426,0.801003691 40 | 
def read_label(dataset):
    """Load the ground-truth HR comparison table for *dataset*.

    Args:
        dataset: dataset name suffix, e.g. "PURE". The CSV is read from
            label/comparision_{dataset}.csv (note: "comparision" matches the
            historical typo in the on-disk filename).

    Returns:
        Dict mapping row index -> {column name: value}, one record per
        video/sample row of the CSV.
    """
    df = pd.read_csv("label/comparision_{0}.csv".format(dataset))
    # Bug fix: 'orient' is not a valid pandas orientation, so the original
    # to_dict(orient='orient') raised ValueError unconditionally. 'index'
    # yields one {column: value} record per row, keyed by the row index.
    out_dict = df.to_dict(orient='index')
    return out_dict