├── data ├── .gitignore ├── data_prep.py ├── data_factory.py └── additional_user_setup │ └── overlapping_pids.json ├── utils ├── path_definitions.py ├── basic_utils │ ├── utils_tex.py │ ├── utils_operation.py │ └── utils_ml.py ├── cv_split.py └── common_settings.py ├── config ├── global_config.yaml ├── README.md └── dl_feat_prep.yaml ├── run.sh ├── README.md ├── generate_balanced_dataset.py ├── generate_prompt.py ├── prompts └── template_prompt.json ├── generate_sliced_data.py └── data_loader ├── data_loader_ml.py └── data_loader_dl.py /data/.gitignore: -------------------------------------------------------------------------------- 1 | !.gitignore 2 | !*.py 3 | !additional_user_setup/ 4 | !additional_user_setup/overlapping_pids.json 5 | !additional_user_setup/split_5fold_pids.json -------------------------------------------------------------------------------- /utils/path_definitions.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | 4 | PROJECT_ROOT = os.path.dirname(os.path.abspath(Path(__file__).parent)) 5 | 6 | MODEL_PATH = os.path.join(PROJECT_ROOT, "model") 7 | CONFIG_PATH = os.path.join(PROJECT_ROOT, "config") 8 | UTILS_PATH = os.path.join(PROJECT_ROOT, "utils") 9 | DATA_PATH = os.path.join(PROJECT_ROOT, "data") 10 | RAWDATA_PATH = os.path.join(PROJECT_ROOT, "data_raw") 11 | TMP_PATH = os.path.join(PROJECT_ROOT, "tmp") 12 | -------------------------------------------------------------------------------- /config/global_config.yaml: -------------------------------------------------------------------------------- 1 | name: global 2 | 3 | # config for all algorithms 4 | all: 5 | prediction_tasks: ["dep_weekly"] # list of model prediction tasks, should only contains major targets 6 | ds_keys: ["INS-W_1", "INS-W_2", "INS-W_3", "INS-W_4"] # list of datasets to analyze 7 | # ds_keys: ["INS-W-sample_1", "INS-W-sample_2", "INS-W-sample_3", "INS-W-sample_4"] # list of datasets to analyze 8 | 9 | # Default False. Only True when the dataset in ds_keys all support an extended list of feature types. 
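# (illustrative example) with the default ds_keys above (all INS-W datasets) the flag can stay True;
# if a dataset without the extended feature types (e.g., an INS-D key) were added to ds_keys, it should be set back to False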
10 | # Currently only support INS-W 11 | flag_more_feat_types: True 12 | 13 | # config for traditional algorithms 14 | ml: 15 | training_params: 16 | # whether to save and re-use features repetitively 17 | # True only when re-running the exact same algorithm 18 | save_and_reload: False 19 | 20 | # config for deep learning based algorithms 21 | dl: 22 | training_params: 23 | best_epoch_strategy: "direct" # which strategy to pick the best training epoch 24 | # whether to skip training 25 | # True only when the model training is finished 26 | skip_training: False 27 | 28 | -------------------------------------------------------------------------------- /run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # The folder path of the pickle files after preparation of GLOBEM 3 | GLOBEM_RAW_DATA_PATH=/YOUR_PATH_TO_GLOBEM/data/datarepo_max_feature_types 4 | # The folder path to save sliced csv data 5 | SAVED_CSV_DATA_PATH=./csv_data 6 | # The folder path to save preprocessed testset data 7 | SAVED_TESTSET_PATH=./testset 8 | # The file path of template prompt json 9 | TEMPLATE_PROMPT_PATH=./prompts/template_prompt.json 10 | # The file path of generated prompt json 11 | GENERATED_PROMPT_PATH=./prompts/generated_prompts.json 12 | # The output prompt version 13 | PROMPT_VERSION=b 14 | 15 | # First, preprocess the data from GLOBEM 16 | python generate_sliced_data.py --save_path $SAVED_CSV_DATA_PATH --pickle_path $GLOBEM_RAW_DATA_PATH 17 | # Second, sample the balanced dataset from the preprocessed data 18 | python generate_balanced_dataset.py --data_path $SAVED_CSV_DATA_PATH --save_path $SAVED_TESTSET_PATH 19 | # Third, generate the final prompts from the templates and the preprocessed data 20 | python generate_prompt.py --data_path $SAVED_TESTSET_PATH --prompt_path $TEMPLATE_PROMPT_PATH --save_path $GENERATED_PROMPT_PATH --version $PROMPT_VERSION 21 | -------------------------------------------------------------------------------- /utils/basic_utils/utils_tex.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | import pandas as pd 3 | import numbers 4 | 5 | def table2tex(df: pd.DataFrame, title_cols: List[str] = None, bold_row_idx:int = None) -> List[str]: 6 | """Convert a pd.DataFrame to LaTeX code 7 | 8 | Args: 9 | df (pd.DataFrame): table to be converted 10 | title_cols (list, optional): Title columns. Defaults to None. 11 | bold_row_idx (int, optional): Row indices that need to be shown in bold font. Defaults to None.
12 | 13 | Returns: 14 | List[str]: lines of the LaTeX table 15 | """ 16 | if (title_cols is None): 17 | title_cols = list(df.columns) 18 | title_str = " & ".join([f"\\textbf{{{c.replace('_','-')}}}" for c in title_cols]) 19 | final_s_list = ["\\hline \\hline", title_str + " \\\\", "\\hline"] 20 | for idx, row in df.iterrows(): 21 | s = [] 22 | for item in row: 23 | if (isinstance(item, numbers.Number)): 24 | s_tmp = f"{item:.3f}" 25 | else: 26 | s_tmp = item 27 | s.append(s_tmp) 28 | if ( 29 | (isinstance(bold_row_idx, numbers.Number) and idx == bold_row_idx) or 30 | (isinstance(bold_row_idx, list) and idx in bold_row_idx) 31 | ): 32 | s = [f"\\textbf{{{s_}}}" for s_ in s] 33 | final_s_list.append(" & ".join(s) + " \\\\") 34 | final_s_list.append("\\hline \\hline") 35 | return [s for s in final_s_list] 36 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ### This is the code base for the IMWUT submission of the paper: 2 | > From Classification to Clinical Insights: Towards Analyzing and Reasoning About Mobile and Behavioral Health Data With Large Language Models 3 | 4 | ### Environment Setup and Raw Data Preparation 5 | We use GLOBEM as our dataset for the experiments in the paper. 6 | To set up the environment and prepare the raw data, you can follow the instructions in the `` : 7 | 8 | ### Reproduce the Prompts in the Experiments 9 | To reproduce our prompts, you first need to set a few configuration variables in the `run.sh` script. 10 | ``` 11 | # The folder path of the pickle files after preparation of GLOBEM 12 | GLOBEM_RAW_DATA_PATH=/YOUR_PATH_TO_GLOBEM/data/datarepo_max_feature_types 13 | # The folder path to save sliced csv data 14 | SAVED_CSV_DATA_PATH=./csv_data 15 | # The folder path to save preprocessed testset data 16 | SAVED_TESTSET_PATH=./testset 17 | # The file path of template prompt json 18 | TEMPLATE_PROMPT_PATH=./prompts/template_prompt.json 19 | # The file path of generated prompt json 20 | GENERATED_PROMPT_PATH=./prompts/generated_prompts.json 21 | # The output prompt version 22 | PROMPT_VERSION=b 23 | ``` 24 | For the PROMPT_VERSION parameter, check the following table: 25 | 26 | | Prompt Version in the paper | Parameter | 27 | | ------------- | ------------- | 28 | | CoT | a | 29 | | CoT + Exp. | b | 30 | | CoT + Exp. + DSM | c | 31 | 32 | After setting the configuration, run the command shown below (after a short note on the output format) to generate the prompts.
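The resulting `generated_prompts.json` maps each participant ID to the assembled `prompt` text and its ground-truth depression `label` (see `generate_prompt.py`). A minimal sketch for inspecting that output after the run, assuming the default `GENERATED_PROMPT_PATH` above:
```
import json

# load the generated prompts; each entry looks like {"prompt": "...", "label": true/false}
with open("./prompts/generated_prompts.json", "r", encoding="utf-8") as f:
    prompts = json.load(f)

# preview a few participants
for pid, sample in list(prompts.items())[:3]:
    print(pid, sample["label"])
```
The command itself: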
33 | ``` 34 | bash run.sh 35 | ``` -------------------------------------------------------------------------------- /data/data_prep.py: -------------------------------------------------------------------------------- 1 | import os, yaml, sys 2 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) 3 | from pathlib import Path 4 | from data_loader import data_loader_ml, data_loader_dl 5 | from utils import path_definitions 6 | from data import data_factory 7 | import wget 8 | import zipfile 9 | 10 | def download_rawsampledata(key:str = "data_raw_sample") -> None: 11 | """ Download and unzip the data """ 12 | 13 | target_path = os.path.join(path_definitions.RAWDATA_PATH, key + ".zip") 14 | if (os.path.exists(target_path)): 15 | return 16 | 17 | url = data_factory.url_dictionary[key] 18 | print("Downloading sample data...") 19 | wget.download(url, out = target_path) 20 | print("Unzipping data...") 21 | with zipfile.ZipFile(target_path,"r") as zip_ref: 22 | zip_ref.extractall(path_definitions.RAWDATA_PATH) 23 | print("Unzipping done!") 24 | 25 | 26 | def convert_rawdata_to_pkldata() -> None: 27 | """ Convert raw data from 'data_raw' folder to pkl data in 'data' folder """ 28 | 29 | with open(os.path.join(os.path.dirname(os.path.abspath(Path(__file__).parent)), 30 | "config", f"global_config.yaml"), "r") as f: 31 | global_config = yaml.safe_load(f) 32 | 33 | ds_keys = global_config["all"]["ds_keys"] 34 | pred_targets = global_config["all"]["prediction_tasks"] 35 | 36 | ds_keys_dict = { 37 | pred_target: ds_keys for pred_target in pred_targets 38 | } 39 | 40 | # prepare pkl file with raw time-series format, only used for non-ML analysis purpose. 41 | # Currently only support dep_weekly 42 | if ("dep_weekly" in pred_targets): 43 | data_loader_ml.data_loader_raw(ds_keys_list=ds_keys) 44 | 45 | if not global_config["all"]["flag_more_feat_types"]: 46 | # prepare pkl file with sliced data format 47 | data_loader_ml.data_loader( 48 | ds_keys_dict={pt: ds_keys for pt in pred_targets}, flag_more_feat_types=False) 49 | # prepare np pkl file with sliced data format for deep models 50 | data_loader_dl.data_loader_np(ds_keys_dict=ds_keys_dict, flag_normalize=True, flag_more_feat_types=False) 51 | else: # prepare pkl file with all sensor types (currently support INS-W) 52 | # prepare pkl file with sliced data format 53 | data_loader_ml.data_loader( 54 | ds_keys_dict={pt: ds_keys for pt in pred_targets}, flag_more_feat_types=True) 55 | # prepare np pkl file with sliced data format for deep models 56 | data_loader_dl.data_loader_np(ds_keys_dict=ds_keys_dict, flag_normalize=True, flag_more_feat_types=True) 57 | 58 | # prepare placeholder to make sure the pipeline will go smoothly for deep models 59 | data_loader_dl.data_loader_dl_placeholder(pred_targets, ds_keys) 60 | 61 | if __name__ == "__main__": 62 | 63 | convert_rawdata_to_pkldata() -------------------------------------------------------------------------------- /data/data_factory.py: -------------------------------------------------------------------------------- 1 | import os, sys 2 | from pathlib import Path 3 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) 4 | from utils import path_definitions 5 | 6 | def get_raw_feature_folderpath(phase = 1, institute = "INS"): 7 | path = os.path.join(path_definitions.RAWDATA_PATH , institute + "_" + str(phase) , "FeatureData/") 8 | return path 9 | 10 | def get_survey_filepath(phase = 1, institute = "INS"): 11 | path = 
os.path.join(path_definitions.RAWDATA_PATH , institute + "_" + str(phase) , "SurveyData/") 12 | return path 13 | 14 | def get_participantsinfo_filepath(phase = 1, institute = "INS"): 15 | path = os.path.join(path_definitions.RAWDATA_PATH , institute + "_" + str(phase) , "ParticipantsInfoData/") 16 | return path 17 | 18 | feature_folder = { 19 | "INS-W": {1:get_raw_feature_folderpath(1, "INS-W"), 20 | 2:get_raw_feature_folderpath(2, "INS-W"), 21 | 3:get_raw_feature_folderpath(3, "INS-W"), 22 | 4:get_raw_feature_folderpath(4, "INS-W")}, 23 | "INS-W-sample": {1:get_raw_feature_folderpath(1, "INS-W-sample"), 24 | 2:get_raw_feature_folderpath(2, "INS-W-sample"), 25 | 3:get_raw_feature_folderpath(3, "INS-W-sample"), 26 | 4:get_raw_feature_folderpath(4, "INS-W-sample")}, 27 | "INS-D": {1:get_raw_feature_folderpath(1, "INS-D"), 28 | 2: get_raw_feature_folderpath(2, "INS-D")}, 29 | } 30 | 31 | survey_folder = { 32 | "INS-W": {1:get_survey_filepath(1, "INS-W"), 33 | 2:get_survey_filepath(2, "INS-W"), 34 | 3:get_survey_filepath(3, "INS-W"), 35 | 4:get_survey_filepath(4, "INS-W"),}, 36 | "INS-W-sample": {1:get_survey_filepath(1, "INS-W-sample"), 37 | 2:get_survey_filepath(2, "INS-W-sample"), 38 | 3:get_survey_filepath(3, "INS-W-sample"), 39 | 4:get_survey_filepath(4, "INS-W-sample"),}, 40 | "INS-D": {1:get_survey_filepath(1, "INS-D"), 41 | 2: get_survey_filepath(2, "INS-D")}, 42 | } 43 | 44 | participants_info_folder = { 45 | "INS-W": {1:get_participantsinfo_filepath(1, "INS-W"), 46 | 2:get_participantsinfo_filepath(2, "INS-W"), 47 | 3:get_participantsinfo_filepath(3, "INS-W"), 48 | 4:get_participantsinfo_filepath(4, "INS-W")}, 49 | "INS-W-sample": {1:get_participantsinfo_filepath(1, "INS-W-sample"), 50 | 2:get_participantsinfo_filepath(2, "INS-W-sample"), 51 | 3:get_participantsinfo_filepath(3, "INS-W-sample"), 52 | 4:get_participantsinfo_filepath(4, "INS-W-sample")}, 53 | "INS-D": {1:get_participantsinfo_filepath(1, "INS-D"), 54 | 2:get_participantsinfo_filepath(2, "INS-D")}, 55 | } 56 | 57 | url_dictionary = { 58 | "data_raw_sample": "https://drive.google.com/uc?export=download&id=1a3cM1joYyPPoYmDCk1U4qM3Q_gjSeJLd" 59 | } 60 | 61 | threshold_book = { 62 | "UCLA_10items_POST": {"threshold_as_false": 24, "threshold_as_true":25}, 63 | } -------------------------------------------------------------------------------- /utils/cv_split.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from sklearn.model_selection._split import _BaseKFold 3 | from sklearn.utils import check_array 4 | from data_loader.data_loader_ml import DataRepo 5 | 6 | class GroupKFoldRandom(_BaseKFold): 7 | """Modifed from the original sklearn GroupKFold to add random state 8 | https://github.com/scikit-learn/scikit-learn/blob/baf828ca1/sklearn/model_selection/_split.py#L453 9 | """ 10 | def __init__(self, n_splits=5, random_state = None): 11 | super().__init__(n_splits, shuffle=True, random_state=random_state) 12 | 13 | def _iter_test_indices(self, X, y, groups): 14 | if groups is None: 15 | raise ValueError("The 'groups' parameter should not be None.") 16 | groups = check_array(groups, ensure_2d=False, dtype=None) 17 | 18 | unique_groups, groups = np.unique(groups, return_inverse=True) 19 | n_groups = len(unique_groups) 20 | 21 | if self.n_splits > n_groups: 22 | raise ValueError( 23 | "Cannot have number of splits n_splits=%d greater" 24 | " than the number of groups: %d." 
% (self.n_splits, n_groups) 25 | ) 26 | 27 | # Weight groups by their number of occurrences 28 | n_samples_per_group = np.bincount(groups) 29 | 30 | # Distribute the most frequent groups first 31 | indices = np.argsort(n_samples_per_group)[::-1] 32 | n_samples_per_group = n_samples_per_group[indices] 33 | 34 | # Total weight of each fold 35 | n_samples_per_fold = np.zeros(self.n_splits) 36 | 37 | # Mapping from group index to fold index 38 | group_to_fold = np.zeros(len(unique_groups)) 39 | 40 | # Distribute samples by adding the largest weight to the lightest fold 41 | for group_index, weight in enumerate(n_samples_per_group): 42 | lightest_fold = np.argmin(n_samples_per_fold) 43 | n_samples_per_fold[lightest_fold] += weight 44 | group_to_fold[indices[group_index]] = lightest_fold 45 | 46 | indices = group_to_fold[groups] 47 | 48 | for f in range(self.n_splits): 49 | yield np.where(indices == f)[0] 50 | 51 | def split(self, X, y=None, groups=None): 52 | return super().split(X, y, groups) 53 | 54 | def judge_corner_cvsplit(cv:_BaseKFold, data_repo: DataRepo) -> bool: 55 | """Avoid splitting cases with single label or device_type""" 56 | same_label_flag = False 57 | for train_idx, test_idx in cv.split(X = data_repo.X, y = data_repo.y, groups=data_repo.pids): 58 | if (len(set(data_repo.y.iloc[train_idx])) == 1): 59 | same_label_flag = True 60 | break 61 | elif ("device_type" in data_repo.X): 62 | devices_train = data_repo.X.values[train_idx,-1] 63 | train_idx_ios = np.where(devices_train == 1)[0] 64 | train_idx_android = np.where(devices_train != 1)[0] 65 | y_tmp = data_repo.y.iloc[train_idx].values 66 | if (len(set(y_tmp[train_idx_ios])) == 1 or len(set(y_tmp[train_idx_android])) == 1): 67 | same_label_flag = True 68 | break 69 | return same_label_flag 70 | 71 | -------------------------------------------------------------------------------- /generate_balanced_dataset.py: -------------------------------------------------------------------------------- 1 | import os 2 | import random 3 | import numpy as np 4 | import pandas as pd 5 | import argparse 6 | 7 | 8 | def save_samples(file_list, src_folder, dest_folder): 9 | 10 | for file in file_list: 11 | df = pd.read_csv(os.path.join(src_folder, file)) 12 | duration_index = df[(df["depression"] == True) | (df["depression"] == False)].index 13 | duration_df = df.loc[(duration_index[-2] - 27) : duration_index[-2]] 14 | duration_df.to_csv(os.path.join(dest_folder, file), index=False) 15 | 16 | def split_sample_years(samples): 17 | year2 = list() 18 | year3 = list() 19 | year4 = list() 20 | # split the data by years 21 | for i in range(len(samples)): 22 | pid_no = int(samples[i].split(".")[0].split("#")[0].split("_")[1]) 23 | if pid_no < 600: 24 | year2.append(samples[i]) 25 | elif pid_no < 900: 26 | year3.append(samples[i]) 27 | else: 28 | year4.append(samples[i]) 29 | 30 | return year2, year3, year4 31 | 32 | 33 | if __name__ == "__main__": 34 | 35 | parser = argparse.ArgumentParser(description="settings") 36 | parser.add_argument("-d", "--data_path", type=str, default="./tmp_data", help="generated_dataset_path") 37 | parser.add_argument("-s", "--save_path", type=str, default="./tmp_data", help="save_path") 38 | opt = parser.parse_args() 39 | data_path = opt.data_path 40 | save_path = opt.save_path 41 | if not os.path.exists(save_path): 42 | os.makedirs(save_path) 43 | 44 | pid_file_list = os.listdir(data_path) 45 | pid_file_list = sorted(pid_file_list) 46 | positive_label = list() 47 | negative_label = list() 48 | 49 | for pid_file in 
pid_file_list: 50 | df = pd.read_csv(os.path.join(data_path, pid_file)) 51 | duration_index = df[(df["depression"] == True) | (df["depression"] == False)].index 52 | if np.isnan(df.loc[(duration_index[-2]), "phq4"]): 53 | continue 54 | if df.loc[(duration_index[-2]), "phq4"] > 0 and df.loc[(duration_index[-2]), "phq4"] < 6: 55 | continue 56 | depression_label = df.loc[(duration_index[-2]), "depression"] 57 | 58 | if depression_label == True: 59 | positive_label.append(pid_file) 60 | else: 61 | negative_label.append(pid_file) 62 | 63 | # split the data by years 64 | pos2, pos3, pos4 = split_sample_years(positive_label) 65 | neg2, neg3, neg4 = split_sample_years(negative_label) 66 | # sample balanced data 67 | random_choices = random.sample(pos2, k=15) 68 | save_samples(random_choices, data_path, save_path) 69 | random_choices = random.sample(pos3, k=15) 70 | save_samples(random_choices, data_path, save_path) 71 | random_choices = random.sample(pos4, k=15) 72 | save_samples(random_choices, data_path, save_path) 73 | random_choices = random.sample(neg2, k=15) 74 | save_samples(random_choices, data_path, save_path) 75 | random_choices = random.sample(neg3, k=15) 76 | save_samples(random_choices, data_path, save_path) 77 | random_choices = random.sample(neg4, k=15) 78 | save_samples(random_choices, data_path, save_path) 79 | -------------------------------------------------------------------------------- /generate_prompt.py: -------------------------------------------------------------------------------- 1 | import os 2 | import argparse 3 | import json 4 | import pandas as pd 5 | 6 | 7 | def markdown_refine(markdown_table): 8 | """ 9 | Refine markdown format 10 | """ 11 | # Split the markdown code into lines 12 | lines = markdown_table.split('\n') 13 | # Remove the second line (index 1) 14 | lines.pop(1) 15 | for i, line in enumerate(lines): 16 | if line.startswith('|'): 17 | line = line[1:] 18 | if '|' in line: 19 | lines[i] = '|'.join(segment.strip() for segment in line.split('|')) 20 | # Join the lines back together to get the modified markdown 21 | modified_markdown = '\n'.join(lines) 22 | return modified_markdown 23 | 24 | def generate_prompt(df, prompt, to_markdown, version): 25 | """ 26 | Generate prompt samples 27 | """ 28 | prompt_start = prompt["prompt_start"] 29 | prompt_task_describe = prompt["prompt_task_describe"] 30 | prompt_outline = prompt["prompt_outline"] 31 | prompt_outline_explanation = prompt["prompt_outline_explanation"] 32 | prompt_outline_complete = prompt["prompt_outline_complete"] 33 | data_col = prompt["data_col"] 34 | instructions = prompt["instructions"] 35 | variable_concept = prompt["variable_concept"] 36 | dsm = prompt["dsm"] 37 | 38 | duration_index = df[(df["depression"] == True) | (df["depression"] == False)].index 39 | depression_label = df.loc[(duration_index[0]), "depression"] 40 | df = df.drop(["phq4", "depression", "anxiety_sub", "depress_sub"], axis=1) 41 | 42 | if to_markdown: 43 | df = df.to_markdown(index=False) 44 | df = markdown_refine(df) 45 | 46 | if version == "a": 47 | prompt_sample = f"{prompt_start} \n{prompt_task_describe} \n{prompt_outline} \n{data_col} \n{df} \n{instructions}" 48 | elif version == "b": 49 | prompt_sample = f"{prompt_start} \n{prompt_task_describe} \n{prompt_outline_explanation} \n{data_col} \n{df} \n{variable_concept} \n{instructions}" 50 | elif version == "c": 51 | prompt_sample = f"{prompt_start} \n{prompt_task_describe} \n{prompt_outline_complete} \n{data_col} \n{df} \n{variable_concept} \n{dsm} \n{instructions}" 52 | 
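# Note: the three branches above assemble the prompt versions listed in the README table:
# "a" (CoT) uses the bare outline; "b" (CoT + Exp.) swaps in the outline with explanations and adds the
# variable descriptions; "c" (CoT + Exp. + DSM) further appends the DSM-5 depression criteria before the instructions.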
else: 53 | print("Wrong Prompt Version! Please check!") 54 | return prompt_sample, depression_label 55 | 56 | if __name__ == "__main__": 57 | parser = argparse.ArgumentParser(description="settings") 58 | parser.add_argument("-d", "--data_path", type=str, default="./tmp_data", help="generated_dataset_path") 59 | parser.add_argument("-p", "--prompt_path", type=str, default="./tmp_data", help="prompt_content_path") 60 | parser.add_argument("-s", "--save_path", type=str, default="./tmp_data", help="save_path") 61 | parser.add_argument("-m", "--to_markdown", type=bool, default=True, help="use markdown format") 62 | parser.add_argument("-c", "--version", type=str, default="a", help="choose the prompt version") 63 | opt = parser.parse_args() 64 | data_path = opt.data_path 65 | prompt_path = opt.prompt_path 66 | save_path = opt.save_path 67 | to_markdown = opt.to_markdown 68 | version = opt.version 69 | 70 | with open(prompt_path, 'r', encoding='utf-8') as file: 71 | prompt = json.load(file) 72 | 73 | prompt_samples = dict() 74 | data_list = os.listdir(data_path) 75 | for data_file in data_list: 76 | df = pd.read_csv(os.path.join(data_path, data_file)) 77 | pid = data_file.split("#")[0] 78 | prompt_sample, label = generate_prompt(df, prompt, to_markdown, version) 79 | prompt_samples[pid] = dict() 80 | prompt_samples[pid]["prompt"] = prompt_sample 81 | prompt_samples[pid]["label"] = label 82 | 83 | with open(save_path, 'w', encoding='utf-8') as file: 84 | json.dump(prompt_samples, file, indent=4) 85 | 86 | print(f"Prompts are successfully generated at {save_path}") -------------------------------------------------------------------------------- /prompts/template_prompt.json: -------------------------------------------------------------------------------- 1 | { 2 | "prompt_start": "You are a data analyst helping a psychiatrist understand human activity data.", 3 | "prompt_task_describe": "Task: \nYou will be shown data gathered from a smartphone and smart watch worn by an individual. Your goal is to analyze this data.", 4 | "prompt_outline_complete": "You are presented with the following:\n1. A table consisting of twenty-eight days of collected activity tracking data [Collected Data] \n2. Explanation of the different types of data. [Description of Variables] \n3. DSM-5 Criteria for depression. [Depression Criteria] \n4. Instructions on how to analyze the data [Instructions]", 5 | "prompt_outline_explanation": "You are presented with the following:\n1. A table consisting of twenty-eight days of collected activity tracking data [Collected Data] \n2. Explanation of the different types of data. [Description of Variables] \n3. Instructions on how to analyze the data [Instructions]", 6 | "prompt_outline": "You are presented with the following:\n1. A table consisting of twenty-eight days of collected activity tracking data [Collected Data] \n2. Instructions on how to analyze the data [Instructions]", 7 | "data_col": "Collected Data \ndate|total_distance_traveled(meters)|time_at_home(minutes)|location_entropy|phone_screen_time(hours)|average_phone_use_unlock_duration(minutes)|unique_bluetooth_devices_found_nearby|step_count|number_of_sedentary_episodes|total_time_spent_sedentary(minutes)|total_time_spent_active(minutes)|total_time_asleep(hours)|total_time_spent_awake_while_in_bed(minutes)|time_to_gobed(hours)|time_to_wake(hours)|", 8 | "instructions": "Instructions \n 1. Based on the collected data, hypothesize about the overall mental health of the individual with detail explainations. \n 2. 
Make your best guess if this person is currently experiencing depression. Please answer only \"Yes\" or \"No\" in this question without any other notes", 9 | "variable_concept": "Description of variables \nFor the duration of the data collection period, data was logged from the individual’s smartphone and a fitness tracking smartwatch. An explanation of the different types of data logged is included below: \nDate: the data in this row was recorded on that date. \ntotal_Distance_traveled(meters): This represents the total distance traveled as measured by the GPS in the participant’s smartphone. This includes walking, driving, and any other modes of transportation. \ntime_at_home(minutes): This is the total time that the participant was at home as determined by their smartphone GPS. \nlocation_entropy: Location entropy measures the variety and frequency of locations (e.g., points-of-interest) visited. It captures the diversity of the locations visited by the user. \nphone_screen_time(minutes): This is the total time the participant’s smartphone was on and unlocked. \nAverage_phone_use_unlock_duration(minutes): This represents the average duration a person used their phone each time they unlocked it. \nphone_call_incoming_duration(minutes): Time spent in received phone calls. \nphone_call_outgoing_duration(minutes): Time spent in phone calls initiated by the person. \nunique_bluetooth_devices_found_nearby: The total number of unique Bluetooth devices detected nearby by the participant’s phone as they go about their day \nstep_count: Total number of steps taken as recorded by their fitness tracker. \nnumber_of_sedentary_episodes: Number of times the user was stationary as measured by their fitness tracker. \ntotal_time_spent_sedentary(minutes), Amount of time the user spent stationary as measured by their fitness tracker. \nnumber_of_activity_episodes: Number of times the user was moving around as measured by their fitness tracker. \ntotal_time_spent_active(minutes): Amount of time the user spent moving around as measured by their fitness tracker. \ntotal_time_asleep(minutes): Total time the user spent asleep as measured by their fitness tracker. \ntotal_time_spent_awake_while_in_bed(minutes): Total time the user spent in bed but awake as measured by their fitness tracker.", 10 | "dsm": "Depression Criteria \n Here is the DSM-5 Criteria for depression. The DSM-5 outlines the following criterion to make a diagnosis of depression. The individual must be experiencing five or more symptoms during the same 2-week period and at least one of the symptoms should be either (1) depressed mood or (2) loss of interest or pleasure. 
\n" 11 | } -------------------------------------------------------------------------------- /utils/basic_utils/utils_operation.py: -------------------------------------------------------------------------------- 1 | from typing import Dict 2 | import numpy as np 3 | import pandas as pd 4 | from copy import deepcopy 5 | import time 6 | from sklearn.preprocessing import RobustScaler 7 | 8 | def get_dict_element(d: Dict) -> np.ndarray: 9 | """Get the element of a dict as a numpy list 10 | 11 | Args: 12 | d (dictionary) 13 | 14 | Returns: 15 | np.ndarray: the list of the value of all keys in the dict 16 | """ 17 | return np.concatenate([v for k,v in d.items()]) 18 | 19 | def get_df_rows(df: pd.DataFrame, parameters: Dict) -> pd.DataFrame: 20 | """Get a subset of the rules by some columns 21 | 22 | Args: 23 | df (pd.Dataframe): A dataframe to be extracted 24 | parameters (dictionary): {col:value (can be a single or a list of value)} 25 | 26 | Returns: 27 | pd.Dataframe: A dataframe with the selected rows 28 | """ 29 | for col, val in parameters.items(): 30 | if (type(val) is list): 31 | df = df[df[col].isin(val)] 32 | else: 33 | df = df[df[col] == val] 34 | return df 35 | 36 | def discretize_tertiles(df: pd.DataFrame) -> pd.DataFrame: 37 | """Split the data into one to three levels 38 | 39 | Args: 40 | df (pd.DataFrame): Data frame 41 | 42 | Returns: 43 | pd.DataFrame: The data after discretization 44 | """ 45 | df_buf = deepcopy(df) 46 | for col in df.columns: 47 | try: 48 | df_buf[col] = pd.qcut(df[col], q=3, labels=["l", "m", "h"]) 49 | except Exception: 50 | try: 51 | # Cases where there are only two unique values 52 | df_buf[col] = pd.qcut(df[col], q=2, labels=["l", "h"]) 53 | except Exception: 54 | # Cases where everything is the same value 55 | df_buf[col] = pd.qcut(df[col], q=1, labels=["m"]) 56 | return df_buf 57 | 58 | def discretize_data(df:pd.DataFrame, grpbyid:str) -> pd.DataFrame: 59 | """discretize real-value data into three levels, based on certain group 60 | 61 | Args: 62 | df (pandas dataframe): data 63 | grpbyid (string): groupby column 64 | 65 | Returns: 66 | pd.DataFrame: data after descretization 67 | """ 68 | 69 | # from pandarallel import pandarallel 70 | # pandarallel.initialize(progress_bar = False, nb_workers = 8, verbose = 1) 71 | features = list(df.columns) 72 | features_dis = [f for f in features if df[f].dtype.kind in "bifc"] 73 | df_dis = df.groupby(grpbyid).apply(lambda x: discretize_tertiles( 74 | x[features_dis])).reset_index(level=-1, drop=True) 75 | # del pandarallel 76 | return df_dis 77 | 78 | def normalize_robust(df:pd.DataFrame, scaler: RobustScaler) -> pd.DataFrame: 79 | """Normalize the data using quantile, robust to outliers 80 | 81 | Args: 82 | df (pd.DataFrame): data to be normalized 83 | scaler (RobustScaler): sklearn.preprocessing.robustscaler 84 | 85 | Returns: 86 | pd.DataFrame: data after normalization 87 | """ 88 | df_buf = deepcopy(df) 89 | na_count = df_buf.isna().sum(axis = 0).values 90 | notfullna_idx = np.where(na_count < df_buf.shape[0])[0] 91 | cols = list(df_buf.columns[notfullna_idx]) 92 | scl = scaler.fit(df_buf[cols]) 93 | df_buf[cols] = scl.transform(df_buf[cols]) # [5%,95%] to [-1,1] 94 | df_buf[cols] = df_buf[cols].clip(lower = -2, upper = 2) # reduce outlier effect 95 | return df_buf 96 | 97 | 98 | def normalize_data(df, grpbyid, scaler = None) -> pd.DataFrame: 99 | """normalize real-value data, based on certain group 100 | 101 | Args: 102 | df (pandas dataframe): data 103 | grpbyid (string): groupby column 104 | 105 | Returns: 
106 | pd.DataFrame: data after normalization 107 | """ 108 | if scaler is None: 109 | scaler = RobustScaler(quantile_range = (5,95), unit_variance = False) 110 | features = list(df.columns) 111 | features_norm = [f for f in features if df[f].dtype.kind in "bifc"] 112 | df_norm = df.groupby(grpbyid).apply(lambda x: normalize_robust( 113 | x[features_norm],scaler)).reset_index(level=-1, drop=True) 114 | return df_norm 115 | 116 | 117 | def func_timer(func, repeat = 10): 118 | """Time a function 119 | 120 | Args: 121 | func : The target function 122 | repeat (int, optional): the number of repetition. Defaults to 10. 123 | """ 124 | start = time.time() 125 | for _ in range(repeat): 126 | func() 127 | end = time.time() 128 | print("avg time: ", (end - start) / repeat) 129 | 130 | def isnotebook() -> bool: 131 | """judge whether the script is ran in a notebook 132 | 133 | Returns: 134 | bool: whether it is in notebook 135 | """ 136 | try: 137 | from IPython import get_ipython 138 | shell = get_ipython().__class__.__name__ 139 | if shell == 'ZMQInteractiveShell': 140 | return True # Jupyter notebook or qtconsole 141 | elif shell == 'TerminalInteractiveShell': 142 | return False # Terminal running IPython 143 | else: 144 | return False # Other type (?) 145 | except NameError: 146 | return False # Probably standard Python interpreter -------------------------------------------------------------------------------- /config/README.md: -------------------------------------------------------------------------------- 1 | # Introduction of Configuration Module 2 | 3 | This is a short introduction of the configuration module. As mentioned in the main [README](../README.md), this module provides the flexibility of controlling different parameters in the other modules. 4 | 5 | The platform leverages a [`global_config.yaml`](./global_config.yaml) to set a small number of parameters that can be widely applied to multiple models. In addition, each model has its unique config file to enable custom adjustment. 6 | 7 | ## Global config 8 | 9 | The [`global_config.yaml`](./global_config.yaml) determines the following setups: 10 | - `all` - applying to all models 11 | - `prediction_tasks`: a list that includes all supported model prediction tasks 12 | - `ds_keys`: a list of dataset keys involved in the evaluation 13 | - `flag_more_feat_types`: whether to use additional feature types. Currently only can be `True` when `ds_keys` only contains `INS-W`. 14 | - `ml` - applying to traditional models 15 | - `save_and_reload`: a flag to indicate whether to save and re-use features repetitively (intermediate files will be saved in `tmp` folder). Default `False`. Be careful when turning this flag on, as it will not update the feature file once it is saved. Set it to `True` only when re-running the exact same algorithm. 16 | - `dl` - applying to deep models 17 | - `best_epoch_strategy`: a flag to choose the best training epoch as the final prediction model: `direct` or `on_test`. 18 | When it is set as `direct`, it will use a standard strategy: picking the best training epoch on the validation/training set. 19 | When it is set as `on_test`, it will use another strategy that involves information leakage. It iterates through all training epochs, and performs the same `direct` strategy at each epoch. Then, the results on the testing set across all epochs are compared to identify the best epoch. The results only indicate whether a model is overfitted, and reflect the theoretical upper bound performance during the training. 
20 | - `skip_training`: similar to `save_and_reload` in `ml`, this is a flag to accelerate the deep model evaluation process. A model's intermediate training epoch results will be saved in `tmp` folder. When this flag is turned on, the model can leverage the saved results to re-identify the best epoch. A typical usage case: (1) set `skip_training` as `False` and `best_epoch_strategy` as `direct` to go through the training. (2) set `skip_training` as `True` and `best_epoch_strategy` as `on_test` to find another epoch without the need to re-train the model. 21 | 22 | It is worth noting that `global_config.yaml` will overwrite the individual config files on the same items. This can save the effort of changing individual parameters one by one. 23 | 24 | ## Model config 25 | 26 | Each algorithm can lead to one or more models, and each model is accompanied by one config yaml file with a unique name. 27 | 28 | Here is a list of the current supported models: 29 | - Traditional Machine Learning Model 30 | - [Canzian *et al.*](../algorithm/ml_canzian.py) - [`ml_canzian.yaml`](./ml_canzian.yaml) 31 | - [Saeb *et al.*](../algorithm/ml_saeb.py) - [`ml_saeb.yaml`](./ml_saeb.yaml) 32 | - [Farhan *et al.*](../algorithm/ml_farhan.py) - [`ml_farhan.yaml`](./ml_farhan.yaml) 33 | - [Wahle *et al.*](../algorithm/ml_wahle.py) - [`ml_wahle.yaml`](./ml_wahle.yaml) 34 | - [Lu *et al.*](../algorithm/ml_lu.py) - [`ml_lu.yaml`](./ml_lu.yaml) 35 | - [Wang *et al.*](../algorithm/ml_wang.py) - [`ml_wang.yaml`](./ml_wang.yaml) 36 | - [Xu *et al.* - Interpretable](../algorithm/ml_xu_interpretable.py) - [`ml_xu_interpretable.yaml`](./ml_xu_interpretable.yaml) 37 | - [Xu *et al.* - Personalized](../algorithm/ml_xu_personalized.py) - [`ml_xu_personalized.yaml`](./ml_xu_personalized.yaml) 38 | - [Chikersal *et al.*](../algorithm/ml_chikersal.py) - [`ml_chikersal.yaml`](./ml_chikersal.yaml) 39 | - Deep-learning Model 40 | - ERM 41 | - [ERM-1D-CNN](../algorithm/dl_erm.py) - [`dl_erm_1dCNN.yaml`](./dl_erm_1dCNN.yaml) 42 | - [ERM-2D-CNN](../algorithm/dl_erm.py) - [`dl_erm_2dCNN.yaml`](./dl_erm_2dCNN.yaml) 43 | - [ERM-LSTM](../algorithm/dl_erm.py) - [`dl_erm_LSTM.yaml`](./dl_erm_LSTM.yaml) 44 | - [ERM-Transformer](../algorithm/dl_erm.py) - [`dl_erm_Transformer.yaml`](./dl_erm_Transformer.yaml) 45 | - [Mixup](../algorithm/dl_erm.py) - [`dl_erm_mixup.yaml`](./dl_erm_mixup.yaml) 46 | - DANN 47 | - [DANN - Dataset as Domain](../algorithm/dl_dann.py) - [`dl_dann_ds_as_domain.yaml`](./dl_dann_ds_as_domain.yaml) 48 | - [DANN - Person as Domain](../algorithm/dl_dann.py) - [`dl_dann_person_as_domain.yaml`](./dl_dann_person_as_domain.yaml) 49 | - [IRM](../algorithm/dl_irm.py) - [`dl_irm.yaml`](./dl_irm.yaml) 50 | - CSD 51 | - [CSD - Dataset as Domain](../algorithm/dl_csd.py) - [`dl_csd_ds_as_domain.yaml`](./dl_csd_ds_as_domain.yaml) 52 | - [CSD - Person as Domain](../algorithm/dl_csd.py) - [`dl_csd_person_as_domain.yaml`](./dl_csd_person_as_domain.yaml) 53 | - MLDG 54 | - [MLDG - Dataset as Domain](../algorithm/dl_mldg.py) - [`dl_mldg_ds_as_domain.yaml`](./dl_mldg_ds_as_domain.yaml) 55 | - [MLDG - Person as Domain](../algorithm/dl_mldg.py) - [`dl_mldg_person_as_domain.yaml`](./dl_mldg_person_as_domain.yaml) 56 | - MASF 57 | - [MASF - Dataset as Domain](../algorithm/dl_masf.py) - [`dl_masf_ds_as_domain.yaml`](./dl_masf_ds_as_domain.yaml) 58 | - [MASF - Person as Domain](../algorithm/dl_masf.py) - [`dl_masf_person_as_domain.yaml`](./dl_masf_person_as_domain.yaml) 59 | - [Siamese](../algorithm/dl_siamese.py) - 
[`dl_siamese.yaml`](./dl_siamese.yaml) 60 | - [Clustering](../algorithm/dl_clustering.py) - [`dl_clustering.yaml`](./dl_clustering.yaml) 61 | - [Reorder](../algorithm/dl_reorder.py) - [`dl_reorder.yaml`](./dl_reorder.yaml) -------------------------------------------------------------------------------- /generate_sliced_data.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pandas as pd 3 | import numpy as np 4 | import argparse 5 | from data_loader import data_loader_ml 6 | 7 | def delete_folder(folder_path): 8 | try: 9 | # Use os.rmdir() to remove an empty folder 10 | os.rmdir(folder_path) 11 | # print(f"Folder '{folder_path}' has been deleted successfully.") 12 | except OSError as e: 13 | # If the folder is not empty, use os.remove() to delete its contents first 14 | if e.errno == 39: 15 | for root, dirs, files in os.walk(folder_path): 16 | for file in files: 17 | os.remove(os.path.join(root, file)) 18 | os.rmdir(folder_path) 19 | # print(f"Folder '{folder_path}' and its contents have been deleted successfully.") 20 | else: 21 | print(f"Error: {e}") 22 | 23 | 24 | title_mapping = { 25 | "date":"date", 26 | 'f_loc:phone_locations_doryab_totaldistance:allday':"total_distance_traveled(meters)", 27 | 'f_loc:phone_locations_doryab_timeathome:allday':"time_at_home(minutes)", 28 | 'f_loc:phone_locations_doryab_locationentropy:allday':"location_entropy", 29 | 'f_screen:phone_screen_rapids_sumdurationunlock:allday':"phone_screen_time(minutes)", 30 | 'f_screen:phone_screen_rapids_avgdurationunlock:allday':"average_phone_use_unlock_duration(minutes)", 31 | 'f_call:phone_calls_rapids_incoming_sumduration:allday':"phone_call_incoming_duration(minutes)", 32 | 'f_call:phone_calls_rapids_outgoing_sumduration:allday':"phone_call_outgoing_duration(minutes)", 33 | 'f_blue:phone_bluetooth_doryab_uniquedevicesothers:allday':"unique_bluetooth_devices_found_nearby", 34 | 'f_steps:fitbit_steps_intraday_rapids_sumsteps:allday':"step_count", 35 | 'f_steps:fitbit_steps_intraday_rapids_countepisodesedentarybout:allday':"number_of_sedentary_episodes", 36 | 'f_steps:fitbit_steps_intraday_rapids_sumdurationsedentarybout:allday':"total_time_spent_sedentary(minutes)", 37 | 'f_steps:fitbit_steps_intraday_rapids_countepisodeactivebout:allday':"number_of_activity_episodes", 38 | 'f_steps:fitbit_steps_intraday_rapids_sumdurationactivebout:allday':"total_time_spent_active(minutes)", 39 | 'f_slp:fitbit_sleep_intraday_rapids_sumdurationasleepunifiedmain:allday':"total_time_asleep(minutes)", 40 | 'f_slp:fitbit_sleep_intraday_rapids_sumdurationawakeunifiedmain:allday':"total_time_spent_awake_while_in_bed(minutes)", 41 | # 'f_slp:fitbit_sleep_summary_rapids_firstbedtimemain:allday':"bedtime(minutes)", 42 | # 'f_slp:fitbit_sleep_summary_rapids_firstwaketimemain:allday':'wakebedtime(minutes)', 43 | } 44 | 45 | round_list = ['f_loc:phone_locations_doryab_totaldistance:allday', 'f_loc:phone_locations_doryab_timeathome:allday', 'f_screen:phone_screen_rapids_sumdurationunlock:allday', 46 | 'f_screen:phone_screen_rapids_avgdurationunlock:allday', 'f_call:phone_calls_rapids_incoming_sumduration:allday', 'f_call:phone_calls_rapids_outgoing_sumduration:allday', 47 | 'f_steps:fitbit_steps_intraday_rapids_sumsteps:allday', 'f_steps:fitbit_steps_intraday_rapids_countepisodesedentarybout:allday', 'f_steps:fitbit_steps_intraday_rapids_sumdurationsedentarybout:allday', 48 | 'f_steps:fitbit_steps_intraday_rapids_countepisodeactivebout:allday', 
'f_steps:fitbit_steps_intraday_rapids_sumdurationactivebout:allday', 'f_slp:fitbit_sleep_intraday_rapids_sumdurationasleepunifiedmain:allday', 49 | 'f_slp:fitbit_sleep_intraday_rapids_sumdurationawakeunifiedmain:allday'] 50 | 51 | pd.set_option('display.float_format', '{:.2f}'.format) 52 | placeholder = -1000 53 | 54 | if __name__ == "__main__": 55 | 56 | parser = argparse.ArgumentParser(description="settings") 57 | parser.add_argument("-d", "--save_path", type=str, default="./tmp_data", help="temp data folder") 58 | parser.add_argument("-p", "--pickle_path", type=str, default="./tmp_data", help="pickel folder") 59 | opt = parser.parse_args() 60 | save_path = opt.save_path 61 | pickles_path = opt.pickle_path 62 | tmp_path = "./tmp" 63 | if not os.path.exists(tmp_path): 64 | os.makedirs(tmp_path) 65 | 66 | if not os.path.exists(save_path): 67 | os.makedirs(save_path) 68 | 69 | pids = set() 70 | for pickle_path in os.listdir(pickles_path): 71 | if "1" in pickle_path: 72 | continue 73 | dataset_dict = data_loader_ml.data_loader_raw_single(os.path.join(pickles_path, pickle_path)) 74 | feature_list = list(title_mapping.keys()) 75 | 76 | for index, row in dataset_dict.datapoints.iterrows(): 77 | df = row["X_raw"] 78 | pid = row["pid"] 79 | date = row["date"] 80 | date = date.strftime('%Y-%m-%d %H:%M:%S') 81 | df = df[feature_list] 82 | df = df.round(2) 83 | df[round_list] = df[round_list].round(0) 84 | df = df.rename(columns=title_mapping) 85 | total_elements = df.size 86 | total_na = df.isna().sum().sum() 87 | if (total_na / total_elements) > 0.2: 88 | continue 89 | phq_list = [np.nan] * df.shape[0] 90 | phq_list[-1] = row["y_allraw"]["phq4"] 91 | anxiety_list = [np.nan] * df.shape[0] 92 | anxiety_list[-1] = row["y_allraw"]["anx_weekly_subscale"] 93 | depress_sub_list = [np.nan] * df.shape[0] 94 | depress_sub_list[-1] = row["y_allraw"]["dep_weekly_subscale"] 95 | depress_list = [np.nan] * df.shape[0] 96 | depress_list[-1] = row['y_raw'] 97 | df['phq4'] = phq_list 98 | df['depression'] = depress_list 99 | df['anxiety_sub'] = anxiety_list 100 | df['depress_sub'] = depress_sub_list 101 | 102 | file_name = os.path.join(tmp_path, pid + "_" + date[:10] + ".csv") 103 | 104 | df.to_csv(file_name, index=False) 105 | pids.add(pid) 106 | 107 | tmp_file_list = sorted(os.listdir(tmp_path)) 108 | for pid in pids: 109 | dfs = list() 110 | for i in range(len(tmp_file_list)): 111 | if pid in tmp_file_list[i]: 112 | tmp_df = pd.read_csv(os.path.join(tmp_path, tmp_file_list[i])) 113 | dfs.append(tmp_df) 114 | if len(dfs) < 2: 115 | continue 116 | df = pd.concat(dfs) 117 | df.to_csv(os.path.join(save_path, pid + ".csv"), index=False) 118 | 119 | delete_folder(tmp_path) 120 | -------------------------------------------------------------------------------- /config/dl_feat_prep.yaml: -------------------------------------------------------------------------------- 1 | name: "dl_feat_prep" 2 | 3 | feature_definition: 4 | feature_type: "both" # options: "norm", "nonorm", "both". 
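# Note: "norm" presumably restricts the selection to the person-normalized features (the "_norm"-suffixed
# entries in the lists below), "nonorm" to the raw features, and "both" keeps the two groups together.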
5 | 6 | feature_list: [ 7 | "f_screen:phone_screen_rapids_countepisodeunlock_locmap_exercise:afternoon", 8 | "f_screen:phone_screen_rapids_countepisodeunlock_locmap_greens:night", 9 | "f_screen:phone_screen_rapids_firstuseafter00unlock_locmap_exercise:allday", 10 | "f_screen:phone_screen_rapids_firstuseafter00unlock_locmap_exercise:night", 11 | "f_screen:phone_screen_rapids_firstuseafter00unlock_locmap_home:night", 12 | "f_screen:phone_screen_rapids_maxdurationunlock_locmap_exercise:evening", 13 | "f_screen:phone_screen_rapids_maxdurationunlock_locmap_greens:allday", 14 | "f_screen:phone_screen_rapids_maxdurationunlock_locmap_greens:night", 15 | "f_screen:phone_screen_rapids_maxdurationunlock_locmap_home:night", 16 | "f_screen:phone_screen_rapids_maxdurationunlock_locmap_study:evening", 17 | "f_screen:phone_screen_rapids_mindurationunlock_locmap_exercise:evening", 18 | "f_screen:phone_screen_rapids_mindurationunlock_locmap_exercise:morning", 19 | "f_screen:phone_screen_rapids_stddurationunlock_locmap_exercise:evening", 20 | "f_screen:phone_screen_rapids_stddurationunlock_locmap_home:night", 21 | "f_screen:phone_screen_rapids_sumdurationunlock_locmap_exercise:night", 22 | "f_screen:phone_screen_rapids_sumdurationunlock_locmap_home:night", 23 | "f_screen:phone_screen_rapids_sumdurationunlock_locmap_living:night", 24 | "f_slp:fitbit_sleep_intraday_rapids_maxdurationasleepunifiedmain:night", 25 | "f_slp:fitbit_sleep_intraday_rapids_maxdurationawakeunifiedmain:night", 26 | "f_slp:fitbit_sleep_intraday_rapids_mindurationasleepunifiedmain:afternoon", 27 | "f_slp:fitbit_sleep_intraday_rapids_mindurationasleepunifiedmain:night", 28 | "f_slp:fitbit_sleep_intraday_rapids_sumdurationasleepunifiedmain:allday", 29 | "f_slp:fitbit_sleep_summary_rapids_avgdurationasleepmain:allday", 30 | "f_slp:fitbit_sleep_summary_rapids_avgdurationinbedmain:allday", 31 | "f_slp:fitbit_sleep_summary_rapids_lastbedtimemain:allday", 32 | "f_slp:fitbit_sleep_summary_rapids_sumdurationasleepmain:allday", 33 | "f_slp:fitbit_sleep_summary_rapids_sumdurationinbedmain:allday", 34 | "f_steps:fitbit_steps_intraday_rapids_countepisodesedentarybout:evening", 35 | "f_steps:fitbit_steps_intraday_rapids_maxdurationactivebout:morning", 36 | "f_steps:fitbit_steps_intraday_rapids_maxsteps:evening", 37 | "f_steps:fitbit_steps_intraday_rapids_mindurationactivebout:evening", 38 | 39 | "f_loc:phone_locations_doryab_locationentropy_norm:afternoon", 40 | "f_loc:phone_locations_doryab_loglocationvariance_norm:allday", 41 | "f_loc:phone_locations_doryab_minlengthstayatclusters_norm:afternoon", 42 | "f_loc:phone_locations_doryab_outlierstimepercent_norm:morning", 43 | "f_loc:phone_locations_doryab_radiusgyration_norm:allday", 44 | "f_screen:phone_screen_rapids_avgdurationunlock_locmap_exercise_norm:evening", 45 | "f_screen:phone_screen_rapids_avgdurationunlock_locmap_greens_norm:night", 46 | "f_screen:phone_screen_rapids_firstuseafter00unlock_locmap_exercise_norm:allday", 47 | "f_screen:phone_screen_rapids_maxdurationunlock_locmap_exercise_norm:afternoon", 48 | "f_screen:phone_screen_rapids_maxdurationunlock_locmap_exercise_norm:evening", 49 | "f_screen:phone_screen_rapids_maxdurationunlock_locmap_exercise_norm:night", 50 | "f_screen:phone_screen_rapids_maxdurationunlock_locmap_greens_norm:evening", 51 | "f_screen:phone_screen_rapids_sumdurationunlock_locmap_home_norm:morning", 52 | "f_slp:fitbit_sleep_intraday_rapids_maxdurationawakeunifiedmain_norm:afternoon", 53 | 
"f_slp:fitbit_sleep_intraday_rapids_ratiodurationasleepunifiedwithinmain_norm:afternoon", 54 | "f_slp:fitbit_sleep_intraday_rapids_ratiodurationawakeunifiedwithinmain_norm:afternoon", 55 | "f_steps:fitbit_steps_intraday_rapids_countepisodesedentarybout_norm:afternoon", 56 | "f_steps:fitbit_steps_intraday_rapids_maxdurationsedentarybout_norm:night", 57 | "f_steps:fitbit_steps_intraday_rapids_stdsteps_norm:morning", 58 | ] 59 | 60 | feature_list_more_feat_types: [ 61 | "f_screen:phone_screen_rapids_countepisodeunlock_locmap_exercise:afternoon", 62 | "f_screen:phone_screen_rapids_countepisodeunlock_locmap_greens:night", 63 | "f_screen:phone_screen_rapids_firstuseafter00unlock_locmap_exercise:allday", 64 | "f_screen:phone_screen_rapids_firstuseafter00unlock_locmap_exercise:night", 65 | "f_screen:phone_screen_rapids_firstuseafter00unlock_locmap_home:night", 66 | "f_screen:phone_screen_rapids_maxdurationunlock_locmap_exercise:evening", 67 | "f_screen:phone_screen_rapids_maxdurationunlock_locmap_greens:allday", 68 | "f_screen:phone_screen_rapids_maxdurationunlock_locmap_greens:night", 69 | "f_screen:phone_screen_rapids_maxdurationunlock_locmap_home:night", 70 | "f_screen:phone_screen_rapids_maxdurationunlock_locmap_study:evening", 71 | "f_screen:phone_screen_rapids_mindurationunlock_locmap_exercise:evening", 72 | "f_screen:phone_screen_rapids_mindurationunlock_locmap_exercise:morning", 73 | "f_screen:phone_screen_rapids_stddurationunlock_locmap_exercise:evening", 74 | "f_screen:phone_screen_rapids_stddurationunlock_locmap_home:night", 75 | "f_screen:phone_screen_rapids_sumdurationunlock_locmap_exercise:night", 76 | "f_screen:phone_screen_rapids_sumdurationunlock_locmap_home:night", 77 | "f_screen:phone_screen_rapids_sumdurationunlock_locmap_living:night", 78 | "f_slp:fitbit_sleep_intraday_rapids_maxdurationasleepunifiedmain:night", 79 | "f_slp:fitbit_sleep_intraday_rapids_maxdurationawakeunifiedmain:night", 80 | "f_slp:fitbit_sleep_intraday_rapids_mindurationasleepunifiedmain:afternoon", 81 | "f_slp:fitbit_sleep_intraday_rapids_mindurationasleepunifiedmain:night", 82 | "f_slp:fitbit_sleep_intraday_rapids_sumdurationasleepunifiedmain:allday", 83 | "f_slp:fitbit_sleep_summary_rapids_avgdurationasleepmain:allday", 84 | "f_slp:fitbit_sleep_summary_rapids_avgdurationinbedmain:allday", 85 | "f_slp:fitbit_sleep_summary_rapids_lastbedtimemain:allday", 86 | "f_slp:fitbit_sleep_summary_rapids_sumdurationasleepmain:allday", 87 | "f_slp:fitbit_sleep_summary_rapids_sumdurationinbedmain:allday", 88 | "f_steps:fitbit_steps_intraday_rapids_countepisodesedentarybout:evening", 89 | "f_steps:fitbit_steps_intraday_rapids_maxdurationactivebout:morning", 90 | "f_steps:fitbit_steps_intraday_rapids_maxsteps:evening", 91 | "f_steps:fitbit_steps_intraday_rapids_mindurationactivebout:evening", 92 | "f_blue:phone_bluetooth_doryab_uniquedevicesown:night", 93 | "f_call:phone_calls_rapids_incoming_distinctcontacts:afternoon", 94 | 95 | "f_loc:phone_locations_doryab_locationentropy_norm:afternoon", 96 | "f_loc:phone_locations_doryab_loglocationvariance_norm:allday", 97 | "f_loc:phone_locations_doryab_minlengthstayatclusters_norm:afternoon", 98 | "f_loc:phone_locations_doryab_outlierstimepercent_norm:morning", 99 | "f_loc:phone_locations_doryab_radiusgyration_norm:allday", 100 | "f_screen:phone_screen_rapids_avgdurationunlock_locmap_exercise_norm:evening", 101 | "f_screen:phone_screen_rapids_avgdurationunlock_locmap_greens_norm:night", 102 | "f_screen:phone_screen_rapids_firstuseafter00unlock_locmap_exercise_norm:allday", 103 | 
"f_screen:phone_screen_rapids_maxdurationunlock_locmap_exercise_norm:afternoon", 104 | "f_screen:phone_screen_rapids_maxdurationunlock_locmap_exercise_norm:evening", 105 | "f_screen:phone_screen_rapids_maxdurationunlock_locmap_exercise_norm:night", 106 | "f_screen:phone_screen_rapids_maxdurationunlock_locmap_greens_norm:evening", 107 | "f_screen:phone_screen_rapids_sumdurationunlock_locmap_home_norm:morning", 108 | "f_slp:fitbit_sleep_intraday_rapids_maxdurationawakeunifiedmain_norm:afternoon", 109 | "f_slp:fitbit_sleep_intraday_rapids_ratiodurationasleepunifiedwithinmain_norm:afternoon", 110 | "f_slp:fitbit_sleep_intraday_rapids_ratiodurationawakeunifiedwithinmain_norm:afternoon", 111 | "f_steps:fitbit_steps_intraday_rapids_countepisodesedentarybout_norm:afternoon", 112 | "f_steps:fitbit_steps_intraday_rapids_maxdurationsedentarybout_norm:night", 113 | "f_steps:fitbit_steps_intraday_rapids_stdsteps_norm:morning", 114 | "f_blue:phone_bluetooth_doryab_countscansmostfrequentdeviceacrossdatasetown_norm:night", 115 | "f_call:phone_calls_rapids_outgoing_count_norm:afternoon", 116 | ] 117 | 118 | -------------------------------------------------------------------------------- /utils/basic_utils/utils_ml.py: -------------------------------------------------------------------------------- 1 | from cProfile import label 2 | from typing import Dict 3 | from sklearn.model_selection import LeaveOneOut, LeaveOneGroupOut, GroupKFold 4 | from sklearn.feature_selection import mutual_info_classif 5 | from sklearn.metrics import confusion_matrix, roc_curve, roc_auc_score, classification_report, accuracy_score 6 | from sklearn import tree, svm, ensemble, linear_model, neural_network, neighbors 7 | import numpy as np 8 | from copy import deepcopy 9 | import pandas as pd 10 | 11 | def results_report_sklearn_multiclass(clf, X, y) -> Dict: 12 | """Results report function that is compatible with sklearn's cross_validate function - multi classification 13 | 14 | Args: 15 | clf (BaseEstimator): classifier that with .predict() and .predict_proba() API 16 | X (pd.DataFrame or np.ndarray): input data 17 | y (list or pd.Series or np.ndarray or ): label 18 | 19 | Returns: 20 | Dict: results dictionary 21 | """ 22 | y_pred = clf.predict(X) 23 | try: 24 | y_pred_prob = clf.predict_proba(X) 25 | flag_rocauc = True 26 | except: 27 | flag_rocauc = False 28 | y_labels = sorted(list(set(y))) 29 | results_dict = classification_report(y_true = y, y_pred = y_pred, output_dict=True, zero_division = 0, labels = y_labels) 30 | 31 | cfmtx = confusion_matrix(y, y_pred, labels=y_labels) 32 | acc = cfmtx.diagonal()/cfmtx.sum(axis=1) 33 | 34 | if (flag_rocauc): 35 | rocauc = roc_auc_score(y_true = y, y_score = y_pred_prob, multi_class='ovr') 36 | 37 | results_dict_new = {} 38 | count = 0 39 | for k, v in results_dict.items(): 40 | if (type(v) is dict): 41 | for kk, vv in v.items(): 42 | results_dict_new[f"{kk}#{k}"] = vv 43 | if (k in y_labels): 44 | results_dict_new[f"acc#{k}"] = acc[count] 45 | count += 1 46 | else: 47 | results_dict_new[k] = v 48 | results_dict_new["rocauc"] = rocauc 49 | return results_dict_new 50 | 51 | def results_report_sklearn(clf, X, y, return_confusion_mtx=False) -> Dict: 52 | """Results report function that is compatible with sklearn's cross_validate function - binary classification 53 | 54 | Args: 55 | clf (BaseEstimator): classifier that with .predict() and .predict_proba() API 56 | X (pd.DataFrame or np.ndarray): input data 57 | y (list or pd.Series or np.ndarray or ): label 58 | return_confusion_mtx (bool, 
optional): whether to include confusion matrix in the results. Defaults to False. 59 | 60 | Returns: 61 | Dict: results dictionary 62 | """ 63 | results_dict = results_report(y_test = y, y_pred = clf.predict(X), labels = None, 64 | verbose = False, return_confusion_mtx = return_confusion_mtx) 65 | if (len(set(y)) == 1): 66 | results_dict["roc_auc"] = results_dict["balanced_acc"] 67 | else: 68 | roc_auc = roc_auc_score(y_true=y, y_score = clf.predict_proba(X)[:,1]) 69 | results_dict["roc_auc"] = roc_auc 70 | return results_dict 71 | 72 | def results_report_sklearn_noprob(clf, X, y, return_confusion_mtx=False) -> Dict: 73 | """Results report function that is compatible with sklearn's cross_validate function - binary classification 74 | Don't have to support .predict_proba() API 75 | 76 | Args: 77 | clf (BaseEstimator): classifier that with .predict() API 78 | X (pd.DataFrame or np.ndarray): input data 79 | y (list or pd.Series or np.ndarray or ): label 80 | return_confusion_mtx (bool, optional): whether to include confusion matrix in the results. Defaults to False. 81 | 82 | Returns: 83 | Dict: results dictionary 84 | """ 85 | results_dict = results_report(y_test = y, y_pred = clf.predict(X), labels = None, 86 | verbose = False, return_confusion_mtx = return_confusion_mtx) 87 | return results_dict 88 | 89 | def results_report(y_test = None, y_pred = None, 90 | confusion_mtx = None, verbose=True, labels = [0,1], return_confusion_mtx=False): 91 | """Report a number of metrics of binary classification results 92 | 93 | Parameters 94 | ---------- 95 | y_test : list or numpy array 96 | Ground truth list 97 | y_pred : list or numpy array 98 | Prediction list, should be the same as y_test 99 | confusion_mtx : list or numpy 2x2 array, optional, if provided, will not use y_test/y_pred 100 | Confusion Matrix 101 | verbose : bool, optional 102 | Whether show the details 103 | labels: list, optional 104 | The value of y 105 | return_confusion_mtx: bool, optional 106 | Whether to return confusion_mtx 107 | 108 | Returns 109 | ------- 110 | dictionary 111 | Results dict 112 | """ 113 | if (confusion_mtx is None): 114 | try: 115 | confusion_mtx = confusion_matrix(y_true = y_test, y_pred = y_pred, labels = [False,True]) 116 | except: 117 | confusion_mtx = confusion_matrix(y_true = y_test, y_pred = y_pred, labels = labels) 118 | else: 119 | confusion_mtx = np.array(confusion_mtx) 120 | 121 | tn = confusion_mtx[0][0] 122 | fp = confusion_mtx[0][1] 123 | fn = confusion_mtx[1][0] 124 | tp = confusion_mtx[1][1] 125 | 126 | acc, rec, pre, f1 = acc_rec_pre_f1_calc(tp=tp, fp=fp, fn=fn, tn=tn) 127 | _acc, _rec, _pre, f1_neg = acc_rec_pre_f1_calc(tp=tn, fp=fn, fn=fp, tn=tp) 128 | p = tp + fn 129 | n = fp + tn 130 | ssum = p + n 131 | 132 | sens = rec 133 | if (n == 0): 134 | spec = 1 135 | else: 136 | spec = tn / n 137 | balanced_acc = (sens + spec) / 2 138 | 139 | if (((tn + fp) == 0) or ((tn + fn) == 0) or ((tp + fp) == 0) or ((tp + fn) == 0)): 140 | mcc = 1 141 | else: 142 | mcc = (tn * tp - fp * fn) / np.sqrt((tn + fp) * (tn + fn) * (tp + fp) * (tp + fn)) 143 | 144 | p_yes = (tn+fp)*(tn+fn)/ (ssum**2) 145 | p_no = (fn+tp)*(fp+tp)/ (ssum**2) 146 | pp = p_yes + p_no 147 | if (pp == 1): 148 | kappa = 1 149 | else: 150 | kappa = (acc - pp) / (1 - pp) 151 | 152 | results = {"acc": acc, 153 | "balanced_acc": balanced_acc, 154 | "pre": pre, 155 | "rec": rec, 156 | "f1": f1, 157 | "f1_neg":f1_neg, 158 | "mcc": mcc, 159 | "kappa": kappa 160 | } 161 | cfmtx = [[tp,fn],[fp,tn]] 162 | if return_confusion_mtx: 163 | 
results.update({"cfmtx": cfmtx}) 164 | if (verbose): 165 | results_string = \ 166 | "acc:{:.3f},balacc:{:.3f},pre:{:.3f},rec:{:.3f},f1:{:.3f},f1_neg:{:.3f},mcc:{:.3f},kappa:{:.3f}".\ 167 | format(acc, balanced_acc, pre, rec, f1, f1_neg, mcc, kappa) + ",cfmtx:" + str(cfmtx) 168 | print(results_string) 169 | return results 170 | 171 | 172 | def get_clf(clf_type, parameters, direct_param_flag = False): 173 | """A helper function to get the sklearn classifier. This function can be extended anytime 174 | 175 | Args: 176 | clf_type (str): classifier type, currently support adaboost, svm, rf, dt, lr, mlp, knn 177 | parameters (dict): parameter dict, with the necessary param stored as param_name:param_value 178 | direct_param_flag (bool, optional): Whether to directly passing parameters. Defaults to False. 179 | 180 | Raises: 181 | Exception: unsupported model type 182 | 183 | Returns: 184 | BaseEstimator: classifier 185 | """ 186 | 187 | if (direct_param_flag): 188 | if (clf_type == "adaboost"): 189 | clf = ensemble.AdaBoostClassifier(**parameters) 190 | elif (clf_type == "svm"): 191 | clf = svm.SVC(**parameters) 192 | elif (clf_type == "rf"): 193 | clf = ensemble.RandomForestClassifier(**parameters) 194 | elif (clf_type == "dt"): 195 | clf = tree.DecisionTreeClassifier(**parameters) 196 | elif (clf_type == "lr"): 197 | clf = linear_model.LogisticRegression(**parameters) 198 | elif (clf_type == "mlp"): 199 | clf = neural_network.MLPClassifier(**parameters) 200 | elif (clf_type == "knn"): 201 | clf = neighbors.KNeighborsClassifier(**parameters) 202 | else: 203 | raise Exception("Sorry. clf_type is not supported.") 204 | else: 205 | if (clf_type == "adaboost"): 206 | if ("max_leaf_nodes" not in parameters): 207 | clf = ensemble.AdaBoostClassifier(n_estimators = parameters["n_estimators"], 208 | base_estimator = tree.DecisionTreeClassifier( 209 | max_depth= parameters["max_depth"]), 210 | learning_rate = 1 if "learning_rate" not in parameters else parameters["learning_rate"], 211 | random_state = None if "random_state" not in parameters else parameters["random_state"]) 212 | else: 213 | clf = ensemble.AdaBoostClassifier(n_estimators = parameters["n_estimators"], 214 | base_estimator = tree.DecisionTreeClassifier( 215 | max_leaf_nodes = parameters["max_leaf_nodes"]), 216 | learning_rate = 1 if "learning_rate" not in parameters else parameters["learning_rate"], 217 | random_state = None if "random_state" not in parameters else parameters["random_state"]) 218 | elif (clf_type == "svm"): 219 | clf = svm.SVC(kernel='rbf', C=parameters["C"]) 220 | elif (clf_type == "rf"): 221 | if ("max_leaf_nodes" not in parameters): 222 | clf = ensemble.RandomForestClassifier(n_estimators=parameters["n_estimators"], 223 | max_depth = parameters["max_depth"], random_state = None if "random_state" not in parameters else parameters["random_state"]) 224 | else: 225 | clf = ensemble.RandomForestClassifier(n_estimators=parameters["n_estimators"], 226 | max_leaf_nodes = parameters["max_leaf_nodes"], random_state = None if "random_state" not in parameters else parameters["random_state"]) 227 | elif (clf_type == "dt"): 228 | if ("max_leaf_nodes" not in parameters): 229 | clf = tree.DecisionTreeClassifier(max_depth = parameters["max_depth"]) 230 | else: 231 | clf = tree.DecisionTreeClassifier(max_leaf_nodes = parameters["max_leaf_nodes"]) 232 | elif (clf_type == "lr"): 233 | if (parameters["penalty"] == "elasticnet"): 234 | clf = linear_model.LogisticRegression(penalty = parameters["penalty"], l1_ratio = parameters["l1_ratio"], 
solver = "saga", C = parameters["C"]) 235 | else: 236 | clf = linear_model.LogisticRegression(penalty = parameters["penalty"], C = parameters["C"]) 237 | elif (clf_type == "mlp"): 238 | clf = neural_network.MLPClassifier(hidden_layer_sizes = parameters["hidden_layer_sizes"], 239 | activation = "relu", 240 | solver = parameters["solver"], 241 | learning_rate_init = parameters["learning_rate_init"]) 242 | 243 | else: 244 | raise Exception("Sorry. clf_type is not supported.") 245 | 246 | return clf 247 | 248 | def acc_rec_pre_f1_calc(tp, fp, fn, tn): 249 | p = tp + fn 250 | n = fp + tn 251 | ssum = p + n 252 | 253 | acc = (tn + tp) / ssum 254 | if (p == 0): 255 | rec = 1 256 | else: 257 | rec = tp / p 258 | 259 | if (tp+fp == 0): 260 | pre = 1 261 | else: 262 | pre = tp / (tp+fp) 263 | 264 | if ((rec + pre) == 0): 265 | f1 = 0 266 | else: 267 | f1 = 2 * rec * pre / (rec + pre) 268 | 269 | return acc, rec, pre, f1 -------------------------------------------------------------------------------- /data_loader/data_loader_ml.py: -------------------------------------------------------------------------------- 1 | import os, sys 2 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) 3 | from utils.common_settings import * 4 | from utils import path_definitions 5 | from data import data_factory 6 | 7 | class DatasetDict(): 8 | """A data structor of saving a dataset, including the dataset key, prediction target, 9 | as well as datapoints (as a pandas.DataFrame) """ 10 | def __init__(self, key: str, prediction_target: str, datapoints: pd.DataFrame): 11 | self.key = deepcopy(key) 12 | self.prediction_target = deepcopy(prediction_target) 13 | assert type(datapoints) == pd.DataFrame 14 | assert set(["pid", "date","X_raw","y_raw","device_type"]).issubset(set(datapoints.columns)) 15 | self.datapoints = deepcopy(datapoints) 16 | 17 | class DataRepo(): 18 | """A data structor focused on saving data input matrix X, label y, and participant ID (pid)""" 19 | def __init__(self, X:pd.DataFrame, y:pd.Series, pids:pd.Series): 20 | self.X = deepcopy(X) 21 | self.y = deepcopy(y) 22 | self.pids = deepcopy(pids) 23 | 24 | def __getitem__(self, key): 25 | return DataRepo(self.X[key], self.y[key], self.pids[key]) 26 | 27 | class DataRepo_tf(DataRepo): 28 | """A variant of DataRepo. It has the same structure of DataRepo. 29 | It acts as a special data type for tensorflow dataset.""" 30 | def __init__(self, X:tf.data.Dataset or Dict[str, tf.data.Dataset], y: List[bool] or np.ndarray, pids: List[bool] or np.ndarray): 31 | super().__init__(None, None, None) 32 | self.X = X 33 | self.y = y 34 | self.pids = pids 35 | 36 | class DataRepo_np(DataRepo): 37 | """A variant of DataRepo. It only saves input X as np.ndarray. 38 | So it has the additional structure of saving columns information. 39 | The main difference between DataRepo and DataRepo_np is that 40 | the latter is mainly used for deep learning methods to simply processing. 
41 | After processing, a DataRepo_np will be converted to DataRepo_tf, which was then used for model training""" 42 | def __init__(self, data_repo: DataRepo = None, cols: List[str] = None, 43 | X:np.ndarray=None, y:np.ndarray=None, pids:np.ndarray=None): 44 | super().__init__(None, None, None) 45 | if (data_repo is not None): 46 | self.X = np.array([i for i in data_repo.X.values]) 47 | self.y = np.array([[0,1] if y else [1,0] for y in data_repo.y.values]) 48 | self.pids = deepcopy(data_repo.pids.values) 49 | if (cols is not None): 50 | assert len(cols) == self.X.shape[-1] 51 | self.X_cols = deepcopy(cols) 52 | else: 53 | self.X = X 54 | self.y = y 55 | self.pids = pids 56 | self.X_cols = cols 57 | 58 | def __getitem__(self, key): 59 | return DataRepo_np(X = self.X[key], y = self.y[key], pids = self.pids[key], cols = self.X_cols) 60 | 61 | def data_loader_read_label_file(institution:str, phase:int, prediction_target:str) -> Union[pd.DataFrame,str]: 62 | """Load a single label file 63 | 64 | Args: 65 | institution (str): insitution code 66 | phase (int): number of study phase 67 | prediction_target (str): prediction task, current support "dep_endterm" and "dep_weekly" 68 | 69 | Raises: 70 | ValueError: an unsupported prediction target 71 | 72 | Returns: 73 | pd.DataFrame: dataframe of labels 74 | str: prediction target col name 75 | """ 76 | if (prediction_target == "dep_weekly"): 77 | prediction_target_col = "dep" 78 | df_label = pd.read_csv(data_factory.survey_folder[institution][phase] + "dep_weekly.csv") 79 | elif (prediction_target == "dep_endterm"): 80 | prediction_target_col = "dep" 81 | df_label = pd.read_csv(data_factory.survey_folder[institution][phase] + "dep_endterm.csv") 82 | else: 83 | df_label_ema = pd.read_csv(data_factory.survey_folder[institution][phase] + "ema.csv") 84 | df_label_pre = pd.read_csv(data_factory.survey_folder[institution][phase] + "pre.csv") 85 | df_label_post = pd.read_csv(data_factory.survey_folder[institution][phase] + "post.csv") 86 | if prediction_target not in data_factory.threshold_book: 87 | raise ValueError(f"'{prediction_target}' is not defined in threshold book.") 88 | threhold_as_true = data_factory.threshold_book[prediction_target]["threshold_as_true"] 89 | threhold_as_false = data_factory.threshold_book[prediction_target]["threshold_as_false"] 90 | if (threhold_as_true > threhold_as_false): 91 | flag_larger_is_true = True 92 | elif (threhold_as_true < threhold_as_false): 93 | flag_larger_is_true = False 94 | else: 95 | raise ValueError(f"Please specifiy {prediction_target}'s two-side thresholds separately (inclusive). 
They cannot be the same.") 96 | 97 | # Add new keys into extra pids dict to handle new predict target 98 | with open(os.path.join(path_definitions.DATA_PATH, "additional_user_setup", "overlapping_pids.json"), "r") as f: 99 | overlapping_pids_dict = json.load(f) 100 | with open(os.path.join(path_definitions.DATA_PATH, "additional_user_setup", "split_5fold_pids.json"), "r") as f: 101 | split_5fold_pids_dict = json.load(f) 102 | 103 | prediction_target_col = prediction_target + '_label' 104 | if (prediction_target in df_label_ema.columns): 105 | df_label = deepcopy(df_label_ema) 106 | closer_dep_task = "dep_weekly" 107 | elif (prediction_target in df_label_pre.columns): 108 | df_label = deepcopy(df_label_pre) 109 | closer_dep_task = "dep_endterm" 110 | elif (prediction_target in df_label_post.columns): 111 | df_label = deepcopy(df_label_post) 112 | closer_dep_task = "dep_endterm" 113 | else: 114 | raise ValueError(f"'{prediction_target}' not in the survey file.") 115 | df_label = deepcopy(df_label[~df_label[prediction_target].isna()]) 116 | df_label[prediction_target_col] = df_label[prediction_target].apply( 117 | lambda x : float(x) >= threhold_as_true if flag_larger_is_true else float(x) <= threhold_as_true) 118 | # simply copy the split from the basic task. Can be improved 119 | if (prediction_target not in overlapping_pids_dict): 120 | overlapping_pids_dict[prediction_target] = deepcopy(overlapping_pids_dict[closer_dep_task]) 121 | if (prediction_target not in split_5fold_pids_dict): 122 | split_5fold_pids_dict[prediction_target] = deepcopy(split_5fold_pids_dict[closer_dep_task]) 123 | with open(os.path.join(path_definitions.DATA_PATH, "additional_user_setup", "overlapping_pids.json"), "w") as f: 124 | json.dump(overlapping_pids_dict, f) 125 | with open(os.path.join(path_definitions.DATA_PATH, "additional_user_setup", "split_5fold_pids.json"), "w") as f: 126 | json.dump(split_5fold_pids_dict, f) 127 | 128 | df_label[prediction_target_col+"_raw"] = df_label[prediction_target_col] 129 | df_label["pid"] = df_label["pid"].apply(lambda x : f"{x}#{institution}_{phase}") 130 | df_label = df_label.drop_duplicates(["pid", "date"], keep = "last") 131 | df_label["date"] = pd.to_datetime(df_label["date"]) 132 | return df_label, prediction_target_col 133 | 134 | def data_loader_single_dataset_label_based(institution:str, phase:int, 135 | prediction_target:str, flag_more_feat_types:bool = False) -> pd.DataFrame: 136 | """Load a single dataset for DataRepo of a given institution and phase 137 | 138 | Args: 139 | institution (str): insitution code 140 | phase (int): number of study phase 141 | prediction_target (str): prediction task, current support "dep_endterm" and "dep_weekly" 142 | flag_more_feat_types (bool, optional): whether load all sensor types. Should be False for maximum compatibility. Defaults to False. 
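        Example (illustrative only; follows the institution/phase naming used elsewhere in this repo):
            df_datapoints = data_loader_single_dataset_label_based("INS-W", 1, "dep_weekly")
            # each returned row pairs a 28-day feature window (X_raw) ending on a label date with its label (y_raw)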
143 | 144 | Raises: 145 | ValueError: an unsupported prediction target 146 | 147 | Returns: 148 | pd.DataFrame: dataframe of data points that used as DataRepo.datapoints 149 | """ 150 | df_full_rawdata = pd.read_csv(data_factory.feature_folder[institution][phase] + "rapids.csv", low_memory=False) 151 | df_full_rawdata["date"] = pd.to_datetime(df_full_rawdata["date"]) 152 | df_full_rawdata["pid"] = df_full_rawdata["pid"].apply(lambda x : f"{x}#{institution}_{phase}") 153 | 154 | df_participant_file = pd.read_csv(data_factory.participants_info_folder[institution][phase] + "platform.csv", low_memory=False) 155 | df_participant_file["pid"] = df_participant_file["pid"].apply(lambda x : f"{x}#{institution}_{phase}") 156 | df_participant_file = df_participant_file.set_index("pid") 157 | 158 | df_label, prediction_target_col = data_loader_read_label_file(institution, phase, prediction_target) 159 | 160 | datapoints = [] 161 | 162 | if not flag_more_feat_types: # maximum compatibility of multiple datasets across insitutes 163 | fts = ['f_loc', 'f_screen', 'f_slp', 'f_steps'] 164 | else: 165 | fts = ['f_loc', 'f_screen', 'f_slp', 'f_steps', "f_blue", "f_call"] 166 | retained_features = ["pid", "date"] 167 | for col in df_full_rawdata.columns: 168 | for ft in fts: 169 | if (col.startswith(ft)): 170 | retained_features.append(col) 171 | break 172 | 173 | for idx, row in df_label.iterrows(): 174 | pid = row["pid"] 175 | date_end = row["date"] 176 | date_start = date_end + timedelta(days = -27) # past 4 weeks 177 | 178 | df_data_window = df_full_rawdata[df_full_rawdata["pid"] == pid] 179 | df_data_window = df_data_window[(date_start <= df_data_window["date"]) & (df_data_window["date"] <= date_end)] 180 | if (df_data_window.shape[0] == 0): 181 | continue 182 | df_data_windowplaceholder = pd.DataFrame(pd.date_range(date_start, date_end), columns=["date"]) 183 | df_data_windowplaceholder["pid"] = pid 184 | df_data_window = df_data_windowplaceholder.merge(df_data_window, left_on=["pid","date"], right_on=["pid","date"], how="left") 185 | df_data_window = deepcopy(df_data_window) 186 | 187 | datapoint = {"pid":pid, "date": date_end, 188 | "X_raw": df_data_window[retained_features], "y_raw": row[prediction_target_col], "y_allraw": row, 189 | "device_type": df_participant_file.loc[pid]["platform"].split(";")[0] } 190 | datapoints.append(datapoint) 191 | df_datapoints = pd.DataFrame(datapoints) 192 | 193 | if (prediction_target == "dep_weekly"): 194 | pids_few_response = df_datapoints.groupby("pid").count() 195 | pids_few_response = list(pids_few_response[pids_few_response["date"]<2].index) 196 | df_datapoints = df_datapoints[~df_datapoints["pid"].isin(pids_few_response)] 197 | 198 | return df_datapoints 199 | 200 | def data_loader_single_dataset_raw(institution:str, phase:int, prediction_target:str) -> pd.DataFrame: 201 | """Load a single raw data of a given institution and phase 202 | 203 | Args: 204 | institution (str): insitution code 205 | phase (int): number of study phase 206 | prediction_target (str): prediction task, current support "dep_endterm" and "dep_weekly" 207 | 208 | Raises: 209 | ValueError: an unsupported prediction target 210 | 211 | Returns: 212 | pd.DataFrame: dataframe of all raw data, with per person per day as a row 213 | """ 214 | df_full_rawdata = pd.read_csv(data_factory.feature_folder[institution][phase] + "rapids.csv", low_memory=False) 215 | df_full_rawdata["date"] = pd.to_datetime(df_full_rawdata["date"]) 216 | df_full_rawdata["pid"] = df_full_rawdata["pid"].apply(lambda x : 
f"{x}#{institution}_{phase}") 217 | 218 | df_participant_file = pd.read_csv(data_factory.participants_info_folder[institution][phase] + "platform.csv", low_memory=False) 219 | df_participant_file["pid"] = df_participant_file["pid"].apply(lambda x : f"{x}#{institution}_{phase}") 220 | df_participant_file = df_participant_file.set_index("pid") 221 | 222 | df_label, prediction_target_col = data_loader_read_label_file(institution, phase, prediction_target) 223 | 224 | retained_features = ["pid", "date"] 225 | retained_features += [c for c in df_full_rawdata.columns if c not in ["pid", "date"]] 226 | 227 | df_full_rawdata_ = df_full_rawdata.merge(df_label, left_on=["pid","date"], right_on=["pid","date"], how="left") 228 | df_full_rawdata_ = df_full_rawdata_[[col for col in df_full_rawdata_.columns if col in [prediction_target_col] + retained_features]] 229 | df_participant_file["platform_split"] = df_participant_file["platform"].apply(lambda x: x.split(";")[0]) 230 | df_full_rawdata_["device_type"] = df_full_rawdata_["pid"].apply(lambda x : df_participant_file.loc[x]["platform_split"]) 231 | df_datapoints = df_full_rawdata_ 232 | 233 | if (prediction_target == "dep_weekly"): 234 | pids_few_response = df_datapoints.groupby("pid").count() 235 | pids_few_response = list(pids_few_response[pids_few_response["date"]<2].index) 236 | df_datapoints = df_datapoints[~df_datapoints["pid"].isin(pids_few_response)] 237 | 238 | return df_datapoints 239 | 240 | 241 | def data_loader_single(prediction_target:str, institution:str, phase:int, flag_more_feat_types:bool = False) -> DatasetDict: 242 | """Helper function to load a single DatasetDict of a given institution and phase. 243 | If the data is already saved as pkl file, load the pkl file directly to accelerate the process. 244 | 245 | Args: 246 | prediction_target (str): prediction task, current support "dep_endterm" and "dep_weekly" 247 | institution (str): insitution code 248 | phase (int): number of study phase 249 | flag_more_feat_types (bool, optional): whether load all sensor types. Should be False for maximum compatibility. Defaults to False. 250 | 251 | Returns: 252 | DatasetDict: data structure of a dataset 253 | """ 254 | ds_key = f"{institution}_{phase}" 255 | if not flag_more_feat_types: 256 | dataset_file_path = os.path.join(path_definitions.DATA_PATH, "datarepo", f"{prediction_target}--{ds_key}.pkl") 257 | else: 258 | dataset_file_path = os.path.join(path_definitions.DATA_PATH, "datarepo_max_feature_types", f"{prediction_target}--{ds_key}.pkl") 259 | 260 | if (os.path.exists(dataset_file_path)): 261 | with open(dataset_file_path, "rb") as f: 262 | dataset = pickle.load(f) 263 | else: 264 | datapoints = data_loader_single_dataset_label_based(institution, phase, prediction_target, flag_more_feat_types) 265 | dataset = DatasetDict(key = ds_key, prediction_target=prediction_target, datapoints=datapoints) 266 | Path(os.path.split(dataset_file_path)[0]).mkdir(parents=True, exist_ok=True) 267 | with open(dataset_file_path, "wb") as f: 268 | pickle.dump(dataset, f) 269 | return dataset 270 | 271 | 272 | def data_loader(ds_keys_dict: Dict[str, List[str]], flag_more_feat_types:bool = False, verbose:bool = True) -> Dict[str, Dict[str, DatasetDict]]: 273 | """Load all DatasetDict give dataset keys 274 | 275 | Args: 276 | ds_keys_dict (Dict[str, List[str]]): A dictionary of dataset key list. {prediction_target: {list of ds_key (institution_phase)}} 277 | flag_more_feat_types (bool, optional): whether load all sensor types. 
Should be False for maximum compatibility. Defaults to False. 278 | verbose (bool, optional): Whether to display the progress bar and intermediate reuslts. Defaults to True 279 | 280 | Returns: 281 | Dict[str, Dict[str, DatasetDict]]: a dictionary of dictionary of DatasetDict. Level one is prediction_target, level two is ds_key 282 | """ 283 | dataset_dict = {} 284 | 285 | for prediction_target, ds_keys in tqdm(ds_keys_dict.items(), position=0, desc= "loading prediction targets", disable=not verbose): 286 | for ds_key in tqdm(ds_keys, position=1, desc= "dataset keys", leave=False, disable=not verbose): 287 | if (verbose): 288 | tqdm.write("loading " + prediction_target + " " + ds_key + " " + datetime.now().strftime("%d/%m/%Y %H:%M:%S")) 289 | institution, phase = ds_key.split("_") 290 | phase = int(phase) 291 | if flag_more_feat_types: # currently only one institute can have max feature types 292 | assert institution in ["INS-W", "INS-W-sample"] 293 | dataset = data_loader_single(prediction_target, institution, phase, flag_more_feat_types) 294 | if (prediction_target not in dataset_dict): 295 | dataset_dict[prediction_target] = {} 296 | dataset_dict[prediction_target][ds_key] = dataset 297 | 298 | return dataset_dict 299 | 300 | def data_loader_raw_single(dataset_file_path: str) -> pd.DataFrame: 301 | """Helper function to load raw data of a given institution and phase. 302 | If the data is already saved as pkl file, load the pkl file directly to accelerate the process. 303 | 304 | Args: 305 | institution (str): insitution code 306 | phase (int): number of study phase 307 | 308 | Returns: 309 | pd.DataFrame: raw data of a dataset 310 | """ 311 | 312 | if (os.path.exists(dataset_file_path)): 313 | with open(dataset_file_path, "rb") as f: 314 | dataset_df = pickle.load(f) 315 | else: 316 | print("data not exists!s") 317 | dataset_df = None 318 | 319 | return dataset_df 320 | 321 | def data_loader_raw(ds_keys_list: List[str], verbose:bool = True) -> Dict[str, pd.DataFrame]: 322 | """Load all raw data give dataset keys 323 | 324 | Args: 325 | ds_keys_list (List[str]): a list of dataset keys 326 | verbose (bool, optional): Whether to display the progress bar and intermediate reuslts. 
Defaults to True 327 | 328 | Returns: 329 | Dict[str, pd.DataFrame]: a dictionary of raw data, indexed by dataset keys 330 | """ 331 | dataset_dict = {} 332 | 333 | for ds_key in tqdm(ds_keys_list, desc="loading dataset keys", disable= not verbose): 334 | if verbose: 335 | tqdm.write("loading " + ds_key + " " + datetime.now().strftime("%d/%m/%Y %H:%M:%S")) 336 | institution, phase = ds_key.split("_") 337 | phase = int(phase) 338 | dataset = data_loader_raw_single(institution, phase) 339 | dataset_dict[ds_key] = dataset 340 | return dataset_dict -------------------------------------------------------------------------------- /utils/common_settings.py: -------------------------------------------------------------------------------- 1 | ################ 2 | # Import commonly used packages 3 | ################ 4 | 5 | from typing import Dict, Tuple, List, Union 6 | from pathlib import Path 7 | import argparse 8 | import glob 9 | import random 10 | import numpy as np 11 | import os 12 | import pandas as pd 13 | from matplotlib import pyplot as plt 14 | import math 15 | import gc 16 | import warnings 17 | from copy import deepcopy 18 | import json 19 | import itertools 20 | import collections 21 | from datetime import datetime, timedelta 22 | import time 23 | import ast 24 | import pickle 25 | import sys 26 | import traceback 27 | import uuid 28 | import yaml 29 | import scipy 30 | from scipy import stats 31 | import matplotlib 32 | import matplotlib.ticker as ticker 33 | import matplotlib.image as mpimg 34 | from matplotlib.ticker import MultipleLocator 35 | plt.style.use(['fivethirtyeight','ggplot']) 36 | import seaborn as sns 37 | from tqdm import tqdm 38 | import warnings 39 | 40 | # Set the maximum number of CPU to be used for multiprocessing 41 | import multiprocessing 42 | if "SLURM_JOB_CPUS_PER_NODE" in os.environ: 43 | CPU_COUNT = int(os.environ['SLURM_JOB_CPUS_PER_NODE']) 44 | else: 45 | CPU_COUNT = multiprocessing.cpu_count() 46 | NJOB = int(np.ceil(CPU_COUNT// 2)) 47 | 48 | from multiprocessing import Pool, Manager, set_start_method, get_context 49 | from multiprocessing.pool import ThreadPool 50 | import ray 51 | import swifter 52 | os.environ["MODIN_ENGINE"] = "ray" 53 | 54 | import sklearn 55 | from sklearn.manifold import TSNE 56 | from sklearn.base import BaseEstimator, ClassifierMixin 57 | from sklearn.decomposition import PCA 58 | from sklearn.model_selection import LeaveOneOut,LeaveOneGroupOut,train_test_split, GroupShuffleSplit,GroupKFold, StratifiedGroupKFold 59 | from sklearn.model_selection import cross_validate, cross_val_score 60 | from sklearn.metrics import recall_score, accuracy_score, confusion_matrix, pairwise, pairwise_distances, precision_recall_fscore_support, roc_auc_score, roc_curve 61 | from sklearn import tree, svm, ensemble, linear_model 62 | from sklearn.feature_selection import mutual_info_classif 63 | 64 | from sklearn.model_selection import GridSearchCV 65 | import sklearn.cluster as cluster 66 | from sklearn.preprocessing import MinMaxScaler, StandardScaler, RobustScaler, normalize 67 | 68 | minmax_scaler = MinMaxScaler() 69 | standard_scaler = StandardScaler() 70 | robust_scaler = RobustScaler() 71 | 72 | os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" 73 | import tensorflow as tf 74 | import tensorflow_addons as tfa 75 | 76 | from tensorflow import keras 77 | from tensorflow.python.keras import backend as K 78 | from tensorflow.keras import layers, activations 79 | 80 | from tensorflow.keras.models import Sequential 81 | from tensorflow.keras.optimizers 
import Adam, SGD 82 | from tensorflow.keras.models import Model 83 | from tensorflow.keras.callbacks import EarlyStopping, LearningRateScheduler, ModelCheckpoint, ReduceLROnPlateau, Callback 84 | 85 | from tensorflow.keras.layers import Layer, Input, Activation, Lambda, Flatten, Concatenate, add, Average 86 | from tensorflow.keras.layers import BatchNormalization, LayerNormalization 87 | from tensorflow_addons.layers import InstanceNormalization 88 | from tensorflow.keras.layers import Conv1D, Conv2D, ZeroPadding2D, MaxPooling1D, MaxPooling2D, Dense, Dropout, GlobalAveragePooling1D, Cropping2D 89 | from tensorflow.keras.layers import LSTM, Bidirectional, TimeDistributed 90 | from tensorflow.keras.layers import UpSampling1D, UpSampling2D, Reshape, Conv1DTranspose, Conv2DTranspose, InputSpec 91 | 92 | from tensorflow.keras.initializers import glorot_uniform,he_uniform 93 | from tensorflow.keras.regularizers import l2 94 | from tensorflow.keras.utils import plot_model,normalize, Sequence 95 | 96 | sys.path.append(os.path.dirname(os.path.abspath(Path(__file__)))) 97 | from basic_utils import utils_ml 98 | from basic_utils import utils_operation 99 | 100 | ################ 101 | # Ensure reproducibility 102 | ################ 103 | 104 | def set_random_seed(seed): 105 | os.environ['PYTHONHASHSEED']=str(seed) 106 | random.seed(seed) 107 | np.random.seed(seed) 108 | tf.random.set_seed(seed) 109 | tf.keras.utils.set_random_seed(seed) 110 | seed = 42 111 | set_random_seed(seed) 112 | tf.keras.backend.set_floatx('float64') 113 | 114 | ################ 115 | # Define A few commonly used variables 116 | ################ 117 | 118 | wks = ["wkdy", "wkend"] 119 | epochs_4 = ["morning", "afternoon", "evening", "night"] 120 | epochs_5 = epochs_4 + ["allday"] 121 | epochs_6 = epochs_5 + ["14dhist"] 122 | 123 | wks_epoch5 = [x + "_" + y for x in wks for y in epochs_5] 124 | wks_epoch4 = [x + "_" + y for x in wks for y in epochs_4] 125 | wkdy_epoch4 = ["wkdy_" + y for y in epochs_4] 126 | wkend_epoch4 = ["wkend_" + y for y in epochs_4] 127 | schema_defaults = ['pid', 'epoch', 'weekday', 'grouping', 'epoch_weekday_grouping_abbreviated', 'time'] 128 | 129 | feature_types = ['f_blue', 'f_call', 'f_screen', 'f_slp', 'f_steps', 'f_loc', 'f_locMap'] 130 | 131 | with open(os.path.join(os.path.dirname(os.path.abspath(Path(__file__).parent)), 132 | "config", f"global_config.yaml"), "r") as f: 133 | global_config = yaml.safe_load(f) 134 | 135 | daterange_book = { 136 | "INS-W":{ 137 | 1:{ 138 | "start_date" : "2018-04-03", 139 | "end_date" : "2018-06-07" 140 | }, 141 | 2:{ 142 | "start_date" : "2019-03-31", 143 | "end_date" : "2019-06-15" 144 | }, 145 | 3:{ 146 | "start_date" : "2020-03-30", 147 | "end_date" : "2020-06-13" 148 | }, 149 | 4:{ 150 | "start_date" : "2021-03-29", 151 | "end_date" : "2021-06-12" 152 | }, 153 | }, 154 | 155 | "INS-D":{ 156 | 1:{ 157 | "start_date" : "2018-03-26", 158 | "end_date" : "2018-06-10" 159 | }, 160 | 2:{ 161 | "start_date" : "2019-03-25", 162 | "end_date" : "2019-06-09" 163 | } 164 | } 165 | } 166 | 167 | class feature_columns_repo: 168 | def __init__(self): 169 | self.feature_columns_fulldict = {} 170 | self.feature_columns_selecteddict = {} 171 | self.feature_columns_selected = [] 172 | self.feature_columns_selected_dis = [] 173 | self.feature_columns_selected_norm = [] 174 | self.feature_columns_selected_allepoches = [] 175 | self.feature_columns_selected_dis_allepoches = [] 176 | self.feature_columns_selected_epoches = {} 177 | self.feature_columns_selected_epoches_types = {} 
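# Illustrative note: after set_feature_columns() below populates fc_repo, column names follow the
# pattern <feature_type><sep><raw_feature>[_dis|_norm]<sep><epoch>; for example,
# fc_repo.feature_columns_selected_norm_epoches["morning"] holds entries such as
# "f_screen:phone_screen_rapids_sumdurationunlock_norm:morning".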
178 | 179 | fc_repo = feature_columns_repo() 180 | 181 | def set_feature_columns(sep = ":", epochs = epochs_5): 182 | """ Define a series of feature values in fc_repo """ 183 | global fc_repo 184 | 185 | fc_repo.feature_columns_fulldict = { 186 | "f_blue": ['phone_bluetooth_rapids_countscans', 'phone_bluetooth_rapids_uniquedevices', 'phone_bluetooth_rapids_countscansmostuniquedevice', 'phone_bluetooth_doryab_countscansall', 'phone_bluetooth_doryab_uniquedevicesall', 'phone_bluetooth_doryab_meanscansall', 'phone_bluetooth_doryab_stdscansall', 'phone_bluetooth_doryab_countscansmostfrequentdevicewithinsegmentsall', 'phone_bluetooth_doryab_countscansmostfrequentdeviceacrosssegmentsall', 'phone_bluetooth_doryab_countscansmostfrequentdeviceacrossdatasetall', 'phone_bluetooth_doryab_countscansleastfrequentdevicewithinsegmentsall', 'phone_bluetooth_doryab_countscansleastfrequentdeviceacrosssegmentsall', 'phone_bluetooth_doryab_countscansleastfrequentdeviceacrossdatasetall', 'phone_bluetooth_doryab_countscansown', 'phone_bluetooth_doryab_uniquedevicesown', 'phone_bluetooth_doryab_meanscansown', 'phone_bluetooth_doryab_stdscansown', 'phone_bluetooth_doryab_countscansmostfrequentdevicewithinsegmentsown', 'phone_bluetooth_doryab_countscansmostfrequentdeviceacrosssegmentsown', 'phone_bluetooth_doryab_countscansmostfrequentdeviceacrossdatasetown', 'phone_bluetooth_doryab_countscansleastfrequentdevicewithinsegmentsown', 'phone_bluetooth_doryab_countscansleastfrequentdeviceacrosssegmentsown', 'phone_bluetooth_doryab_countscansleastfrequentdeviceacrossdatasetown', 'phone_bluetooth_doryab_countscansothers', 'phone_bluetooth_doryab_uniquedevicesothers', 'phone_bluetooth_doryab_meanscansothers', 'phone_bluetooth_doryab_stdscansothers', 'phone_bluetooth_doryab_countscansmostfrequentdevicewithinsegmentsothers', 'phone_bluetooth_doryab_countscansmostfrequentdeviceacrosssegmentsothers', 'phone_bluetooth_doryab_countscansmostfrequentdeviceacrossdatasetothers', 'phone_bluetooth_doryab_countscansleastfrequentdevicewithinsegmentsothers', 'phone_bluetooth_doryab_countscansleastfrequentdeviceacrosssegmentsothers', 'phone_bluetooth_doryab_countscansleastfrequentdeviceacrossdatasetothers'], 187 | "f_call": ['phone_calls_rapids_missed_count', 'phone_calls_rapids_missed_distinctcontacts', 'phone_calls_rapids_missed_timefirstcall', 'phone_calls_rapids_missed_timelastcall', 'phone_calls_rapids_missed_countmostfrequentcontact', 'phone_calls_rapids_incoming_count', 'phone_calls_rapids_incoming_distinctcontacts', 'phone_calls_rapids_incoming_meanduration', 'phone_calls_rapids_incoming_sumduration', 'phone_calls_rapids_incoming_minduration', 'phone_calls_rapids_incoming_maxduration', 'phone_calls_rapids_incoming_stdduration', 'phone_calls_rapids_incoming_modeduration', 'phone_calls_rapids_incoming_entropyduration', 'phone_calls_rapids_incoming_timefirstcall', 'phone_calls_rapids_incoming_timelastcall', 'phone_calls_rapids_incoming_countmostfrequentcontact', 'phone_calls_rapids_outgoing_count', 'phone_calls_rapids_outgoing_distinctcontacts', 'phone_calls_rapids_outgoing_meanduration', 'phone_calls_rapids_outgoing_sumduration', 'phone_calls_rapids_outgoing_minduration', 'phone_calls_rapids_outgoing_maxduration', 'phone_calls_rapids_outgoing_stdduration', 'phone_calls_rapids_outgoing_modeduration', 'phone_calls_rapids_outgoing_entropyduration', 'phone_calls_rapids_outgoing_timefirstcall', 'phone_calls_rapids_outgoing_timelastcall', 'phone_calls_rapids_outgoing_countmostfrequentcontact'], 188 | "f_loc": 
["phone_locations_barnett_avgflightdur", "phone_locations_barnett_avgflightlen", "phone_locations_barnett_circdnrtn", "phone_locations_barnett_disttravelled", "phone_locations_barnett_hometime", "phone_locations_barnett_maxdiam", "phone_locations_barnett_maxhomedist", "phone_locations_barnett_probpause", "phone_locations_barnett_rog", "phone_locations_barnett_siglocentropy", "phone_locations_barnett_siglocsvisited", "phone_locations_barnett_stdflightdur", "phone_locations_barnett_stdflightlen", "phone_locations_barnett_wkenddayrtn", "phone_locations_doryab_avglengthstayatclusters", "phone_locations_doryab_avgspeed", "phone_locations_doryab_homelabel", "phone_locations_doryab_locationentropy", "phone_locations_doryab_locationvariance", "phone_locations_doryab_loglocationvariance", "phone_locations_doryab_maxlengthstayatclusters", "phone_locations_doryab_minlengthstayatclusters", "phone_locations_doryab_movingtostaticratio", "phone_locations_doryab_normalizedlocationentropy", "phone_locations_doryab_numberlocationtransitions", "phone_locations_doryab_numberofsignificantplaces", "phone_locations_doryab_outlierstimepercent", "phone_locations_doryab_radiusgyration", "phone_locations_doryab_stdlengthstayatclusters", "phone_locations_doryab_timeathome", "phone_locations_doryab_timeattop1location", "phone_locations_doryab_timeattop2location", "phone_locations_doryab_timeattop3location", "phone_locations_doryab_totaldistance", "phone_locations_doryab_varspeed", 'phone_locations_locmap_duration_in_locmap_study', 'phone_locations_locmap_percent_in_locmap_study', 'phone_locations_locmap_duration_in_locmap_exercise', 'phone_locations_locmap_percent_in_locmap_exercise', 'phone_locations_locmap_duration_in_locmap_greens', 'phone_locations_locmap_percent_in_locmap_greens'], 189 | "f_screen": ['phone_screen_rapids_countepisodeunlock', 'phone_screen_rapids_sumdurationunlock', 'phone_screen_rapids_maxdurationunlock', 'phone_screen_rapids_mindurationunlock', 'phone_screen_rapids_avgdurationunlock', 'phone_screen_rapids_stddurationunlock', 'phone_screen_rapids_firstuseafter00unlock', 'phone_screen_rapids_countepisodeunlock_locmap_exercise', 'phone_screen_rapids_sumdurationunlock_locmap_exercise', 'phone_screen_rapids_maxdurationunlock_locmap_exercise', 'phone_screen_rapids_mindurationunlock_locmap_exercise', 'phone_screen_rapids_avgdurationunlock_locmap_exercise', 'phone_screen_rapids_stddurationunlock_locmap_exercise', 'phone_screen_rapids_firstuseafter00unlock_locmap_exercise', 'phone_screen_rapids_countepisodeunlock_locmap_greens', 'phone_screen_rapids_sumdurationunlock_locmap_greens', 'phone_screen_rapids_maxdurationunlock_locmap_greens', 'phone_screen_rapids_mindurationunlock_locmap_greens', 'phone_screen_rapids_avgdurationunlock_locmap_greens', 'phone_screen_rapids_stddurationunlock_locmap_greens', 'phone_screen_rapids_firstuseafter00unlock_locmap_greens', 'phone_screen_rapids_countepisodeunlock_locmap_living', 'phone_screen_rapids_sumdurationunlock_locmap_living', 'phone_screen_rapids_maxdurationunlock_locmap_living', 'phone_screen_rapids_mindurationunlock_locmap_living', 'phone_screen_rapids_avgdurationunlock_locmap_living', 'phone_screen_rapids_stddurationunlock_locmap_living', 'phone_screen_rapids_firstuseafter00unlock_locmap_living', 'phone_screen_rapids_countepisodeunlock_locmap_study', 'phone_screen_rapids_sumdurationunlock_locmap_study', 'phone_screen_rapids_maxdurationunlock_locmap_study', 'phone_screen_rapids_mindurationunlock_locmap_study', 'phone_screen_rapids_avgdurationunlock_locmap_study', 
'phone_screen_rapids_stddurationunlock_locmap_study', 'phone_screen_rapids_firstuseafter00unlock_locmap_study', 'phone_screen_rapids_countepisodeunlock_locmap_home', 'phone_screen_rapids_sumdurationunlock_locmap_home', 'phone_screen_rapids_maxdurationunlock_locmap_home', 'phone_screen_rapids_mindurationunlock_locmap_home', 'phone_screen_rapids_avgdurationunlock_locmap_home', 'phone_screen_rapids_stddurationunlock_locmap_home', 'phone_screen_rapids_firstuseafter00unlock_locmap_home'], 190 | "f_slp": ['fitbit_sleep_summary_rapids_sumdurationafterwakeupmain', 'fitbit_sleep_summary_rapids_sumdurationasleepmain', 'fitbit_sleep_summary_rapids_sumdurationawakemain', 'fitbit_sleep_summary_rapids_sumdurationtofallasleepmain', 'fitbit_sleep_summary_rapids_sumdurationinbedmain', 'fitbit_sleep_summary_rapids_avgefficiencymain', 'fitbit_sleep_summary_rapids_avgdurationafterwakeupmain', 'fitbit_sleep_summary_rapids_avgdurationasleepmain', 'fitbit_sleep_summary_rapids_avgdurationawakemain', 'fitbit_sleep_summary_rapids_avgdurationtofallasleepmain', 'fitbit_sleep_summary_rapids_avgdurationinbedmain', 'fitbit_sleep_summary_rapids_countepisodemain', 'fitbit_sleep_summary_rapids_firstbedtimemain', 'fitbit_sleep_summary_rapids_lastbedtimemain', 'fitbit_sleep_summary_rapids_firstwaketimemain', 'fitbit_sleep_summary_rapids_lastwaketimemain', 'fitbit_sleep_intraday_rapids_avgdurationasleepunifiedmain', 'fitbit_sleep_intraday_rapids_avgdurationawakeunifiedmain', 'fitbit_sleep_intraday_rapids_maxdurationasleepunifiedmain', 'fitbit_sleep_intraday_rapids_maxdurationawakeunifiedmain', 'fitbit_sleep_intraday_rapids_sumdurationasleepunifiedmain', 'fitbit_sleep_intraday_rapids_sumdurationawakeunifiedmain', 'fitbit_sleep_intraday_rapids_countepisodeasleepunifiedmain', 'fitbit_sleep_intraday_rapids_countepisodeawakeunifiedmain', 'fitbit_sleep_intraday_rapids_stddurationasleepunifiedmain', 'fitbit_sleep_intraday_rapids_stddurationawakeunifiedmain', 'fitbit_sleep_intraday_rapids_mindurationasleepunifiedmain', 'fitbit_sleep_intraday_rapids_mindurationawakeunifiedmain', 'fitbit_sleep_intraday_rapids_mediandurationasleepunifiedmain', 'fitbit_sleep_intraday_rapids_mediandurationawakeunifiedmain', 'fitbit_sleep_intraday_rapids_ratiocountasleepunifiedwithinmain', 'fitbit_sleep_intraday_rapids_ratiocountawakeunifiedwithinmain', 'fitbit_sleep_intraday_rapids_ratiodurationasleepunifiedwithinmain', 'fitbit_sleep_intraday_rapids_ratiodurationawakeunifiedwithinmain'], 191 | "f_steps": ['fitbit_steps_summary_rapids_maxsumsteps', 'fitbit_steps_summary_rapids_minsumsteps', 'fitbit_steps_summary_rapids_avgsumsteps', 'fitbit_steps_summary_rapids_mediansumsteps', 'fitbit_steps_summary_rapids_stdsumsteps', 'fitbit_steps_intraday_rapids_sumsteps', 'fitbit_steps_intraday_rapids_maxsteps', 'fitbit_steps_intraday_rapids_minsteps', 'fitbit_steps_intraday_rapids_avgsteps', 'fitbit_steps_intraday_rapids_stdsteps', 'fitbit_steps_intraday_rapids_countepisodesedentarybout', 'fitbit_steps_intraday_rapids_sumdurationsedentarybout', 'fitbit_steps_intraday_rapids_maxdurationsedentarybout', 'fitbit_steps_intraday_rapids_mindurationsedentarybout', 'fitbit_steps_intraday_rapids_avgdurationsedentarybout', 'fitbit_steps_intraday_rapids_stddurationsedentarybout', 'fitbit_steps_intraday_rapids_countepisodeactivebout', 'fitbit_steps_intraday_rapids_sumdurationactivebout', 'fitbit_steps_intraday_rapids_maxdurationactivebout', 'fitbit_steps_intraday_rapids_mindurationactivebout', 'fitbit_steps_intraday_rapids_avgdurationactivebout', 
'fitbit_steps_intraday_rapids_stddurationactivebout'], 192 | } 193 | fc_repo.feature_columns_selecteddict = fc_repo.feature_columns_fulldict 194 | 195 | 196 | fc_repo.feature_columns_selected_origin = [] 197 | fc_repo.feature_columns_selected = [] 198 | for f, fl in fc_repo.feature_columns_selecteddict.items(): 199 | fc_repo.feature_columns_selected_origin += fl 200 | fc_repo.feature_columns_selected += [f + sep + x for x in fl] 201 | 202 | fc_repo.feature_columns_selected_dis = [] 203 | for f, fl in fc_repo.feature_columns_selecteddict.items(): 204 | fc_repo.feature_columns_selected_dis += [f + sep + x + "_dis" for x in fl] 205 | 206 | fc_repo.feature_columns_selected_norm = [] 207 | for f, fl in fc_repo.feature_columns_selecteddict.items(): 208 | fc_repo.feature_columns_selected_norm += [f + sep + x + "_norm" for x in fl] 209 | 210 | fc_repo.feature_columns_selected_allepoches = [] 211 | for f in fc_repo.feature_columns_selected: 212 | fc_repo.feature_columns_selected_allepoches += [f + sep + x for x in epochs] 213 | 214 | fc_repo.feature_columns_selected_dis_allepoches = [] 215 | for f in fc_repo.feature_columns_selected_dis: 216 | fc_repo.feature_columns_selected_dis_allepoches += [f + sep + x for x in epochs] 217 | 218 | 219 | fc_repo.feature_columns_selected_epoches = {} 220 | fc_repo.feature_columns_selected_types = {} 221 | fc_repo.feature_columns_selected_epoches_types = {} 222 | 223 | for epoch in epochs: 224 | fc_repo.feature_columns_selected_epoches[epoch] = [f + sep + epoch for f in fc_repo.feature_columns_selected] 225 | fc_repo.feature_columns_selected_epoches_types[epoch] = {} 226 | for f in fc_repo.feature_columns_selected: 227 | ft = f.split(sep)[0] 228 | if (ft in fc_repo.feature_columns_selected_epoches_types[epoch]): 229 | fc_repo.feature_columns_selected_epoches_types[epoch][ft].append(f + sep + epoch) 230 | else: 231 | fc_repo.feature_columns_selected_epoches_types[epoch][ft] = [f + sep + epoch] 232 | for f in fc_repo.feature_columns_selected: 233 | ft = f.split(sep)[0] 234 | if (ft in fc_repo.feature_columns_selected_types): 235 | fc_repo.feature_columns_selected_types[ft] += [f + sep + epoch for epoch in epochs] 236 | else: 237 | fc_repo.feature_columns_selected_types[ft] = [f + sep + epoch for epoch in epochs] 238 | 239 | fc_repo.feature_columns_selected_dis_epoches = {} 240 | fc_repo.feature_columns_selected_dis_epoches_types = {} 241 | 242 | for epoch in epochs: 243 | fc_repo.feature_columns_selected_dis_epoches[epoch] = [f + sep + epoch for f in fc_repo.feature_columns_selected_dis] 244 | fc_repo.feature_columns_selected_dis_epoches_types[epoch] = {} 245 | for f in fc_repo.feature_columns_selected_dis: 246 | ft = f.split(sep)[0] 247 | if (ft in fc_repo.feature_columns_selected_dis_epoches_types[epoch]): 248 | fc_repo.feature_columns_selected_dis_epoches_types[epoch][ft].append(f + sep + epoch) 249 | else: 250 | fc_repo.feature_columns_selected_dis_epoches_types[epoch][ft] = [f + sep + epoch] 251 | 252 | fc_repo.feature_columns_selected_norm_epoches = {} 253 | fc_repo.feature_columns_selected_norm_epoches_types = {} 254 | 255 | for epoch in epochs: 256 | fc_repo.feature_columns_selected_norm_epoches[epoch] = [f + sep + epoch for f in fc_repo.feature_columns_selected_norm] 257 | fc_repo.feature_columns_selected_norm_epoches_types[epoch] = {} 258 | for f in fc_repo.feature_columns_selected_norm: 259 | ft = f.split(sep)[0] 260 | if (ft in fc_repo.feature_columns_selected_norm_epoches_types[epoch]): 261 | 
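# Note: this builds a nested lookup of the form {epoch: {feature_type: [full column names]}} for the
# "_norm" columns, mirroring the dictionaries constructed above for the raw and "_dis" variants.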
fc_repo.feature_columns_selected_norm_epoches_types[epoch][ft].append(f + sep + epoch) 262 | else: 263 | fc_repo.feature_columns_selected_norm_epoches_types[epoch][ft] = [f + sep + epoch] 264 | 265 | set_feature_columns(sep = ":", epochs=epochs_6) 266 | 267 | def globalize(func): 268 | """Put a function into the global environment so that multiprocessing is easier""" 269 | def result(*args, **kwargs): 270 | return func(*args, **kwargs) 271 | result.__name__ = result.__qualname__ = uuid.uuid4().hex 272 | setattr(sys.modules[result.__module__], result.__name__, result) 273 | return result 274 | 275 | def get_min_count_class(labels, groups): 276 | df_tmp = pd.DataFrame([labels,groups]).T 277 | df_tmp.columns = ["label", "group"] 278 | num_min = df_tmp.groupby("label").apply(lambda x : len(set(x["group"]))).min() 279 | return num_min -------------------------------------------------------------------------------- /data_loader/data_loader_dl.py: -------------------------------------------------------------------------------- 1 | import os, sys 2 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) 3 | from utils.common_settings import * 4 | from data_loader import data_loader_ml 5 | from data_loader.data_loader_ml import DatasetDict, DataRepo, DataRepo_np 6 | 7 | from utils import path_definitions 8 | 9 | 10 | class MultiSourceDataGenerator(): 11 | """ Data Generator for deep model training and evaluation """ 12 | def __init__(self, data_repo_dict: Dict[str, DataRepo], is_training = True, 13 | generate_by = "across_dataset", 14 | batch_size=32, shuffle=True, flag_y_vector=True, 15 | mixup = "across", mixup_alpha=0.2, **kwargs): 16 | 17 | self.X_dict = {k:v.X for k, v in data_repo_dict.items()} 18 | self.y_dict = {k:v.y for k, v in data_repo_dict.items()} 19 | self.pids_dict = {k:v.pids for k, v in data_repo_dict.items()} 20 | self.is_training = is_training 21 | self.dataset_list = list(self.X_dict.keys()) 22 | self.dataset_dict = {k:idx for idx , k in enumerate(self.dataset_list)} 23 | 24 | # Define individual information 25 | # which will be used for individual data feeding setup 26 | self.person_list_dict = {k:sorted(list(set(v))) for k, v in self.pids_dict.items()} 27 | person_dict_tmp = {p:{"k":k,"i":np.where(self.pids_dict[k] == p)[0]} for k,v in self.person_list_dict.items() for p in v} 28 | self.person_dict = {} 29 | person_counter = 0 30 | for pid in itertools.chain.from_iterable(itertools.zip_longest(*list(self.person_list_dict.values()))): 31 | if (not pid): continue 32 | self.person_dict[pid] = {"person_idx": person_counter, "dataset_key": person_dict_tmp[pid]["k"], 33 | "data_idx": person_dict_tmp[pid]["i"], "data_len": len(person_dict_tmp[pid]["i"])} 34 | person_counter += 1 35 | self.person_datalen_list = [v["data_len"] for pid ,v in self.person_dict.items()] 36 | self.person_idx_dict = {k: np.array([self.person_dict[p]["person_idx"] for p in v]) for k, v in self.pids_dict.items()} 37 | self.person_list = list(self.person_dict.keys()) 38 | 39 | self.X_dim = len(self.X_dict[self.dataset_list[0]].shape) - 1 40 | self.flag_y_vector = flag_y_vector 41 | self.sample_num_dict = {k:len(self.X_dict[k]) for k in self.X_dict} 42 | self.sample_num_min = min(self.sample_num_dict.values()) 43 | 44 | # Define the generator type 45 | # within_person: generate data one person' data at one step 46 | # across_person: generate data for multiple people at one step 47 | # within_dataset: generate data within one dataset at one step 48 | # across_dataset: generate data 
across multiple datasets at one step 49 | self.generate_by = generate_by 50 | assert self.generate_by in ["within_person", "across_person", "within_dataset", "across_dataset"] 51 | self.mixup_alpha = mixup_alpha 52 | self.shuffle = shuffle 53 | self.mixup = mixup 54 | if (self.generate_by == "across_dataset"): 55 | assert self.mixup in ["across", "within", None] 56 | else: 57 | assert self.mixup in ["within", None] 58 | 59 | # Define batch size based on different data generation setup 60 | self.batch_size_total = batch_size 61 | if (self.generate_by == "across_dataset"): 62 | if (self.mixup == "across"): 63 | self.step_size = self.batch_size_total 64 | self.step_per_epoch = self.sample_num_min // self.batch_size_total 65 | # when there is only one dataset, degrade to within mixup 66 | if (len(self.dataset_list) == 1): 67 | self.generate_by = "within_dataset" 68 | self.step_size = self.batch_size_total 69 | self.step_per_epoch = self.sample_num_min // self.batch_size_total * len(self.dataset_list) 70 | else: 71 | self.step_size = batch_size // len(self.X_dict) 72 | self.step_per_epoch = self.sample_num_min // self.step_size 73 | elif (self.generate_by == "within_dataset"): 74 | self.step_size = self.batch_size_total 75 | self.step_per_epoch = self.sample_num_min // self.batch_size_total * len(self.dataset_list) 76 | elif (self.generate_by == "within_person"): 77 | self.step_size = self.batch_size_total 78 | self.step_per_epoch = len(self.person_dict) 79 | elif (self.generate_by == "across_person"): 80 | self.step_size = self.batch_size_total 81 | self.step_per_epoch = max(self.person_datalen_list, key = self.person_datalen_list.count) // self.batch_size_total 82 | if (self.step_per_epoch == 0): 83 | self.step_per_epoch = 1 84 | 85 | self.iter_counter = 0 86 | 87 | # define the output shape of the generator 88 | self.input_shape = list(self.X_dict[self.dataset_list[0]].shape[1:]) 89 | self.tf_output_signature = ({ 90 | "input_X": tf.TensorSpec(shape=[None] + self.input_shape, dtype = tf.float64), 91 | "input_y": tf.TensorSpec(shape=(None, 2) if self.flag_y_vector else (None), dtype = tf.float64), 92 | "input_dataset": tf.TensorSpec(shape=(None), dtype = tf.int64), 93 | "input_person": tf.TensorSpec(shape=(None), dtype = tf.int64), 94 | }, tf.TensorSpec(shape=(None, 2) if self.flag_y_vector else (None), dtype = tf.float64)) 95 | 96 | def __call__(self): 97 | while True: 98 | indexes_dict = self.__get_exploration_order() 99 | 100 | # if val/test, just return one step with all data 101 | if (not self.is_training): 102 | X = np.concatenate(list(self.X_dict.values())) 103 | y = np.concatenate(list(self.y_dict.values())) 104 | dsidx = np.zeros(len(y)) 105 | personidx = np.zeros(len(y)) 106 | 107 | dsidx = np.concatenate([self.dataset_dict[ds] * np.ones(len(self.y_dict[ds])) for ds in self.dataset_list]) 108 | personidx = np.array([self.person_dict[p]["person_idx"] for ds in self.dataset_list for p in self.pids_dict[ds]]) 109 | 110 | if (not self.flag_y_vector): 111 | y = np.argmax(y, axis = 1) 112 | yield {"input_X":X, "input_y":y, "input_dataset": dsidx, "input_person": personidx}, y 113 | break 114 | else: 115 | # if train, return data based on different generator types 116 | if (self.generate_by == "across_dataset"): 117 | if (self.mixup == "across"): 118 | for i in range(self.step_per_epoch): 119 | batch_ids_dict = {} 120 | for k, indexes in indexes_dict.items(): 121 | batch_ids_dict[k] = indexes[i * self.step_size:(i + 1) * self.step_size] 122 | X, y = 
self.__data_generation_between(batch_ids_dict) 123 | if (not self.flag_y_vector): 124 | y = np.argmax(y, axis = 1) 125 | # due to the mixup, it's hard to maintain the ds and person idx 126 | dsidx = np.zeros(len(y)) 127 | personidx = np.zeros(len(y)) 128 | yield {"input_X":X, "input_y":y, "input_dataset": dsidx, "input_person": personidx}, y 129 | else: 130 | for i in range(self.step_per_epoch): 131 | X_dict = {} 132 | y_dict = {} 133 | dataset_dict = {} 134 | person_dict = {} 135 | for k, indexes in indexes_dict.items(): 136 | batch_ids = indexes[i * self.step_size:(i + 1) * self.step_size] 137 | X, y = self.__data_generation_within(k, batch_ids) 138 | X_dict[k] = X 139 | y_dict[k] = y 140 | dataset_dict[k] = self.dataset_dict[k] * np.ones(len(y)) 141 | person_dict[k] = self.person_idx_dict[k][batch_ids] 142 | X = np.concatenate(list(X_dict.values())) 143 | y = np.concatenate(list(y_dict.values())) 144 | dsidx = np.concatenate(list(dataset_dict.values())) 145 | personidx = np.concatenate(list(person_dict.values())) 146 | if (not self.flag_y_vector): 147 | y = np.argmax(y, axis = 1) 148 | if (self.mixup is not None): # people are mixed 149 | personidx = np.zeros(len(y)) 150 | yield {"input_X":X, "input_y":y, "input_dataset": dsidx, "input_person":personidx}, y 151 | elif (self.generate_by == "within_dataset"): 152 | for i in range(self.step_per_epoch): 153 | dataset_idx = i % len(self.dataset_list) 154 | dataset_key = self.dataset_list[dataset_idx] 155 | j = i // len(self.dataset_list) 156 | batch_ids = indexes_dict[dataset_key][j * self.step_size:(j + 1) * self.step_size] 157 | X, y = self.__data_generation_within(dataset_key, batch_ids) 158 | dsidx = dataset_idx * np.ones(len(y)) 159 | personidx = self.person_idx_dict[dataset_key][batch_ids] 160 | 161 | if (not self.flag_y_vector): 162 | y = np.argmax(y, axis = 1) 163 | if (self.mixup is not None): # people are mixed 164 | personidx = np.zeros(len(y)) 165 | yield {"input_X":X, "input_y":y, "input_dataset": dsidx, "input_person": personidx}, y 166 | elif (self.generate_by == "within_person"): 167 | for i in range(self.step_per_epoch): 168 | persons = self.person_list[i: (i+1)] 169 | X_dict = {} 170 | y_dict = {} 171 | dataset_dict = {} 172 | person_dict = {} 173 | for person in persons: 174 | info = self.person_dict[person] 175 | k = info["dataset_key"] 176 | batch_ids = info["data_idx"] 177 | if (self.step_size is not None and self.step_size < len(batch_ids)): 178 | batch_ids = np.random.choice(batch_ids, size=self.step_size, replace = False) 179 | X, y = self.__data_generation_within(k, batch_ids) 180 | X_dict[person] = X 181 | y_dict[person] = y 182 | dataset_dict[person] = self.dataset_dict[k] * np.ones(len(y)) 183 | person_dict[person] = info["person_idx"] * np.ones(len(y)) 184 | X = np.concatenate(list(X_dict.values())) 185 | y = np.concatenate(list(y_dict.values())) 186 | dsidx = np.concatenate(list(dataset_dict.values())) 187 | personidx = np.concatenate(list(person_dict.values())) 188 | 189 | if (not self.flag_y_vector): 190 | y = np.argmax(y, axis = 1) 191 | yield {"input_X":X, "input_y":y, "input_dataset": dsidx, "input_person": personidx}, y 192 | elif (self.generate_by == "across_person"): 193 | for i in range(self.step_per_epoch): 194 | X_dict = {} 195 | y_dict = {} 196 | dataset_dict = {} 197 | person_dict = {} 198 | for person in self.person_list: 199 | info = self.person_dict[person] 200 | k = info["dataset_key"] 201 | ids_raw = info["data_idx"] 202 | ids_raw_len = len(ids_raw) 203 | if (self.step_size <= ids_raw_len): 
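# Note: each "across_person" step takes a window of step_size indices from this person's data,
# treating the index array as circular (wrapping past the end); people with fewer than step_size
# samples are instead upsampled by repeating their indices in the else branch below.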
204 | start = (i * self.step_size) % ids_raw_len 205 | end = ((i+1)*self.step_size) % ids_raw_len 206 | if (start < end): 207 | batch_ids = ids_raw[start : end] 208 | else: 209 | batch_ids = np.concatenate([ids_raw[start :], ids_raw[: end]]) 210 | else: 211 | batch_ids_first = np.concatenate([ids_raw for _ in range(self.step_size // ids_raw_len)]) 212 | batch_ids_second = ids_raw[: (self.step_size % ids_raw_len)] 213 | batch_ids = np.concatenate([batch_ids_first, batch_ids_second]) 214 | X, y = self.__data_generation_within(k, batch_ids) 215 | X_dict[person] = X 216 | y_dict[person] = y 217 | dataset_dict[person] = self.dataset_dict[k] * np.ones(len(y)) 218 | person_dict[person] = info["person_idx"] * np.ones(len(y)) 219 | X = np.concatenate(list(X_dict.values())) 220 | y = np.concatenate(list(y_dict.values())) 221 | dsidx = np.concatenate(list(dataset_dict.values())) 222 | personidx = np.concatenate(list(person_dict.values())) 223 | if (not self.flag_y_vector): 224 | y = np.argmax(y, axis = 1) 225 | yield {"input_X":X, "input_y":y, "input_dataset": dsidx, "input_person": personidx}, y 226 | 227 | self.iter_counter += 1 228 | 229 | def __get_exploration_order(self): 230 | """ Shuffle data when necessary """ 231 | indexes_dict = {k: np.arange(v) for k, v in self.sample_num_dict.items()} 232 | # indexes = np.arange(self.sample_num) 233 | if self.shuffle and self.is_training: 234 | for k in indexes_dict: 235 | np.random.shuffle(indexes_dict[k]) 236 | np.random.shuffle(self.person_list) 237 | return indexes_dict 238 | 239 | def __data_generation_within(self, dataset_key, batch_ids): 240 | """ Generate mixup data within datasets """ 241 | 242 | X1 = self.X_dict[dataset_key][batch_ids] 243 | y1 = self.y_dict[dataset_key][batch_ids] 244 | 245 | if self.mixup == "within": 246 | X2 = self.X_dict[dataset_key][np.random.permutation(batch_ids)] 247 | y2 = self.y_dict[dataset_key][np.random.permutation(batch_ids)] 248 | X, y = self.__mixup(X1, y1, [], X2, y2, []) 249 | else: 250 | X = X1 251 | y = y1 252 | 253 | return X, y 254 | 255 | def __data_generation_between(self, batch_ids_dict): 256 | """ Generate mixup data across datasets """ 257 | X1, y1, dataset1 = [], [], [] 258 | X2, y2, dataset2 = [], [], [] 259 | length = min([len(batch_ids_dict[k]) for k in batch_ids_dict]) 260 | for _ in range(length): 261 | dataset_key1, dataset_key2 = np.random.choice(self.dataset_list, 2, replace=False) 262 | idx1, idx2 = np.random.randint(low=0, high=length, size=2) 263 | X1.append(self.X_dict[dataset_key1][batch_ids_dict[dataset_key1][idx1]]) 264 | y1.append(self.y_dict[dataset_key1][batch_ids_dict[dataset_key1][idx1]]) 265 | dataset1.append(dataset_key1) 266 | X2.append(self.X_dict[dataset_key2][batch_ids_dict[dataset_key2][idx2]]) 267 | y2.append(self.y_dict[dataset_key2][batch_ids_dict[dataset_key2][idx2]]) 268 | dataset2.append(dataset_key2) 269 | X, y = self.__mixup(X1, y1, dataset1, X2, y2, dataset2) 270 | return X, y 271 | 272 | 273 | def __mixup(self, X1, y1, dataset1, X2, y2, dataset2): 274 | """ Mixuping data of two sides """ 275 | l = np.random.beta(self.mixup_alpha, self.mixup_alpha, len(X1)) 276 | if (self.X_dim == 3): 277 | X_l = l.reshape(len(X1), 1, 1, 1) 278 | elif (self.X_dim == 2): 279 | X_l = l.reshape(len(X1), 1, 1) 280 | elif (self.X_dim == 1): 281 | X_l = l.reshape(len(X1), 1) 282 | else: 283 | print("X_dim seems very large") 284 | y_l = l.reshape(len(X1), 1) 285 | X = X1 * X_l + X2 * (1 - X_l) 286 | y = y1 * y_l + y2 * (1 - y_l) 287 | 288 | return X, y 289 | 290 | def 
normalize_along_axis(data: np.ndarray, axis:int = -2, method:str = "robust") -> np.ndarray: 291 | """Normalize the data along a given axis 292 | 293 | Args: 294 | data (np.ndarray): dataframe to be normalized 295 | axis (int, optional): dimension to be normalized along. Defaults to -2. 296 | method (str, optional): current support "standard" (minus mean and std) 297 | or "robust" (minus median and divided 5-95 quantile range). Defaults to "robust". 298 | 299 | Returns: 300 | np.ndarray: normalized dataframe 301 | """ 302 | if (method == "standard"): 303 | return (data - np.mean(data, axis = axis, keepdims=True)) / (np.std(data, axis = axis, keepdims=True) + 1e-9) 304 | elif (method == "robust"): 305 | q_small, q_center, q_large = np.nanpercentile(data, q = [5,50,95], axis = axis, keepdims=True) 306 | r = q_large - q_small + 1e-9 307 | data_scale = (data - q_center) / r 308 | return np.clip(data_scale, a_min = -2, a_max = 2) 309 | 310 | def data_loader_np(ds_keys_dict: dict, flag_normalize:bool = True, flag_more_feat_types:bool = False, verbose:bool = True) -> Dict[str, Dict[str, DataRepo_np]]: 311 | """Prep a dictionary of DataRepo_np for deep learning purpose 312 | 313 | Args: 314 | ds_keys_dict (dictionary): a dictionary of pairs 315 | flag_normalize (bool, optional): whether to use normalized features. Defaults to True. 316 | flag_more_feat_types (bool, optional): whether load all sensor types. 317 | Should be False for maximum compatibility. Defaults to False. 318 | verbose (bool, optional): Whether to display the progress bar and intermediate reuslts. Defaults to True 319 | 320 | Raises: 321 | ValueError: Incompatible input shape 322 | 323 | Returns: 324 | Dict[str, Dict[str, DataRepo_np]]: a dictionary of dictionary of DataRepo_np, 325 | with the first level as prediction target, and the second level as ds_key 326 | """ 327 | 328 | if (not flag_normalize): 329 | data_repo_np_dict = {} 330 | for pred_target, ds_keys in tqdm(ds_keys_dict.items(), position=0, desc= "prediction targets", disable=not verbose): 331 | for ds_key in tqdm(ds_keys, position=1, desc= "dataset keys", leave=False, disable= not verbose): 332 | institution, phase = ds_key.split("_") 333 | phase = int(phase) 334 | if pred_target not in data_repo_np_dict: 335 | data_repo_np_dict[pred_target] = {} 336 | if flag_more_feat_types: 337 | dataset_file_np_path = os.path.join(path_definitions.DATA_PATH, "np_max_feature_types", f"{pred_target}--{ds_key}--np.pkl") 338 | else: 339 | dataset_file_np_path = os.path.join(path_definitions.DATA_PATH, "np", f"{pred_target}--{ds_key}--np.pkl") 340 | if (os.path.exists(dataset_file_np_path)): 341 | if (verbose): 342 | tqdm.write(pred_target + " " + ds_key + " read np " + datetime.now().strftime("%d/%m/%Y %H:%M:%S")) 343 | with open(dataset_file_np_path, "rb") as f: 344 | data_repo_np_dict[pred_target][ds_key] = pickle.load(f) 345 | else: 346 | dataset = data_loader_ml.data_loader_single(pred_target, institution, phase, 347 | flag_more_feat_types=flag_more_feat_types) 348 | feat_prep = dl_feat_preparation(flag_use_features="both", 349 | flag_feature_selection=None, 350 | flag_more_feat_types=flag_more_feat_types, 351 | verbose=1 if verbose else 0) 352 | if (verbose): 353 | tqdm.write(pred_target + " " + ds_key + " compute np " + datetime.now().strftime("%d/%m/%Y %H:%M:%S")) 354 | data_repo_np = DataRepo_np(feat_prep.prep_data_repo(dataset), 355 | cols = feat_prep.feature_list) 356 | Path(os.path.split(dataset_file_np_path)[0]).mkdir(parents=True, exist_ok=True) 357 | with 
open(dataset_file_np_path, "wb") as f: 358 | pickle.dump(data_repo_np, f) 359 | data_repo_np_dict[pred_target][ds_key] = deepcopy(data_repo_np) 360 | return data_repo_np_dict 361 | else: 362 | data_repo_np_norm_dict = {} 363 | for pred_target, ds_keys in tqdm(ds_keys_dict.items(), position=0, desc= "prediction targets", disable=not verbose): 364 | for ds_key in tqdm(ds_keys, position=1, desc= "dataset keys", leave=False, disable=not verbose): 365 | institution, phase = ds_key.split("_") 366 | phase = int(phase) 367 | if pred_target not in data_repo_np_norm_dict: 368 | data_repo_np_norm_dict[pred_target] = {} 369 | if flag_more_feat_types: 370 | dataset_file_np_path = os.path.join(path_definitions.DATA_PATH, "np_norm_max_feature_types", f"{pred_target}--{ds_key}--np.pkl") 371 | dataset_file_np_path_nonorm = os.path.join(path_definitions.DATA_PATH, "np_max_feature_types", f"{pred_target}--{ds_key}--np.pkl") 372 | else: 373 | dataset_file_np_path = os.path.join(path_definitions.DATA_PATH, "np_norm", f"{pred_target}--{ds_key}--np_norm.pkl") 374 | dataset_file_np_path_nonorm = os.path.join(path_definitions.DATA_PATH, "np", f"{pred_target}--{ds_key}--np.pkl") 375 | if (os.path.exists(dataset_file_np_path)): 376 | if (verbose): 377 | tqdm.write(pred_target + " " + ds_key + " read np norm " + datetime.now().strftime("%d/%m/%Y %H:%M:%S")) 378 | with open(dataset_file_np_path, "rb") as f: 379 | data_repo_np_norm_dict[pred_target][ds_key] = pickle.load(f) 380 | else: 381 | dataset = data_loader_ml.data_loader_single(pred_target, institution, phase, 382 | flag_more_feat_types=flag_more_feat_types) 383 | feat_prep = dl_feat_preparation(flag_use_features="both", 384 | flag_feature_selection=None, 385 | flag_more_feat_types=flag_more_feat_types, 386 | verbose=1 if verbose else 0) 387 | if (verbose): 388 | tqdm.write(pred_target + " " + ds_key + " compute np norm " + datetime.now().strftime("%d/%m/%Y %H:%M:%S")) 389 | 390 | data_repo_np = DataRepo_np(feat_prep.prep_data_repo(dataset), 391 | cols = feat_prep.feature_list) 392 | Path(os.path.split(dataset_file_np_path_nonorm)[0]).mkdir(parents=True, exist_ok=True) 393 | with open(dataset_file_np_path_nonorm, "wb") as f: 394 | pickle.dump(data_repo_np, f) 395 | # Ignore the norm features as they are already normalized on each individual's behavior 396 | feature_idx_tobenormed = [idx for idx,f in enumerate(feat_prep.feature_list) if "_norm:" not in f] 397 | data_repo_np_norm_dict[pred_target][ds_key] = deepcopy(data_repo_np) 398 | X_shape = data_repo_np_norm_dict[pred_target][ds_key].X.shape 399 | if (len(X_shape) == 3): 400 | data_repo_np_norm_dict[pred_target][ds_key].X[:,:,feature_idx_tobenormed] = \ 401 | normalize_along_axis(data_repo_np_norm_dict[pred_target][ds_key].X[:,:,feature_idx_tobenormed], axis = -2, method = "robust") 402 | elif (len(X_shape) == 2): 403 | data_repo_np_norm_dict[pred_target][ds_key].X[:,feature_idx_tobenormed] = \ 404 | normalize_along_axis(data_repo_np_norm_dict[pred_target][ds_key].X[:,feature_idx_tobenormed], axis = -2, method = "robust") 405 | else: 406 | raise ValueError(f"X's shape is {X_shape}") 407 | Path(os.path.split(dataset_file_np_path)[0]).mkdir(parents=True, exist_ok=True) 408 | with open(dataset_file_np_path, "wb") as f: 409 | pickle.dump(data_repo_np_norm_dict[pred_target][ds_key], f) 410 | return data_repo_np_norm_dict 411 | 412 | def prep_repo_np_dict_feature_prep(data_repo_np_dict:Dict[str, Dict[str, DataRepo_np]], 413 | ndim:int = 2, selected_feature_idx:List[int] = None) -> Dict[str, Dict[str, DataRepo_np]]: 
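    # A minimal usage sketch, assuming a repo dict produced by data_loader_np() above.
    # `ndim` here refers to the per-sample shape of X: ndim=1 collapses the day axis into
    # per-feature mean/std, ndim=2 (the default) keeps X as (samples, days, features), and
    # ndim=3 appends a trailing channel axis, e.g. for CNN-style models:
    #     repo_dict = data_loader_np({"dep_weekly": ["INS-W_1"]})
    #     repo_dict = prep_repo_np_dict_feature_prep(repo_dict, ndim=3)  # X -> (samples, days, features, 1)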
414 | """ Take features and process dimensions when necessary """ 415 | for pred_target in data_repo_np_dict: 416 | ds_keys = list(data_repo_np_dict[pred_target].keys()) 417 | new_feature_idx = list(np.arange(data_repo_np_dict[pred_target][ds_keys[0]].X.shape[-1])) 418 | if (selected_feature_idx): 419 | new_feature_idx = list(selected_feature_idx) 420 | 421 | for ds_key in ds_keys: 422 | data_repo_np_dict[pred_target][ds_key].X = data_repo_np_dict[pred_target][ds_key].X[:,:,new_feature_idx] 423 | 424 | if (ndim == 1): # aggregate across days 425 | for k, v in data_repo_np_dict[pred_target].items(): 426 | data_repo_np_dict[pred_target][k].X = np.concatenate([np.mean(v.X, axis = 1), np.std(v.X, axis = 1)], axis=-1) 427 | elif (ndim == 3): # simply define the num of channels to be 1 428 | for k, v in data_repo_np_dict[pred_target].items(): 429 | data_repo_np_dict[pred_target][k].X = np.expand_dims(v.X, axis = -1) 430 | else: # do nothing 431 | pass 432 | 433 | return data_repo_np_dict 434 | 435 | def data_loader_dl_placeholder(pred_targets: List[str], ds_keys_target: List[str], verbose:bool = True): 436 | """ Load a small data placeholder when running training. 437 | This accelerates the process, as the DL model will load the cached numpy arrays instead """ 438 | datadict_filepath = os.path.join(path_definitions.DATA_PATH, "dataset_dict_dl_placeholder.pkl") 439 | 440 | def generate_dl_placeholder(): 441 | ds_keys = global_config["all"]["ds_keys"] 442 | 443 | dataset_dict = data_loader_ml.data_loader({pt: ds_keys for pt in global_config["all"]["prediction_tasks"]}, verbose=verbose) 444 | for pt, dsd_ds in dataset_dict.items(): 445 | for ds, dsd in dsd_ds.items(): 446 | dataset_dict[pt][ds].datapoints = dataset_dict[pt][ds].datapoints.iloc[:2] 447 | with open(datadict_filepath, "wb") as f: 448 | pickle.dump(dataset_dict, f) 449 | return dataset_dict 450 | 451 | if (os.path.exists(datadict_filepath)): 452 | try: 453 | with open(datadict_filepath, "rb") as f: 454 | dataset_dict = pickle.load(f) 455 | assert set(pred_targets).issubset(set(dataset_dict.keys())) 456 | for pred_target in pred_targets: 457 | assert set(ds_keys_target).issubset(set(dataset_dict[pred_target].keys())) 458 | except Exception: 459 | dataset_dict = generate_dl_placeholder() 460 | else: 461 | dataset_dict = generate_dl_placeholder() 462 | return dataset_dict 463 | 464 | 465 | class dl_feat_preparation(): 466 | """A class to help feature preparation for deep learning models """ 467 | 468 | def __init__(self, config_name = "dl_feat_prep", flag_more_feat_types = False, verbose = 0, **kwargs): 469 | super().__init__() 470 | 471 | with open(os.path.join(path_definitions.CONFIG_PATH, f"{config_name}.yaml"), "r") as f: 472 | self.config = yaml.safe_load(f) 473 | 474 | all_feats = [] 475 | if (flag_more_feat_types): 476 | feature_type_list = ['f_loc', 'f_screen', 'f_slp', 'f_steps', "f_blue", "f_call"] 477 | else: 478 | feature_type_list = ['f_loc', 'f_screen', 'f_slp', 'f_steps'] 479 | for epoch in epochs_5: 480 | all_feats += [f for ft in feature_type_list for f in fc_repo.feature_columns_selected_epoches_types[epoch][ft]] 481 | 482 | self.feature_list_nonorm = deepcopy(all_feats) 483 | self.feature_list_norm = [] 484 | for f in all_feats: 485 | ft, fn, seg = f.split(":") 486 | new_f = f"{ft}:{fn}_norm:{seg}" 487 | self.feature_list_norm.append(new_f) 488 | self.feature_list = self.feature_list_nonorm + self.feature_list_norm 489 | 490 | if (flag_more_feat_types): 491 | self.selected_feature_list = self.config["feature_definition"]["feature_list_more_feat_types"] 492 
| else: 493 | self.selected_feature_list = self.config["feature_definition"]["feature_list"] 494 | self.selected_feature_idx = [self.feature_list.index(f) for f in self.selected_feature_list] 495 | 496 | self.NAFILL = 0 497 | self.verbose = verbose 498 | 499 | def prep_data_repo_aggregate(self, dataset:DatasetDict, flag_train:bool = True, calc_method="last") -> DataRepo: 500 | """Basic feature calculation: either compute the 14-day median ("stats") or take the last day's feature values ("last")""" 501 | 502 | assert calc_method in ["last", "stats"] 503 | 504 | df_datapoints = deepcopy(dataset.datapoints) 505 | 506 | if (calc_method == "last"): 507 | def get_last(df): 508 | return pd.Series(data = df[self.feature_list].iloc[-1].values, index = self.feature_list).T 509 | X_tmp = df_datapoints["X_raw"].apply(lambda x : get_last(x)) 510 | else: 511 | @ray.remote 512 | def get_stats(df): 513 | median_tmp = pd.Series(data = df[self.feature_list].iloc[-14:].median().values, index = [f + "#median" for f in self.feature_list]).T 514 | return pd.concat([median_tmp]) 515 | 516 | X_tmp = ray.get([get_stats.remote(df) for df in df_datapoints["X_raw"]]) 517 | X_tmp = pd.DataFrame(X_tmp) 518 | X_tmp.index = df_datapoints.index 519 | 520 | # filter 521 | shape1 = X_tmp.shape 522 | X_tmp = X_tmp[X_tmp.isna().sum(axis = 1) < X_tmp.shape[1] / 2] # filter very empty days 523 | shape2 = X_tmp.shape 524 | del_rows = shape1[0] - shape2[0] 525 | 526 | X = deepcopy(X_tmp) 527 | 528 | if (self.verbose > 0): 529 | print(f"filter {del_rows} rows") 530 | print(f"NA rate: {100* X.isna().sum().sum() / X.shape[0] / X.shape[1]}%" ) 531 | X = X.fillna(X.median()) 532 | X = X.fillna(0) # for those completely empty features (e.g., one dataset does not have the feature) 533 | 534 | y = df_datapoints["y_raw"].loc[X.index] 535 | pids = df_datapoints["pid"].loc[X.index] 536 | 537 | self.data_repo = DataRepo(X=X, y=y, pids=pids) 538 | return self.data_repo 539 | 540 | def prep_data_repo(self, dataset:DatasetDict, flag_train:bool = True) -> DataRepo: 541 | """Prepare the day-by-feature matrices (last 28 days per datapoint) with median-based imputation""" 542 | 543 | df_datapoints = deepcopy(dataset.datapoints) 544 | 545 | df_datapoints_X = df_datapoints["X_raw"].apply(lambda df : df[self.feature_list].iloc[-28:]) 546 | 547 | @globalize 548 | def impute(df): 549 | return df.fillna(df.median(axis = 0),axis=0).fillna(self.NAFILL).values 550 | 551 | with Pool(NJOB) as pool: 552 | results = list(tqdm(pool.imap(impute, df_datapoints_X.values), 553 | total = len(df_datapoints_X), position = 2, leave=False, desc = "Feature processing", disable=int(self.verbose)==0)) 554 | 555 | df_results = [pd.DataFrame(r, index= df_datapoints_X.iloc[0].index, columns= df_datapoints_X.iloc[0].columns) for r in results] 556 | 557 | X = pd.Series(df_results, index=df_datapoints_X.index) 558 | 559 | y = df_datapoints["y_raw"].loc[X.index] 560 | pids = df_datapoints["pid"].loc[X.index] 561 | 562 | self.data_repo = DataRepo(X=X, y=y, pids=pids) 563 | return self.data_repo -------------------------------------------------------------------------------- /data/additional_user_setup/overlapping_pids.json: -------------------------------------------------------------------------------- 1 | { 2 | "dep_endterm": 3 | { 4 | "INS-W_1": 5 | { 6 | "INS-W_1": 7 | [ 8 | "INS-W_015#INS-W_1", 9 | "INS-W_042#INS-W_1", 10 | "INS-W_046#INS-W_1", 11 | "INS-W_048#INS-W_1", 12 | "INS-W_055#INS-W_1", 13 | "INS-W_058#INS-W_1", 14 | "INS-W_076#INS-W_1", 15 | "INS-W_082#INS-W_1", 16 | "INS-W_094#INS-W_1", 17 | "INS-W_096#INS-W_1", 18 | 
"INS-W_098#INS-W_1", 19 | "INS-W_099#INS-W_1", 20 | "INS-W_108#INS-W_1", 21 | "INS-W_117#INS-W_1", 22 | "INS-W_125#INS-W_1", 23 | "INS-W_136#INS-W_1", 24 | "INS-W_149#INS-W_1", 25 | "INS-W_153#INS-W_1", 26 | "INS-W_156#INS-W_1", 27 | "INS-W_169#INS-W_1", 28 | "INS-W_176#INS-W_1", 29 | "INS-W_191#INS-W_1" 30 | ], 31 | "INS-W_2": 32 | [ 33 | "INS-W_310#INS-W_2", 34 | "INS-W_316#INS-W_2", 35 | "INS-W_326#INS-W_2", 36 | "INS-W_330#INS-W_2", 37 | "INS-W_333#INS-W_2", 38 | "INS-W_341#INS-W_2", 39 | "INS-W_343#INS-W_2", 40 | "INS-W_345#INS-W_2", 41 | "INS-W_367#INS-W_2", 42 | "INS-W_375#INS-W_2", 43 | "INS-W_408#INS-W_2", 44 | "INS-W_447#INS-W_2", 45 | "INS-W_449#INS-W_2", 46 | "INS-W_450#INS-W_2", 47 | "INS-W_452#INS-W_2", 48 | "INS-W_453#INS-W_2", 49 | "INS-W_474#INS-W_2", 50 | "INS-W_476#INS-W_2", 51 | "INS-W_483#INS-W_2", 52 | "INS-W_484#INS-W_2", 53 | "INS-W_492#INS-W_2", 54 | "INS-W_529#INS-W_2" 55 | ], 56 | "INS-W_3": 57 | [ 58 | "INS-W_603#INS-W_3", 59 | "INS-W_614#INS-W_3", 60 | "INS-W_615#INS-W_3", 61 | "INS-W_617#INS-W_3", 62 | "INS-W_622#INS-W_3", 63 | "INS-W_636#INS-W_3", 64 | "INS-W_637#INS-W_3", 65 | "INS-W_645#INS-W_3", 66 | "INS-W_647#INS-W_3", 67 | "INS-W_659#INS-W_3", 68 | "INS-W_661#INS-W_3", 69 | "INS-W_666#INS-W_3", 70 | "INS-W_669#INS-W_3", 71 | "INS-W_678#INS-W_3", 72 | "INS-W_680#INS-W_3", 73 | "INS-W_685#INS-W_3", 74 | "INS-W_688#INS-W_3", 75 | "INS-W_722#INS-W_3" 76 | ], 77 | "INS-W_4": 78 | [ 79 | "INS-W_1003#INS-W_4", 80 | "INS-W_1004#INS-W_4", 81 | "INS-W_1010#INS-W_4", 82 | "INS-W_1013#INS-W_4", 83 | "INS-W_1018#INS-W_4", 84 | "INS-W_1039#INS-W_4", 85 | "INS-W_1206#INS-W_4", 86 | "INS-W_903#INS-W_4", 87 | "INS-W_911#INS-W_4", 88 | "INS-W_915#INS-W_4", 89 | "INS-W_918#INS-W_4", 90 | "INS-W_925#INS-W_4", 91 | "INS-W_933#INS-W_4", 92 | "INS-W_935#INS-W_4", 93 | "INS-W_942#INS-W_4", 94 | "INS-W_953#INS-W_4", 95 | "INS-W_966#INS-W_4", 96 | "INS-W_969#INS-W_4", 97 | "INS-W_971#INS-W_4", 98 | "INS-W_985#INS-W_4", 99 | "INS-W_987#INS-W_4", 100 | "INS-W_996#INS-W_4" 101 | ] 102 | }, 103 | "INS-W_2": 104 | { 105 | "INS-W_1": 106 | [ 107 | "INS-W_015#INS-W_1", 108 | "INS-W_042#INS-W_1", 109 | "INS-W_046#INS-W_1", 110 | "INS-W_048#INS-W_1", 111 | "INS-W_055#INS-W_1", 112 | "INS-W_058#INS-W_1", 113 | "INS-W_076#INS-W_1", 114 | "INS-W_082#INS-W_1", 115 | "INS-W_094#INS-W_1", 116 | "INS-W_096#INS-W_1", 117 | "INS-W_098#INS-W_1", 118 | "INS-W_099#INS-W_1", 119 | "INS-W_108#INS-W_1", 120 | "INS-W_117#INS-W_1", 121 | "INS-W_125#INS-W_1", 122 | "INS-W_136#INS-W_1", 123 | "INS-W_149#INS-W_1", 124 | "INS-W_153#INS-W_1", 125 | "INS-W_156#INS-W_1", 126 | "INS-W_169#INS-W_1", 127 | "INS-W_176#INS-W_1", 128 | "INS-W_191#INS-W_1" 129 | ], 130 | "INS-W_2": 131 | [ 132 | "INS-W_310#INS-W_2", 133 | "INS-W_316#INS-W_2", 134 | "INS-W_326#INS-W_2", 135 | "INS-W_330#INS-W_2", 136 | "INS-W_333#INS-W_2", 137 | "INS-W_341#INS-W_2", 138 | "INS-W_343#INS-W_2", 139 | "INS-W_345#INS-W_2", 140 | "INS-W_367#INS-W_2", 141 | "INS-W_375#INS-W_2", 142 | "INS-W_408#INS-W_2", 143 | "INS-W_447#INS-W_2", 144 | "INS-W_449#INS-W_2", 145 | "INS-W_450#INS-W_2", 146 | "INS-W_452#INS-W_2", 147 | "INS-W_453#INS-W_2", 148 | "INS-W_474#INS-W_2", 149 | "INS-W_476#INS-W_2", 150 | "INS-W_483#INS-W_2", 151 | "INS-W_484#INS-W_2", 152 | "INS-W_492#INS-W_2", 153 | "INS-W_529#INS-W_2" 154 | ], 155 | "INS-W_3": 156 | [ 157 | "INS-W_603#INS-W_3", 158 | "INS-W_614#INS-W_3", 159 | "INS-W_615#INS-W_3", 160 | "INS-W_617#INS-W_3", 161 | "INS-W_622#INS-W_3", 162 | "INS-W_636#INS-W_3", 163 | "INS-W_637#INS-W_3", 164 | "INS-W_645#INS-W_3", 
165 | "INS-W_647#INS-W_3", 166 | "INS-W_659#INS-W_3", 167 | "INS-W_661#INS-W_3", 168 | "INS-W_666#INS-W_3", 169 | "INS-W_669#INS-W_3", 170 | "INS-W_678#INS-W_3", 171 | "INS-W_680#INS-W_3", 172 | "INS-W_685#INS-W_3", 173 | "INS-W_688#INS-W_3", 174 | "INS-W_722#INS-W_3" 175 | ], 176 | "INS-W_4": 177 | [ 178 | "INS-W_1003#INS-W_4", 179 | "INS-W_1004#INS-W_4", 180 | "INS-W_1010#INS-W_4", 181 | "INS-W_1013#INS-W_4", 182 | "INS-W_1018#INS-W_4", 183 | "INS-W_1039#INS-W_4", 184 | "INS-W_1206#INS-W_4", 185 | "INS-W_903#INS-W_4", 186 | "INS-W_911#INS-W_4", 187 | "INS-W_915#INS-W_4", 188 | "INS-W_918#INS-W_4", 189 | "INS-W_925#INS-W_4", 190 | "INS-W_933#INS-W_4", 191 | "INS-W_935#INS-W_4", 192 | "INS-W_942#INS-W_4", 193 | "INS-W_953#INS-W_4", 194 | "INS-W_966#INS-W_4", 195 | "INS-W_969#INS-W_4", 196 | "INS-W_971#INS-W_4", 197 | "INS-W_985#INS-W_4", 198 | "INS-W_987#INS-W_4", 199 | "INS-W_996#INS-W_4" 200 | ] 201 | }, 202 | "INS-W_3": 203 | { 204 | "INS-W_1": 205 | [ 206 | "INS-W_002#INS-W_1", 207 | "INS-W_015#INS-W_1", 208 | "INS-W_020#INS-W_1", 209 | "INS-W_023#INS-W_1", 210 | "INS-W_042#INS-W_1", 211 | "INS-W_046#INS-W_1", 212 | "INS-W_048#INS-W_1", 213 | "INS-W_055#INS-W_1", 214 | "INS-W_058#INS-W_1", 215 | "INS-W_076#INS-W_1", 216 | "INS-W_082#INS-W_1", 217 | "INS-W_084#INS-W_1", 218 | "INS-W_095#INS-W_1", 219 | "INS-W_096#INS-W_1", 220 | "INS-W_098#INS-W_1", 221 | "INS-W_108#INS-W_1", 222 | "INS-W_109#INS-W_1", 223 | "INS-W_110#INS-W_1", 224 | "INS-W_117#INS-W_1", 225 | "INS-W_125#INS-W_1", 226 | "INS-W_149#INS-W_1", 227 | "INS-W_153#INS-W_1", 228 | "INS-W_156#INS-W_1", 229 | "INS-W_158#INS-W_1", 230 | "INS-W_169#INS-W_1", 231 | "INS-W_176#INS-W_1", 232 | "INS-W_193#INS-W_1", 233 | "INS-W_196#INS-W_1", 234 | "INS-W_198#INS-W_1", 235 | "INS-W_209#INS-W_1" 236 | ], 237 | "INS-W_2": 238 | [ 239 | "INS-W_307#INS-W_2", 240 | "INS-W_310#INS-W_2", 241 | "INS-W_316#INS-W_2", 242 | "INS-W_317#INS-W_2", 243 | "INS-W_326#INS-W_2", 244 | "INS-W_327#INS-W_2", 245 | "INS-W_328#INS-W_2", 246 | "INS-W_330#INS-W_2", 247 | "INS-W_331#INS-W_2", 248 | "INS-W_341#INS-W_2", 249 | "INS-W_342#INS-W_2", 250 | "INS-W_343#INS-W_2", 251 | "INS-W_345#INS-W_2", 252 | "INS-W_347#INS-W_2", 253 | "INS-W_348#INS-W_2", 254 | "INS-W_351#INS-W_2", 255 | "INS-W_354#INS-W_2", 256 | "INS-W_355#INS-W_2", 257 | "INS-W_356#INS-W_2", 258 | "INS-W_357#INS-W_2", 259 | "INS-W_359#INS-W_2", 260 | "INS-W_365#INS-W_2", 261 | "INS-W_369#INS-W_2", 262 | "INS-W_370#INS-W_2", 263 | "INS-W_373#INS-W_2", 264 | "INS-W_374#INS-W_2", 265 | "INS-W_375#INS-W_2", 266 | "INS-W_376#INS-W_2", 267 | "INS-W_380#INS-W_2", 268 | "INS-W_383#INS-W_2", 269 | "INS-W_388#INS-W_2", 270 | "INS-W_391#INS-W_2", 271 | "INS-W_395#INS-W_2", 272 | "INS-W_398#INS-W_2", 273 | "INS-W_403#INS-W_2", 274 | "INS-W_405#INS-W_2", 275 | "INS-W_406#INS-W_2", 276 | "INS-W_416#INS-W_2", 277 | "INS-W_421#INS-W_2", 278 | "INS-W_430#INS-W_2", 279 | "INS-W_436#INS-W_2", 280 | "INS-W_439#INS-W_2", 281 | "INS-W_440#INS-W_2", 282 | "INS-W_445#INS-W_2", 283 | "INS-W_446#INS-W_2", 284 | "INS-W_447#INS-W_2", 285 | "INS-W_449#INS-W_2", 286 | "INS-W_452#INS-W_2", 287 | "INS-W_453#INS-W_2", 288 | "INS-W_454#INS-W_2", 289 | "INS-W_466#INS-W_2", 290 | "INS-W_467#INS-W_2", 291 | "INS-W_474#INS-W_2", 292 | "INS-W_475#INS-W_2", 293 | "INS-W_476#INS-W_2", 294 | "INS-W_478#INS-W_2", 295 | "INS-W_480#INS-W_2", 296 | "INS-W_481#INS-W_2", 297 | "INS-W_483#INS-W_2", 298 | "INS-W_484#INS-W_2", 299 | "INS-W_486#INS-W_2", 300 | "INS-W_492#INS-W_2", 301 | "INS-W_501#INS-W_2", 302 | "INS-W_506#INS-W_2", 303 | 
"INS-W_507#INS-W_2", 304 | "INS-W_511#INS-W_2", 305 | "INS-W_513#INS-W_2", 306 | "INS-W_524#INS-W_2", 307 | "INS-W_529#INS-W_2", 308 | "INS-W_530#INS-W_2", 309 | "INS-W_540#INS-W_2", 310 | "INS-W_559#INS-W_2", 311 | "INS-W_564#INS-W_2", 312 | "INS-W_566#INS-W_2" 313 | ], 314 | "INS-W_3": 315 | [ 316 | "INS-W_601#INS-W_3", 317 | "INS-W_602#INS-W_3", 318 | "INS-W_603#INS-W_3", 319 | "INS-W_604#INS-W_3", 320 | "INS-W_605#INS-W_3", 321 | "INS-W_606#INS-W_3", 322 | "INS-W_607#INS-W_3", 323 | "INS-W_608#INS-W_3", 324 | "INS-W_609#INS-W_3", 325 | "INS-W_610#INS-W_3", 326 | "INS-W_611#INS-W_3", 327 | "INS-W_612#INS-W_3", 328 | "INS-W_613#INS-W_3", 329 | "INS-W_614#INS-W_3", 330 | "INS-W_615#INS-W_3", 331 | "INS-W_616#INS-W_3", 332 | "INS-W_617#INS-W_3", 333 | "INS-W_618#INS-W_3", 334 | "INS-W_619#INS-W_3", 335 | "INS-W_620#INS-W_3", 336 | "INS-W_621#INS-W_3", 337 | "INS-W_622#INS-W_3", 338 | "INS-W_623#INS-W_3", 339 | "INS-W_624#INS-W_3", 340 | "INS-W_625#INS-W_3", 341 | "INS-W_626#INS-W_3", 342 | "INS-W_628#INS-W_3", 343 | "INS-W_632#INS-W_3", 344 | "INS-W_633#INS-W_3", 345 | "INS-W_636#INS-W_3", 346 | "INS-W_637#INS-W_3", 347 | "INS-W_641#INS-W_3", 348 | "INS-W_643#INS-W_3", 349 | "INS-W_644#INS-W_3", 350 | "INS-W_645#INS-W_3", 351 | "INS-W_646#INS-W_3", 352 | "INS-W_647#INS-W_3", 353 | "INS-W_649#INS-W_3", 354 | "INS-W_650#INS-W_3", 355 | "INS-W_651#INS-W_3", 356 | "INS-W_652#INS-W_3", 357 | "INS-W_653#INS-W_3", 358 | "INS-W_654#INS-W_3", 359 | "INS-W_655#INS-W_3", 360 | "INS-W_657#INS-W_3", 361 | "INS-W_658#INS-W_3", 362 | "INS-W_659#INS-W_3", 363 | "INS-W_660#INS-W_3", 364 | "INS-W_661#INS-W_3", 365 | "INS-W_663#INS-W_3", 366 | "INS-W_664#INS-W_3", 367 | "INS-W_665#INS-W_3", 368 | "INS-W_666#INS-W_3", 369 | "INS-W_669#INS-W_3", 370 | "INS-W_670#INS-W_3", 371 | "INS-W_672#INS-W_3", 372 | "INS-W_673#INS-W_3", 373 | "INS-W_674#INS-W_3", 374 | "INS-W_676#INS-W_3", 375 | "INS-W_677#INS-W_3", 376 | "INS-W_678#INS-W_3", 377 | "INS-W_680#INS-W_3", 378 | "INS-W_682#INS-W_3", 379 | "INS-W_683#INS-W_3", 380 | "INS-W_684#INS-W_3", 381 | "INS-W_685#INS-W_3", 382 | "INS-W_686#INS-W_3", 383 | "INS-W_687#INS-W_3", 384 | "INS-W_688#INS-W_3", 385 | "INS-W_691#INS-W_3", 386 | "INS-W_693#INS-W_3", 387 | "INS-W_694#INS-W_3", 388 | "INS-W_700#INS-W_3", 389 | "INS-W_706#INS-W_3", 390 | "INS-W_707#INS-W_3", 391 | "INS-W_708#INS-W_3", 392 | "INS-W_709#INS-W_3", 393 | "INS-W_710#INS-W_3", 394 | "INS-W_711#INS-W_3", 395 | "INS-W_714#INS-W_3", 396 | "INS-W_718#INS-W_3", 397 | "INS-W_719#INS-W_3", 398 | "INS-W_720#INS-W_3", 399 | "INS-W_721#INS-W_3", 400 | "INS-W_722#INS-W_3", 401 | "INS-W_723#INS-W_3", 402 | "INS-W_724#INS-W_3", 403 | "INS-W_725#INS-W_3", 404 | "INS-W_726#INS-W_3", 405 | "INS-W_727#INS-W_3", 406 | "INS-W_728#INS-W_3", 407 | "INS-W_729#INS-W_3", 408 | "INS-W_732#INS-W_3", 409 | "INS-W_734#INS-W_3", 410 | "INS-W_737#INS-W_3", 411 | "INS-W_738#INS-W_3", 412 | "INS-W_740#INS-W_3", 413 | "INS-W_742#INS-W_3", 414 | "INS-W_743#INS-W_3", 415 | "INS-W_747#INS-W_3", 416 | "INS-W_748#INS-W_3", 417 | "INS-W_750#INS-W_3", 418 | "INS-W_751#INS-W_3", 419 | "INS-W_752#INS-W_3", 420 | "INS-W_753#INS-W_3", 421 | "INS-W_757#INS-W_3" 422 | ], 423 | "INS-W_4": 424 | [ 425 | "INS-W_1002#INS-W_4", 426 | "INS-W_1004#INS-W_4", 427 | "INS-W_1005#INS-W_4", 428 | "INS-W_1006#INS-W_4", 429 | "INS-W_1008#INS-W_4", 430 | "INS-W_1009#INS-W_4", 431 | "INS-W_1010#INS-W_4", 432 | "INS-W_1011#INS-W_4", 433 | "INS-W_1012#INS-W_4", 434 | "INS-W_1013#INS-W_4", 435 | "INS-W_1014#INS-W_4", 436 | "INS-W_1015#INS-W_4", 437 | 
"INS-W_1016#INS-W_4", 438 | "INS-W_1017#INS-W_4", 439 | "INS-W_1018#INS-W_4", 440 | "INS-W_1019#INS-W_4", 441 | "INS-W_1024#INS-W_4", 442 | "INS-W_1028#INS-W_4", 443 | "INS-W_1037#INS-W_4", 444 | "INS-W_1038#INS-W_4", 445 | "INS-W_1040#INS-W_4", 446 | "INS-W_1044#INS-W_4", 447 | "INS-W_1080#INS-W_4", 448 | "INS-W_1200#INS-W_4", 449 | "INS-W_1201#INS-W_4", 450 | "INS-W_1204#INS-W_4", 451 | "INS-W_1206#INS-W_4", 452 | "INS-W_1210#INS-W_4", 453 | "INS-W_1212#INS-W_4", 454 | "INS-W_1213#INS-W_4", 455 | "INS-W_1216#INS-W_4", 456 | "INS-W_1220#INS-W_4", 457 | "INS-W_1221#INS-W_4", 458 | "INS-W_901#INS-W_4", 459 | "INS-W_903#INS-W_4", 460 | "INS-W_904#INS-W_4", 461 | "INS-W_905#INS-W_4", 462 | "INS-W_910#INS-W_4", 463 | "INS-W_911#INS-W_4", 464 | "INS-W_912#INS-W_4", 465 | "INS-W_913#INS-W_4", 466 | "INS-W_914#INS-W_4", 467 | "INS-W_916#INS-W_4", 468 | "INS-W_918#INS-W_4", 469 | "INS-W_922#INS-W_4", 470 | "INS-W_923#INS-W_4", 471 | "INS-W_925#INS-W_4", 472 | "INS-W_928#INS-W_4", 473 | "INS-W_929#INS-W_4", 474 | "INS-W_931#INS-W_4", 475 | "INS-W_932#INS-W_4", 476 | "INS-W_933#INS-W_4", 477 | "INS-W_934#INS-W_4", 478 | "INS-W_935#INS-W_4", 479 | "INS-W_936#INS-W_4", 480 | "INS-W_937#INS-W_4", 481 | "INS-W_938#INS-W_4", 482 | "INS-W_939#INS-W_4", 483 | "INS-W_940#INS-W_4", 484 | "INS-W_941#INS-W_4", 485 | "INS-W_942#INS-W_4", 486 | "INS-W_943#INS-W_4", 487 | "INS-W_944#INS-W_4", 488 | "INS-W_945#INS-W_4", 489 | "INS-W_946#INS-W_4", 490 | "INS-W_947#INS-W_4", 491 | "INS-W_948#INS-W_4", 492 | "INS-W_950#INS-W_4", 493 | "INS-W_951#INS-W_4", 494 | "INS-W_953#INS-W_4", 495 | "INS-W_955#INS-W_4", 496 | "INS-W_958#INS-W_4", 497 | "INS-W_961#INS-W_4", 498 | "INS-W_962#INS-W_4", 499 | "INS-W_963#INS-W_4", 500 | "INS-W_964#INS-W_4", 501 | "INS-W_965#INS-W_4", 502 | "INS-W_966#INS-W_4", 503 | "INS-W_968#INS-W_4", 504 | "INS-W_969#INS-W_4", 505 | "INS-W_970#INS-W_4", 506 | "INS-W_971#INS-W_4", 507 | "INS-W_972#INS-W_4", 508 | "INS-W_973#INS-W_4", 509 | "INS-W_975#INS-W_4", 510 | "INS-W_976#INS-W_4", 511 | "INS-W_977#INS-W_4", 512 | "INS-W_978#INS-W_4", 513 | "INS-W_979#INS-W_4", 514 | "INS-W_980#INS-W_4", 515 | "INS-W_981#INS-W_4", 516 | "INS-W_982#INS-W_4", 517 | "INS-W_983#INS-W_4", 518 | "INS-W_984#INS-W_4", 519 | "INS-W_985#INS-W_4", 520 | "INS-W_986#INS-W_4", 521 | "INS-W_987#INS-W_4", 522 | "INS-W_988#INS-W_4", 523 | "INS-W_989#INS-W_4", 524 | "INS-W_991#INS-W_4", 525 | "INS-W_993#INS-W_4", 526 | "INS-W_994#INS-W_4", 527 | "INS-W_995#INS-W_4", 528 | "INS-W_997#INS-W_4", 529 | "INS-W_998#INS-W_4", 530 | "INS-W_999#INS-W_4" 531 | ] 532 | }, 533 | "INS-W_4": 534 | { 535 | "INS-W_1": 536 | [ 537 | "INS-W_002#INS-W_1", 538 | "INS-W_009#INS-W_1", 539 | "INS-W_015#INS-W_1", 540 | "INS-W_020#INS-W_1", 541 | "INS-W_023#INS-W_1", 542 | "INS-W_042#INS-W_1", 543 | "INS-W_046#INS-W_1", 544 | "INS-W_048#INS-W_1", 545 | "INS-W_055#INS-W_1", 546 | "INS-W_058#INS-W_1", 547 | "INS-W_063#INS-W_1", 548 | "INS-W_068#INS-W_1", 549 | "INS-W_076#INS-W_1", 550 | "INS-W_082#INS-W_1", 551 | "INS-W_084#INS-W_1", 552 | "INS-W_087#INS-W_1", 553 | "INS-W_094#INS-W_1", 554 | "INS-W_095#INS-W_1", 555 | "INS-W_096#INS-W_1", 556 | "INS-W_098#INS-W_1", 557 | "INS-W_099#INS-W_1", 558 | "INS-W_108#INS-W_1", 559 | "INS-W_109#INS-W_1", 560 | "INS-W_110#INS-W_1", 561 | "INS-W_117#INS-W_1", 562 | "INS-W_125#INS-W_1", 563 | "INS-W_136#INS-W_1", 564 | "INS-W_149#INS-W_1", 565 | "INS-W_153#INS-W_1", 566 | "INS-W_156#INS-W_1", 567 | "INS-W_158#INS-W_1", 568 | "INS-W_169#INS-W_1", 569 | "INS-W_176#INS-W_1", 570 | "INS-W_191#INS-W_1", 571 | 
"INS-W_193#INS-W_1", 572 | "INS-W_196#INS-W_1", 573 | "INS-W_198#INS-W_1", 574 | "INS-W_209#INS-W_1" 575 | ], 576 | "INS-W_2": 577 | [ 578 | "INS-W_304#INS-W_2", 579 | "INS-W_305#INS-W_2", 580 | "INS-W_306#INS-W_2", 581 | "INS-W_307#INS-W_2", 582 | "INS-W_310#INS-W_2", 583 | "INS-W_311#INS-W_2", 584 | "INS-W_316#INS-W_2", 585 | "INS-W_317#INS-W_2", 586 | "INS-W_326#INS-W_2", 587 | "INS-W_327#INS-W_2", 588 | "INS-W_328#INS-W_2", 589 | "INS-W_330#INS-W_2", 590 | "INS-W_331#INS-W_2", 591 | "INS-W_333#INS-W_2", 592 | "INS-W_340#INS-W_2", 593 | "INS-W_341#INS-W_2", 594 | "INS-W_342#INS-W_2", 595 | "INS-W_343#INS-W_2", 596 | "INS-W_345#INS-W_2", 597 | "INS-W_347#INS-W_2", 598 | "INS-W_348#INS-W_2", 599 | "INS-W_351#INS-W_2", 600 | "INS-W_353#INS-W_2", 601 | "INS-W_354#INS-W_2", 602 | "INS-W_355#INS-W_2", 603 | "INS-W_356#INS-W_2", 604 | "INS-W_357#INS-W_2", 605 | "INS-W_359#INS-W_2", 606 | "INS-W_365#INS-W_2", 607 | "INS-W_367#INS-W_2", 608 | "INS-W_369#INS-W_2", 609 | "INS-W_370#INS-W_2", 610 | "INS-W_373#INS-W_2", 611 | "INS-W_374#INS-W_2", 612 | "INS-W_375#INS-W_2", 613 | "INS-W_376#INS-W_2", 614 | "INS-W_380#INS-W_2", 615 | "INS-W_383#INS-W_2", 616 | "INS-W_388#INS-W_2", 617 | "INS-W_391#INS-W_2", 618 | "INS-W_395#INS-W_2", 619 | "INS-W_398#INS-W_2", 620 | "INS-W_399#INS-W_2", 621 | "INS-W_403#INS-W_2", 622 | "INS-W_404#INS-W_2", 623 | "INS-W_405#INS-W_2", 624 | "INS-W_406#INS-W_2", 625 | "INS-W_408#INS-W_2", 626 | "INS-W_412#INS-W_2", 627 | "INS-W_413#INS-W_2", 628 | "INS-W_416#INS-W_2", 629 | "INS-W_418#INS-W_2", 630 | "INS-W_421#INS-W_2", 631 | "INS-W_430#INS-W_2", 632 | "INS-W_436#INS-W_2", 633 | "INS-W_439#INS-W_2", 634 | "INS-W_440#INS-W_2", 635 | "INS-W_445#INS-W_2", 636 | "INS-W_446#INS-W_2", 637 | "INS-W_447#INS-W_2", 638 | "INS-W_449#INS-W_2", 639 | "INS-W_450#INS-W_2", 640 | "INS-W_452#INS-W_2", 641 | "INS-W_453#INS-W_2", 642 | "INS-W_454#INS-W_2", 643 | "INS-W_466#INS-W_2", 644 | "INS-W_467#INS-W_2", 645 | "INS-W_473#INS-W_2", 646 | "INS-W_474#INS-W_2", 647 | "INS-W_475#INS-W_2", 648 | "INS-W_476#INS-W_2", 649 | "INS-W_478#INS-W_2", 650 | "INS-W_480#INS-W_2", 651 | "INS-W_481#INS-W_2", 652 | "INS-W_483#INS-W_2", 653 | "INS-W_484#INS-W_2", 654 | "INS-W_486#INS-W_2", 655 | "INS-W_488#INS-W_2", 656 | "INS-W_492#INS-W_2", 657 | "INS-W_494#INS-W_2", 658 | "INS-W_501#INS-W_2", 659 | "INS-W_502#INS-W_2", 660 | "INS-W_506#INS-W_2", 661 | "INS-W_507#INS-W_2", 662 | "INS-W_511#INS-W_2", 663 | "INS-W_513#INS-W_2", 664 | "INS-W_524#INS-W_2", 665 | "INS-W_529#INS-W_2", 666 | "INS-W_530#INS-W_2", 667 | "INS-W_540#INS-W_2", 668 | "INS-W_542#INS-W_2", 669 | "INS-W_543#INS-W_2", 670 | "INS-W_550#INS-W_2", 671 | "INS-W_559#INS-W_2", 672 | "INS-W_560#INS-W_2", 673 | "INS-W_561#INS-W_2", 674 | "INS-W_563#INS-W_2", 675 | "INS-W_564#INS-W_2", 676 | "INS-W_566#INS-W_2" 677 | ], 678 | "INS-W_3": 679 | [ 680 | "INS-W_601#INS-W_3", 681 | "INS-W_602#INS-W_3", 682 | "INS-W_603#INS-W_3", 683 | "INS-W_604#INS-W_3", 684 | "INS-W_605#INS-W_3", 685 | "INS-W_606#INS-W_3", 686 | "INS-W_607#INS-W_3", 687 | "INS-W_608#INS-W_3", 688 | "INS-W_609#INS-W_3", 689 | "INS-W_610#INS-W_3", 690 | "INS-W_611#INS-W_3", 691 | "INS-W_612#INS-W_3", 692 | "INS-W_613#INS-W_3", 693 | "INS-W_614#INS-W_3", 694 | "INS-W_615#INS-W_3", 695 | "INS-W_616#INS-W_3", 696 | "INS-W_617#INS-W_3", 697 | "INS-W_618#INS-W_3", 698 | "INS-W_619#INS-W_3", 699 | "INS-W_620#INS-W_3", 700 | "INS-W_621#INS-W_3", 701 | "INS-W_622#INS-W_3", 702 | "INS-W_623#INS-W_3", 703 | "INS-W_624#INS-W_3", 704 | "INS-W_625#INS-W_3", 705 | "INS-W_626#INS-W_3", 706 | 
"INS-W_628#INS-W_3", 707 | "INS-W_632#INS-W_3", 708 | "INS-W_633#INS-W_3", 709 | "INS-W_636#INS-W_3", 710 | "INS-W_637#INS-W_3", 711 | "INS-W_641#INS-W_3", 712 | "INS-W_643#INS-W_3", 713 | "INS-W_644#INS-W_3", 714 | "INS-W_645#INS-W_3", 715 | "INS-W_646#INS-W_3", 716 | "INS-W_647#INS-W_3", 717 | "INS-W_649#INS-W_3", 718 | "INS-W_650#INS-W_3", 719 | "INS-W_651#INS-W_3", 720 | "INS-W_652#INS-W_3", 721 | "INS-W_653#INS-W_3", 722 | "INS-W_654#INS-W_3", 723 | "INS-W_655#INS-W_3", 724 | "INS-W_657#INS-W_3", 725 | "INS-W_658#INS-W_3", 726 | "INS-W_659#INS-W_3", 727 | "INS-W_660#INS-W_3", 728 | "INS-W_661#INS-W_3", 729 | "INS-W_663#INS-W_3", 730 | "INS-W_664#INS-W_3", 731 | "INS-W_665#INS-W_3", 732 | "INS-W_666#INS-W_3", 733 | "INS-W_669#INS-W_3", 734 | "INS-W_670#INS-W_3", 735 | "INS-W_672#INS-W_3", 736 | "INS-W_673#INS-W_3", 737 | "INS-W_674#INS-W_3", 738 | "INS-W_676#INS-W_3", 739 | "INS-W_677#INS-W_3", 740 | "INS-W_678#INS-W_3", 741 | "INS-W_680#INS-W_3", 742 | "INS-W_682#INS-W_3", 743 | "INS-W_683#INS-W_3", 744 | "INS-W_684#INS-W_3", 745 | "INS-W_685#INS-W_3", 746 | "INS-W_686#INS-W_3", 747 | "INS-W_687#INS-W_3", 748 | "INS-W_688#INS-W_3", 749 | "INS-W_691#INS-W_3", 750 | "INS-W_693#INS-W_3", 751 | "INS-W_694#INS-W_3", 752 | "INS-W_700#INS-W_3", 753 | "INS-W_706#INS-W_3", 754 | "INS-W_707#INS-W_3", 755 | "INS-W_708#INS-W_3", 756 | "INS-W_709#INS-W_3", 757 | "INS-W_710#INS-W_3", 758 | "INS-W_711#INS-W_3", 759 | "INS-W_714#INS-W_3", 760 | "INS-W_718#INS-W_3", 761 | "INS-W_719#INS-W_3", 762 | "INS-W_720#INS-W_3", 763 | "INS-W_721#INS-W_3", 764 | "INS-W_722#INS-W_3", 765 | "INS-W_723#INS-W_3", 766 | "INS-W_724#INS-W_3", 767 | "INS-W_725#INS-W_3", 768 | "INS-W_726#INS-W_3", 769 | "INS-W_727#INS-W_3", 770 | "INS-W_728#INS-W_3", 771 | "INS-W_729#INS-W_3", 772 | "INS-W_732#INS-W_3", 773 | "INS-W_734#INS-W_3", 774 | "INS-W_737#INS-W_3", 775 | "INS-W_738#INS-W_3", 776 | "INS-W_740#INS-W_3", 777 | "INS-W_742#INS-W_3", 778 | "INS-W_743#INS-W_3", 779 | "INS-W_747#INS-W_3", 780 | "INS-W_748#INS-W_3", 781 | "INS-W_750#INS-W_3", 782 | "INS-W_751#INS-W_3", 783 | "INS-W_752#INS-W_3", 784 | "INS-W_753#INS-W_3", 785 | "INS-W_757#INS-W_3" 786 | ], 787 | "INS-W_4": 788 | [ 789 | "INS-W_1001#INS-W_4", 790 | "INS-W_1002#INS-W_4", 791 | "INS-W_1003#INS-W_4", 792 | "INS-W_1004#INS-W_4", 793 | "INS-W_1005#INS-W_4", 794 | "INS-W_1006#INS-W_4", 795 | "INS-W_1007#INS-W_4", 796 | "INS-W_1008#INS-W_4", 797 | "INS-W_1009#INS-W_4", 798 | "INS-W_1010#INS-W_4", 799 | "INS-W_1011#INS-W_4", 800 | "INS-W_1012#INS-W_4", 801 | "INS-W_1013#INS-W_4", 802 | "INS-W_1014#INS-W_4", 803 | "INS-W_1015#INS-W_4", 804 | "INS-W_1016#INS-W_4", 805 | "INS-W_1017#INS-W_4", 806 | "INS-W_1018#INS-W_4", 807 | "INS-W_1019#INS-W_4", 808 | "INS-W_1024#INS-W_4", 809 | "INS-W_1026#INS-W_4", 810 | "INS-W_1028#INS-W_4", 811 | "INS-W_1036#INS-W_4", 812 | "INS-W_1037#INS-W_4", 813 | "INS-W_1038#INS-W_4", 814 | "INS-W_1039#INS-W_4", 815 | "INS-W_1040#INS-W_4", 816 | "INS-W_1044#INS-W_4", 817 | "INS-W_1061#INS-W_4", 818 | "INS-W_1080#INS-W_4", 819 | "INS-W_1200#INS-W_4", 820 | "INS-W_1201#INS-W_4", 821 | "INS-W_1203#INS-W_4", 822 | "INS-W_1204#INS-W_4", 823 | "INS-W_1205#INS-W_4", 824 | "INS-W_1206#INS-W_4", 825 | "INS-W_1210#INS-W_4", 826 | "INS-W_1211#INS-W_4", 827 | "INS-W_1212#INS-W_4", 828 | "INS-W_1213#INS-W_4", 829 | "INS-W_1216#INS-W_4", 830 | "INS-W_1220#INS-W_4", 831 | "INS-W_1221#INS-W_4", 832 | "INS-W_900#INS-W_4", 833 | "INS-W_901#INS-W_4", 834 | "INS-W_902#INS-W_4", 835 | "INS-W_903#INS-W_4", 836 | "INS-W_904#INS-W_4", 837 | "INS-W_905#INS-W_4", 
838 | "INS-W_906#INS-W_4", 839 | "INS-W_907#INS-W_4", 840 | "INS-W_908#INS-W_4", 841 | "INS-W_909#INS-W_4", 842 | "INS-W_910#INS-W_4", 843 | "INS-W_911#INS-W_4", 844 | "INS-W_912#INS-W_4", 845 | "INS-W_913#INS-W_4", 846 | "INS-W_914#INS-W_4", 847 | "INS-W_915#INS-W_4", 848 | "INS-W_916#INS-W_4", 849 | "INS-W_917#INS-W_4", 850 | "INS-W_918#INS-W_4", 851 | "INS-W_922#INS-W_4", 852 | "INS-W_923#INS-W_4", 853 | "INS-W_924#INS-W_4", 854 | "INS-W_925#INS-W_4", 855 | "INS-W_926#INS-W_4", 856 | "INS-W_927#INS-W_4", 857 | "INS-W_928#INS-W_4", 858 | "INS-W_929#INS-W_4", 859 | "INS-W_931#INS-W_4", 860 | "INS-W_932#INS-W_4", 861 | "INS-W_933#INS-W_4", 862 | "INS-W_934#INS-W_4", 863 | "INS-W_935#INS-W_4", 864 | "INS-W_936#INS-W_4", 865 | "INS-W_937#INS-W_4", 866 | "INS-W_938#INS-W_4", 867 | "INS-W_939#INS-W_4", 868 | "INS-W_940#INS-W_4", 869 | "INS-W_941#INS-W_4", 870 | "INS-W_942#INS-W_4", 871 | "INS-W_943#INS-W_4", 872 | "INS-W_944#INS-W_4", 873 | "INS-W_945#INS-W_4", 874 | "INS-W_946#INS-W_4", 875 | "INS-W_947#INS-W_4", 876 | "INS-W_948#INS-W_4", 877 | "INS-W_950#INS-W_4", 878 | "INS-W_951#INS-W_4", 879 | "INS-W_953#INS-W_4", 880 | "INS-W_954#INS-W_4", 881 | "INS-W_955#INS-W_4", 882 | "INS-W_956#INS-W_4", 883 | "INS-W_957#INS-W_4", 884 | "INS-W_958#INS-W_4", 885 | "INS-W_959#INS-W_4", 886 | "INS-W_960#INS-W_4", 887 | "INS-W_961#INS-W_4", 888 | "INS-W_962#INS-W_4", 889 | "INS-W_963#INS-W_4", 890 | "INS-W_964#INS-W_4", 891 | "INS-W_965#INS-W_4", 892 | "INS-W_966#INS-W_4", 893 | "INS-W_968#INS-W_4", 894 | "INS-W_969#INS-W_4", 895 | "INS-W_970#INS-W_4", 896 | "INS-W_971#INS-W_4", 897 | "INS-W_972#INS-W_4", 898 | "INS-W_973#INS-W_4", 899 | "INS-W_974#INS-W_4", 900 | "INS-W_975#INS-W_4", 901 | "INS-W_976#INS-W_4", 902 | "INS-W_977#INS-W_4", 903 | "INS-W_978#INS-W_4", 904 | "INS-W_979#INS-W_4", 905 | "INS-W_980#INS-W_4", 906 | "INS-W_981#INS-W_4", 907 | "INS-W_982#INS-W_4", 908 | "INS-W_983#INS-W_4", 909 | "INS-W_984#INS-W_4", 910 | "INS-W_985#INS-W_4", 911 | "INS-W_986#INS-W_4", 912 | "INS-W_987#INS-W_4", 913 | "INS-W_988#INS-W_4", 914 | "INS-W_989#INS-W_4", 915 | "INS-W_991#INS-W_4", 916 | "INS-W_992#INS-W_4", 917 | "INS-W_993#INS-W_4", 918 | "INS-W_994#INS-W_4", 919 | "INS-W_995#INS-W_4", 920 | "INS-W_996#INS-W_4", 921 | "INS-W_997#INS-W_4", 922 | "INS-W_998#INS-W_4", 923 | "INS-W_999#INS-W_4" 924 | ] 925 | }, 926 | "INS-D_1": 927 | { 928 | "INS-D_1": 929 | [ 930 | "INS-D_0002#INS-D_1", 931 | "INS-D_0005#INS-D_1", 932 | "INS-D_0006#INS-D_1", 933 | "INS-D_0012#INS-D_1", 934 | "INS-D_0013#INS-D_1", 935 | "INS-D_0014#INS-D_1", 936 | "INS-D_0015#INS-D_1", 937 | "INS-D_0022#INS-D_1", 938 | "INS-D_0023#INS-D_1", 939 | "INS-D_0024#INS-D_1", 940 | "INS-D_0025#INS-D_1", 941 | "INS-D_0026#INS-D_1", 942 | "INS-D_0027#INS-D_1", 943 | "INS-D_0028#INS-D_1", 944 | "INS-D_0030#INS-D_1", 945 | "INS-D_0031#INS-D_1", 946 | "INS-D_0032#INS-D_1", 947 | "INS-D_0033#INS-D_1", 948 | "INS-D_0045#INS-D_1", 949 | "INS-D_0046#INS-D_1", 950 | "INS-D_0047#INS-D_1", 951 | "INS-D_0050#INS-D_1", 952 | "INS-D_0052#INS-D_1", 953 | "INS-D_0053#INS-D_1", 954 | "INS-D_0054#INS-D_1", 955 | "INS-D_0055#INS-D_1", 956 | "INS-D_0059#INS-D_1", 957 | "INS-D_0060#INS-D_1", 958 | "INS-D_0061#INS-D_1", 959 | "INS-D_0062#INS-D_1", 960 | "INS-D_0064#INS-D_1", 961 | "INS-D_0068#INS-D_1", 962 | "INS-D_0069#INS-D_1", 963 | "INS-D_0071#INS-D_1", 964 | "INS-D_0073#INS-D_1", 965 | "INS-D_0077#INS-D_1", 966 | "INS-D_0081#INS-D_1", 967 | "INS-D_0082#INS-D_1", 968 | "INS-D_0084#INS-D_1", 969 | "INS-D_0087#INS-D_1", 970 | "INS-D_0089#INS-D_1", 971 | 
"INS-D_0092#INS-D_1", 972 | "INS-D_0096#INS-D_1", 973 | "INS-D_0097#INS-D_1", 974 | "INS-D_0098#INS-D_1", 975 | "INS-D_0099#INS-D_1", 976 | "INS-D_0100#INS-D_1", 977 | "INS-D_0101#INS-D_1", 978 | "INS-D_0102#INS-D_1", 979 | "INS-D_0103#INS-D_1", 980 | "INS-D_0105#INS-D_1", 981 | "INS-D_0108#INS-D_1", 982 | "INS-D_0115#INS-D_1" 983 | ], 984 | "INS-D_2": 985 | [ 986 | "INS-D_0002#INS-D_2", 987 | "INS-D_0005#INS-D_2", 988 | "INS-D_0006#INS-D_2", 989 | "INS-D_0012#INS-D_2", 990 | "INS-D_0013#INS-D_2", 991 | "INS-D_0014#INS-D_2", 992 | "INS-D_0015#INS-D_2", 993 | "INS-D_0022#INS-D_2", 994 | "INS-D_0023#INS-D_2", 995 | "INS-D_0024#INS-D_2", 996 | "INS-D_0025#INS-D_2", 997 | "INS-D_0026#INS-D_2", 998 | "INS-D_0027#INS-D_2", 999 | "INS-D_0028#INS-D_2", 1000 | "INS-D_0030#INS-D_2", 1001 | "INS-D_0031#INS-D_2", 1002 | "INS-D_0032#INS-D_2", 1003 | "INS-D_0033#INS-D_2", 1004 | "INS-D_0045#INS-D_2", 1005 | "INS-D_0046#INS-D_2", 1006 | "INS-D_0047#INS-D_2", 1007 | "INS-D_0050#INS-D_2", 1008 | "INS-D_0052#INS-D_2", 1009 | "INS-D_0053#INS-D_2", 1010 | "INS-D_0054#INS-D_2", 1011 | "INS-D_0055#INS-D_2", 1012 | "INS-D_0059#INS-D_2", 1013 | "INS-D_0060#INS-D_2", 1014 | "INS-D_0061#INS-D_2", 1015 | "INS-D_0062#INS-D_2", 1016 | "INS-D_0064#INS-D_2", 1017 | "INS-D_0068#INS-D_2", 1018 | "INS-D_0069#INS-D_2", 1019 | "INS-D_0071#INS-D_2", 1020 | "INS-D_0073#INS-D_2", 1021 | "INS-D_0077#INS-D_2", 1022 | "INS-D_0081#INS-D_2", 1023 | "INS-D_0082#INS-D_2", 1024 | "INS-D_0084#INS-D_2", 1025 | "INS-D_0087#INS-D_2", 1026 | "INS-D_0089#INS-D_2", 1027 | "INS-D_0092#INS-D_2", 1028 | "INS-D_0096#INS-D_2", 1029 | "INS-D_0097#INS-D_2", 1030 | "INS-D_0098#INS-D_2", 1031 | "INS-D_0099#INS-D_2", 1032 | "INS-D_0100#INS-D_2", 1033 | "INS-D_0101#INS-D_2", 1034 | "INS-D_0102#INS-D_2", 1035 | "INS-D_0103#INS-D_2", 1036 | "INS-D_0105#INS-D_2", 1037 | "INS-D_0108#INS-D_2", 1038 | "INS-D_0115#INS-D_2" 1039 | ] 1040 | }, 1041 | "INS-D_2": 1042 | { 1043 | "INS-D_1": 1044 | [ 1045 | "INS-D_0002#INS-D_1", 1046 | "INS-D_0005#INS-D_1", 1047 | "INS-D_0006#INS-D_1", 1048 | "INS-D_0012#INS-D_1", 1049 | "INS-D_0013#INS-D_1", 1050 | "INS-D_0014#INS-D_1", 1051 | "INS-D_0015#INS-D_1", 1052 | "INS-D_0022#INS-D_1", 1053 | "INS-D_0023#INS-D_1", 1054 | "INS-D_0024#INS-D_1", 1055 | "INS-D_0025#INS-D_1", 1056 | "INS-D_0026#INS-D_1", 1057 | "INS-D_0027#INS-D_1", 1058 | "INS-D_0028#INS-D_1", 1059 | "INS-D_0030#INS-D_1", 1060 | "INS-D_0031#INS-D_1", 1061 | "INS-D_0032#INS-D_1", 1062 | "INS-D_0033#INS-D_1", 1063 | "INS-D_0045#INS-D_1", 1064 | "INS-D_0046#INS-D_1", 1065 | "INS-D_0047#INS-D_1", 1066 | "INS-D_0050#INS-D_1", 1067 | "INS-D_0052#INS-D_1", 1068 | "INS-D_0053#INS-D_1", 1069 | "INS-D_0054#INS-D_1", 1070 | "INS-D_0055#INS-D_1", 1071 | "INS-D_0059#INS-D_1", 1072 | "INS-D_0060#INS-D_1", 1073 | "INS-D_0061#INS-D_1", 1074 | "INS-D_0062#INS-D_1", 1075 | "INS-D_0064#INS-D_1", 1076 | "INS-D_0068#INS-D_1", 1077 | "INS-D_0069#INS-D_1", 1078 | "INS-D_0071#INS-D_1", 1079 | "INS-D_0073#INS-D_1", 1080 | "INS-D_0077#INS-D_1", 1081 | "INS-D_0081#INS-D_1", 1082 | "INS-D_0082#INS-D_1", 1083 | "INS-D_0084#INS-D_1", 1084 | "INS-D_0087#INS-D_1", 1085 | "INS-D_0089#INS-D_1", 1086 | "INS-D_0092#INS-D_1", 1087 | "INS-D_0096#INS-D_1", 1088 | "INS-D_0097#INS-D_1", 1089 | "INS-D_0098#INS-D_1", 1090 | "INS-D_0099#INS-D_1", 1091 | "INS-D_0100#INS-D_1", 1092 | "INS-D_0101#INS-D_1", 1093 | "INS-D_0102#INS-D_1", 1094 | "INS-D_0103#INS-D_1", 1095 | "INS-D_0105#INS-D_1", 1096 | "INS-D_0108#INS-D_1", 1097 | "INS-D_0115#INS-D_1" 1098 | ], 1099 | "INS-D_2": 1100 | [ 1101 | 
"INS-D_0002#INS-D_2", 1102 | "INS-D_0005#INS-D_2", 1103 | "INS-D_0006#INS-D_2", 1104 | "INS-D_0012#INS-D_2", 1105 | "INS-D_0013#INS-D_2", 1106 | "INS-D_0014#INS-D_2", 1107 | "INS-D_0015#INS-D_2", 1108 | "INS-D_0022#INS-D_2", 1109 | "INS-D_0023#INS-D_2", 1110 | "INS-D_0024#INS-D_2", 1111 | "INS-D_0025#INS-D_2", 1112 | "INS-D_0026#INS-D_2", 1113 | "INS-D_0027#INS-D_2", 1114 | "INS-D_0028#INS-D_2", 1115 | "INS-D_0030#INS-D_2", 1116 | "INS-D_0031#INS-D_2", 1117 | "INS-D_0032#INS-D_2", 1118 | "INS-D_0033#INS-D_2", 1119 | "INS-D_0045#INS-D_2", 1120 | "INS-D_0046#INS-D_2", 1121 | "INS-D_0047#INS-D_2", 1122 | "INS-D_0050#INS-D_2", 1123 | "INS-D_0052#INS-D_2", 1124 | "INS-D_0053#INS-D_2", 1125 | "INS-D_0054#INS-D_2", 1126 | "INS-D_0055#INS-D_2", 1127 | "INS-D_0059#INS-D_2", 1128 | "INS-D_0060#INS-D_2", 1129 | "INS-D_0061#INS-D_2", 1130 | "INS-D_0062#INS-D_2", 1131 | "INS-D_0064#INS-D_2", 1132 | "INS-D_0068#INS-D_2", 1133 | "INS-D_0069#INS-D_2", 1134 | "INS-D_0071#INS-D_2", 1135 | "INS-D_0073#INS-D_2", 1136 | "INS-D_0077#INS-D_2", 1137 | "INS-D_0081#INS-D_2", 1138 | "INS-D_0082#INS-D_2", 1139 | "INS-D_0084#INS-D_2", 1140 | "INS-D_0087#INS-D_2", 1141 | "INS-D_0089#INS-D_2", 1142 | "INS-D_0092#INS-D_2", 1143 | "INS-D_0096#INS-D_2", 1144 | "INS-D_0097#INS-D_2", 1145 | "INS-D_0098#INS-D_2", 1146 | "INS-D_0099#INS-D_2", 1147 | "INS-D_0100#INS-D_2", 1148 | "INS-D_0101#INS-D_2", 1149 | "INS-D_0102#INS-D_2", 1150 | "INS-D_0103#INS-D_2", 1151 | "INS-D_0105#INS-D_2", 1152 | "INS-D_0108#INS-D_2", 1153 | "INS-D_0115#INS-D_2" 1154 | ] 1155 | } 1156 | }, 1157 | "dep_weekly": 1158 | { 1159 | "INS-W_1": 1160 | { 1161 | "INS-W_1": 1162 | [ 1163 | "INS-W_002#INS-W_1", 1164 | "INS-W_015#INS-W_1", 1165 | "INS-W_042#INS-W_1", 1166 | "INS-W_046#INS-W_1", 1167 | "INS-W_048#INS-W_1", 1168 | "INS-W_055#INS-W_1", 1169 | "INS-W_058#INS-W_1", 1170 | "INS-W_076#INS-W_1", 1171 | "INS-W_082#INS-W_1", 1172 | "INS-W_094#INS-W_1", 1173 | "INS-W_096#INS-W_1", 1174 | "INS-W_098#INS-W_1", 1175 | "INS-W_099#INS-W_1", 1176 | "INS-W_108#INS-W_1", 1177 | "INS-W_117#INS-W_1", 1178 | "INS-W_125#INS-W_1", 1179 | "INS-W_136#INS-W_1", 1180 | "INS-W_149#INS-W_1", 1181 | "INS-W_153#INS-W_1", 1182 | "INS-W_156#INS-W_1", 1183 | "INS-W_169#INS-W_1", 1184 | "INS-W_176#INS-W_1", 1185 | "INS-W_191#INS-W_1" 1186 | ], 1187 | "INS-W_2": 1188 | [ 1189 | "INS-W_310#INS-W_2", 1190 | "INS-W_316#INS-W_2", 1191 | "INS-W_326#INS-W_2", 1192 | "INS-W_330#INS-W_2", 1193 | "INS-W_333#INS-W_2", 1194 | "INS-W_341#INS-W_2", 1195 | "INS-W_343#INS-W_2", 1196 | "INS-W_345#INS-W_2", 1197 | "INS-W_367#INS-W_2", 1198 | "INS-W_375#INS-W_2", 1199 | "INS-W_408#INS-W_2", 1200 | "INS-W_447#INS-W_2", 1201 | "INS-W_449#INS-W_2", 1202 | "INS-W_450#INS-W_2", 1203 | "INS-W_452#INS-W_2", 1204 | "INS-W_453#INS-W_2", 1205 | "INS-W_474#INS-W_2", 1206 | "INS-W_476#INS-W_2", 1207 | "INS-W_483#INS-W_2", 1208 | "INS-W_484#INS-W_2", 1209 | "INS-W_492#INS-W_2", 1210 | "INS-W_529#INS-W_2", 1211 | "INS-W_530#INS-W_2" 1212 | ], 1213 | "INS-W_3": 1214 | [ 1215 | "INS-W_603#INS-W_3", 1216 | "INS-W_614#INS-W_3", 1217 | "INS-W_615#INS-W_3", 1218 | "INS-W_617#INS-W_3", 1219 | "INS-W_622#INS-W_3", 1220 | "INS-W_636#INS-W_3", 1221 | "INS-W_637#INS-W_3", 1222 | "INS-W_645#INS-W_3", 1223 | "INS-W_647#INS-W_3", 1224 | "INS-W_659#INS-W_3", 1225 | "INS-W_661#INS-W_3", 1226 | "INS-W_664#INS-W_3", 1227 | "INS-W_666#INS-W_3", 1228 | "INS-W_669#INS-W_3", 1229 | "INS-W_678#INS-W_3", 1230 | "INS-W_680#INS-W_3", 1231 | "INS-W_685#INS-W_3", 1232 | "INS-W_688#INS-W_3", 1233 | "INS-W_722#INS-W_3" 1234 | ], 1235 | 
"INS-W_4": 1236 | [ 1237 | "INS-W_1003#INS-W_4", 1238 | "INS-W_1004#INS-W_4", 1239 | "INS-W_1010#INS-W_4", 1240 | "INS-W_1013#INS-W_4", 1241 | "INS-W_1018#INS-W_4", 1242 | "INS-W_1039#INS-W_4", 1243 | "INS-W_1206#INS-W_4", 1244 | "INS-W_903#INS-W_4", 1245 | "INS-W_911#INS-W_4", 1246 | "INS-W_915#INS-W_4", 1247 | "INS-W_918#INS-W_4", 1248 | "INS-W_925#INS-W_4", 1249 | "INS-W_933#INS-W_4", 1250 | "INS-W_935#INS-W_4", 1251 | "INS-W_942#INS-W_4", 1252 | "INS-W_953#INS-W_4", 1253 | "INS-W_966#INS-W_4", 1254 | "INS-W_969#INS-W_4", 1255 | "INS-W_971#INS-W_4", 1256 | "INS-W_985#INS-W_4", 1257 | "INS-W_987#INS-W_4", 1258 | "INS-W_995#INS-W_4", 1259 | "INS-W_996#INS-W_4" 1260 | ] 1261 | }, 1262 | "INS-W_2": 1263 | { 1264 | "INS-W_1": 1265 | [ 1266 | "INS-W_002#INS-W_1", 1267 | "INS-W_015#INS-W_1", 1268 | "INS-W_042#INS-W_1", 1269 | "INS-W_046#INS-W_1", 1270 | "INS-W_048#INS-W_1", 1271 | "INS-W_055#INS-W_1", 1272 | "INS-W_058#INS-W_1", 1273 | "INS-W_076#INS-W_1", 1274 | "INS-W_082#INS-W_1", 1275 | "INS-W_094#INS-W_1", 1276 | "INS-W_096#INS-W_1", 1277 | "INS-W_098#INS-W_1", 1278 | "INS-W_099#INS-W_1", 1279 | "INS-W_108#INS-W_1", 1280 | "INS-W_117#INS-W_1", 1281 | "INS-W_125#INS-W_1", 1282 | "INS-W_136#INS-W_1", 1283 | "INS-W_149#INS-W_1", 1284 | "INS-W_153#INS-W_1", 1285 | "INS-W_156#INS-W_1", 1286 | "INS-W_169#INS-W_1", 1287 | "INS-W_176#INS-W_1", 1288 | "INS-W_191#INS-W_1" 1289 | ], 1290 | "INS-W_2": 1291 | [ 1292 | "INS-W_310#INS-W_2", 1293 | "INS-W_316#INS-W_2", 1294 | "INS-W_326#INS-W_2", 1295 | "INS-W_330#INS-W_2", 1296 | "INS-W_333#INS-W_2", 1297 | "INS-W_341#INS-W_2", 1298 | "INS-W_343#INS-W_2", 1299 | "INS-W_345#INS-W_2", 1300 | "INS-W_367#INS-W_2", 1301 | "INS-W_375#INS-W_2", 1302 | "INS-W_408#INS-W_2", 1303 | "INS-W_447#INS-W_2", 1304 | "INS-W_449#INS-W_2", 1305 | "INS-W_450#INS-W_2", 1306 | "INS-W_452#INS-W_2", 1307 | "INS-W_453#INS-W_2", 1308 | "INS-W_474#INS-W_2", 1309 | "INS-W_476#INS-W_2", 1310 | "INS-W_483#INS-W_2", 1311 | "INS-W_484#INS-W_2", 1312 | "INS-W_492#INS-W_2", 1313 | "INS-W_529#INS-W_2", 1314 | "INS-W_530#INS-W_2" 1315 | ], 1316 | "INS-W_3": 1317 | [ 1318 | "INS-W_603#INS-W_3", 1319 | "INS-W_614#INS-W_3", 1320 | "INS-W_615#INS-W_3", 1321 | "INS-W_617#INS-W_3", 1322 | "INS-W_622#INS-W_3", 1323 | "INS-W_636#INS-W_3", 1324 | "INS-W_637#INS-W_3", 1325 | "INS-W_645#INS-W_3", 1326 | "INS-W_647#INS-W_3", 1327 | "INS-W_659#INS-W_3", 1328 | "INS-W_661#INS-W_3", 1329 | "INS-W_664#INS-W_3", 1330 | "INS-W_666#INS-W_3", 1331 | "INS-W_669#INS-W_3", 1332 | "INS-W_678#INS-W_3", 1333 | "INS-W_680#INS-W_3", 1334 | "INS-W_685#INS-W_3", 1335 | "INS-W_688#INS-W_3", 1336 | "INS-W_722#INS-W_3" 1337 | ], 1338 | "INS-W_4": 1339 | [ 1340 | "INS-W_1003#INS-W_4", 1341 | "INS-W_1004#INS-W_4", 1342 | "INS-W_1010#INS-W_4", 1343 | "INS-W_1013#INS-W_4", 1344 | "INS-W_1018#INS-W_4", 1345 | "INS-W_1039#INS-W_4", 1346 | "INS-W_1206#INS-W_4", 1347 | "INS-W_903#INS-W_4", 1348 | "INS-W_911#INS-W_4", 1349 | "INS-W_915#INS-W_4", 1350 | "INS-W_918#INS-W_4", 1351 | "INS-W_925#INS-W_4", 1352 | "INS-W_933#INS-W_4", 1353 | "INS-W_935#INS-W_4", 1354 | "INS-W_942#INS-W_4", 1355 | "INS-W_953#INS-W_4", 1356 | "INS-W_966#INS-W_4", 1357 | "INS-W_969#INS-W_4", 1358 | "INS-W_971#INS-W_4", 1359 | "INS-W_985#INS-W_4", 1360 | "INS-W_987#INS-W_4", 1361 | "INS-W_995#INS-W_4", 1362 | "INS-W_996#INS-W_4" 1363 | ] 1364 | }, 1365 | "INS-W_3": 1366 | { 1367 | "INS-W_1": 1368 | [ 1369 | "INS-W_002#INS-W_1", 1370 | "INS-W_015#INS-W_1", 1371 | "INS-W_020#INS-W_1", 1372 | "INS-W_023#INS-W_1", 1373 | "INS-W_042#INS-W_1", 1374 | 
"INS-W_046#INS-W_1", 1375 | "INS-W_048#INS-W_1", 1376 | "INS-W_055#INS-W_1", 1377 | "INS-W_058#INS-W_1", 1378 | "INS-W_076#INS-W_1", 1379 | "INS-W_082#INS-W_1", 1380 | "INS-W_084#INS-W_1", 1381 | "INS-W_095#INS-W_1", 1382 | "INS-W_096#INS-W_1", 1383 | "INS-W_098#INS-W_1", 1384 | "INS-W_108#INS-W_1", 1385 | "INS-W_109#INS-W_1", 1386 | "INS-W_110#INS-W_1", 1387 | "INS-W_117#INS-W_1", 1388 | "INS-W_125#INS-W_1", 1389 | "INS-W_149#INS-W_1", 1390 | "INS-W_153#INS-W_1", 1391 | "INS-W_156#INS-W_1", 1392 | "INS-W_158#INS-W_1", 1393 | "INS-W_169#INS-W_1", 1394 | "INS-W_176#INS-W_1", 1395 | "INS-W_193#INS-W_1", 1396 | "INS-W_196#INS-W_1", 1397 | "INS-W_198#INS-W_1", 1398 | "INS-W_209#INS-W_1" 1399 | ], 1400 | "INS-W_2": 1401 | [ 1402 | "INS-W_307#INS-W_2", 1403 | "INS-W_310#INS-W_2", 1404 | "INS-W_316#INS-W_2", 1405 | "INS-W_317#INS-W_2", 1406 | "INS-W_326#INS-W_2", 1407 | "INS-W_327#INS-W_2", 1408 | "INS-W_328#INS-W_2", 1409 | "INS-W_330#INS-W_2", 1410 | "INS-W_331#INS-W_2", 1411 | "INS-W_341#INS-W_2", 1412 | "INS-W_342#INS-W_2", 1413 | "INS-W_343#INS-W_2", 1414 | "INS-W_345#INS-W_2", 1415 | "INS-W_347#INS-W_2", 1416 | "INS-W_348#INS-W_2", 1417 | "INS-W_351#INS-W_2", 1418 | "INS-W_354#INS-W_2", 1419 | "INS-W_355#INS-W_2", 1420 | "INS-W_356#INS-W_2", 1421 | "INS-W_357#INS-W_2", 1422 | "INS-W_359#INS-W_2", 1423 | "INS-W_365#INS-W_2", 1424 | "INS-W_369#INS-W_2", 1425 | "INS-W_370#INS-W_2", 1426 | "INS-W_373#INS-W_2", 1427 | "INS-W_374#INS-W_2", 1428 | "INS-W_375#INS-W_2", 1429 | "INS-W_376#INS-W_2", 1430 | "INS-W_380#INS-W_2", 1431 | "INS-W_383#INS-W_2", 1432 | "INS-W_388#INS-W_2", 1433 | "INS-W_391#INS-W_2", 1434 | "INS-W_395#INS-W_2", 1435 | "INS-W_398#INS-W_2", 1436 | "INS-W_403#INS-W_2", 1437 | "INS-W_405#INS-W_2", 1438 | "INS-W_406#INS-W_2", 1439 | "INS-W_416#INS-W_2", 1440 | "INS-W_421#INS-W_2", 1441 | "INS-W_430#INS-W_2", 1442 | "INS-W_436#INS-W_2", 1443 | "INS-W_439#INS-W_2", 1444 | "INS-W_440#INS-W_2", 1445 | "INS-W_445#INS-W_2", 1446 | "INS-W_446#INS-W_2", 1447 | "INS-W_447#INS-W_2", 1448 | "INS-W_449#INS-W_2", 1449 | "INS-W_452#INS-W_2", 1450 | "INS-W_453#INS-W_2", 1451 | "INS-W_454#INS-W_2", 1452 | "INS-W_466#INS-W_2", 1453 | "INS-W_467#INS-W_2", 1454 | "INS-W_474#INS-W_2", 1455 | "INS-W_475#INS-W_2", 1456 | "INS-W_476#INS-W_2", 1457 | "INS-W_478#INS-W_2", 1458 | "INS-W_480#INS-W_2", 1459 | "INS-W_481#INS-W_2", 1460 | "INS-W_483#INS-W_2", 1461 | "INS-W_484#INS-W_2", 1462 | "INS-W_486#INS-W_2", 1463 | "INS-W_492#INS-W_2", 1464 | "INS-W_501#INS-W_2", 1465 | "INS-W_506#INS-W_2", 1466 | "INS-W_507#INS-W_2", 1467 | "INS-W_511#INS-W_2", 1468 | "INS-W_513#INS-W_2", 1469 | "INS-W_524#INS-W_2", 1470 | "INS-W_529#INS-W_2", 1471 | "INS-W_530#INS-W_2", 1472 | "INS-W_540#INS-W_2", 1473 | "INS-W_559#INS-W_2", 1474 | "INS-W_564#INS-W_2", 1475 | "INS-W_566#INS-W_2" 1476 | ], 1477 | "INS-W_3": 1478 | [ 1479 | "INS-W_601#INS-W_3", 1480 | "INS-W_602#INS-W_3", 1481 | "INS-W_603#INS-W_3", 1482 | "INS-W_604#INS-W_3", 1483 | "INS-W_605#INS-W_3", 1484 | "INS-W_606#INS-W_3", 1485 | "INS-W_607#INS-W_3", 1486 | "INS-W_608#INS-W_3", 1487 | "INS-W_609#INS-W_3", 1488 | "INS-W_610#INS-W_3", 1489 | "INS-W_611#INS-W_3", 1490 | "INS-W_612#INS-W_3", 1491 | "INS-W_613#INS-W_3", 1492 | "INS-W_614#INS-W_3", 1493 | "INS-W_615#INS-W_3", 1494 | "INS-W_616#INS-W_3", 1495 | "INS-W_617#INS-W_3", 1496 | "INS-W_618#INS-W_3", 1497 | "INS-W_619#INS-W_3", 1498 | "INS-W_620#INS-W_3", 1499 | "INS-W_621#INS-W_3", 1500 | "INS-W_622#INS-W_3", 1501 | "INS-W_623#INS-W_3", 1502 | "INS-W_624#INS-W_3", 1503 | "INS-W_625#INS-W_3", 1504 | 
"INS-W_626#INS-W_3", 1505 | "INS-W_628#INS-W_3", 1506 | "INS-W_632#INS-W_3", 1507 | "INS-W_633#INS-W_3", 1508 | "INS-W_636#INS-W_3", 1509 | "INS-W_637#INS-W_3", 1510 | "INS-W_641#INS-W_3", 1511 | "INS-W_643#INS-W_3", 1512 | "INS-W_644#INS-W_3", 1513 | "INS-W_645#INS-W_3", 1514 | "INS-W_646#INS-W_3", 1515 | "INS-W_647#INS-W_3", 1516 | "INS-W_649#INS-W_3", 1517 | "INS-W_650#INS-W_3", 1518 | "INS-W_651#INS-W_3", 1519 | "INS-W_652#INS-W_3", 1520 | "INS-W_653#INS-W_3", 1521 | "INS-W_654#INS-W_3", 1522 | "INS-W_655#INS-W_3", 1523 | "INS-W_657#INS-W_3", 1524 | "INS-W_658#INS-W_3", 1525 | "INS-W_659#INS-W_3", 1526 | "INS-W_660#INS-W_3", 1527 | "INS-W_661#INS-W_3", 1528 | "INS-W_663#INS-W_3", 1529 | "INS-W_664#INS-W_3", 1530 | "INS-W_665#INS-W_3", 1531 | "INS-W_666#INS-W_3", 1532 | "INS-W_669#INS-W_3", 1533 | "INS-W_670#INS-W_3", 1534 | "INS-W_672#INS-W_3", 1535 | "INS-W_673#INS-W_3", 1536 | "INS-W_674#INS-W_3", 1537 | "INS-W_676#INS-W_3", 1538 | "INS-W_677#INS-W_3", 1539 | "INS-W_678#INS-W_3", 1540 | "INS-W_680#INS-W_3", 1541 | "INS-W_682#INS-W_3", 1542 | "INS-W_683#INS-W_3", 1543 | "INS-W_684#INS-W_3", 1544 | "INS-W_685#INS-W_3", 1545 | "INS-W_686#INS-W_3", 1546 | "INS-W_687#INS-W_3", 1547 | "INS-W_688#INS-W_3", 1548 | "INS-W_691#INS-W_3", 1549 | "INS-W_693#INS-W_3", 1550 | "INS-W_694#INS-W_3", 1551 | "INS-W_700#INS-W_3", 1552 | "INS-W_706#INS-W_3", 1553 | "INS-W_707#INS-W_3", 1554 | "INS-W_708#INS-W_3", 1555 | "INS-W_709#INS-W_3", 1556 | "INS-W_710#INS-W_3", 1557 | "INS-W_711#INS-W_3", 1558 | "INS-W_714#INS-W_3", 1559 | "INS-W_718#INS-W_3", 1560 | "INS-W_719#INS-W_3", 1561 | "INS-W_720#INS-W_3", 1562 | "INS-W_721#INS-W_3", 1563 | "INS-W_722#INS-W_3", 1564 | "INS-W_723#INS-W_3", 1565 | "INS-W_724#INS-W_3", 1566 | "INS-W_725#INS-W_3", 1567 | "INS-W_726#INS-W_3", 1568 | "INS-W_727#INS-W_3", 1569 | "INS-W_728#INS-W_3", 1570 | "INS-W_729#INS-W_3", 1571 | "INS-W_732#INS-W_3", 1572 | "INS-W_734#INS-W_3", 1573 | "INS-W_737#INS-W_3", 1574 | "INS-W_738#INS-W_3", 1575 | "INS-W_740#INS-W_3", 1576 | "INS-W_742#INS-W_3", 1577 | "INS-W_743#INS-W_3", 1578 | "INS-W_747#INS-W_3", 1579 | "INS-W_748#INS-W_3", 1580 | "INS-W_750#INS-W_3", 1581 | "INS-W_751#INS-W_3", 1582 | "INS-W_752#INS-W_3", 1583 | "INS-W_753#INS-W_3", 1584 | "INS-W_757#INS-W_3" 1585 | ], 1586 | "INS-W_4": 1587 | [ 1588 | "INS-W_1002#INS-W_4", 1589 | "INS-W_1004#INS-W_4", 1590 | "INS-W_1005#INS-W_4", 1591 | "INS-W_1006#INS-W_4", 1592 | "INS-W_1008#INS-W_4", 1593 | "INS-W_1009#INS-W_4", 1594 | "INS-W_1010#INS-W_4", 1595 | "INS-W_1011#INS-W_4", 1596 | "INS-W_1012#INS-W_4", 1597 | "INS-W_1013#INS-W_4", 1598 | "INS-W_1014#INS-W_4", 1599 | "INS-W_1015#INS-W_4", 1600 | "INS-W_1016#INS-W_4", 1601 | "INS-W_1017#INS-W_4", 1602 | "INS-W_1018#INS-W_4", 1603 | "INS-W_1019#INS-W_4", 1604 | "INS-W_1024#INS-W_4", 1605 | "INS-W_1028#INS-W_4", 1606 | "INS-W_1037#INS-W_4", 1607 | "INS-W_1038#INS-W_4", 1608 | "INS-W_1040#INS-W_4", 1609 | "INS-W_1044#INS-W_4", 1610 | "INS-W_1080#INS-W_4", 1611 | "INS-W_1200#INS-W_4", 1612 | "INS-W_1201#INS-W_4", 1613 | "INS-W_1204#INS-W_4", 1614 | "INS-W_1206#INS-W_4", 1615 | "INS-W_1210#INS-W_4", 1616 | "INS-W_1212#INS-W_4", 1617 | "INS-W_1213#INS-W_4", 1618 | "INS-W_1216#INS-W_4", 1619 | "INS-W_1220#INS-W_4", 1620 | "INS-W_1221#INS-W_4", 1621 | "INS-W_901#INS-W_4", 1622 | "INS-W_903#INS-W_4", 1623 | "INS-W_904#INS-W_4", 1624 | "INS-W_905#INS-W_4", 1625 | "INS-W_910#INS-W_4", 1626 | "INS-W_911#INS-W_4", 1627 | "INS-W_912#INS-W_4", 1628 | "INS-W_913#INS-W_4", 1629 | "INS-W_914#INS-W_4", 1630 | "INS-W_916#INS-W_4", 1631 | 
"INS-W_918#INS-W_4", 1632 | "INS-W_922#INS-W_4", 1633 | "INS-W_923#INS-W_4", 1634 | "INS-W_925#INS-W_4", 1635 | "INS-W_928#INS-W_4", 1636 | "INS-W_929#INS-W_4", 1637 | "INS-W_931#INS-W_4", 1638 | "INS-W_932#INS-W_4", 1639 | "INS-W_933#INS-W_4", 1640 | "INS-W_934#INS-W_4", 1641 | "INS-W_935#INS-W_4", 1642 | "INS-W_936#INS-W_4", 1643 | "INS-W_937#INS-W_4", 1644 | "INS-W_938#INS-W_4", 1645 | "INS-W_939#INS-W_4", 1646 | "INS-W_940#INS-W_4", 1647 | "INS-W_941#INS-W_4", 1648 | "INS-W_942#INS-W_4", 1649 | "INS-W_943#INS-W_4", 1650 | "INS-W_944#INS-W_4", 1651 | "INS-W_945#INS-W_4", 1652 | "INS-W_946#INS-W_4", 1653 | "INS-W_947#INS-W_4", 1654 | "INS-W_948#INS-W_4", 1655 | "INS-W_950#INS-W_4", 1656 | "INS-W_951#INS-W_4", 1657 | "INS-W_953#INS-W_4", 1658 | "INS-W_955#INS-W_4", 1659 | "INS-W_958#INS-W_4", 1660 | "INS-W_961#INS-W_4", 1661 | "INS-W_962#INS-W_4", 1662 | "INS-W_963#INS-W_4", 1663 | "INS-W_964#INS-W_4", 1664 | "INS-W_965#INS-W_4", 1665 | "INS-W_966#INS-W_4", 1666 | "INS-W_968#INS-W_4", 1667 | "INS-W_969#INS-W_4", 1668 | "INS-W_970#INS-W_4", 1669 | "INS-W_971#INS-W_4", 1670 | "INS-W_972#INS-W_4", 1671 | "INS-W_973#INS-W_4", 1672 | "INS-W_975#INS-W_4", 1673 | "INS-W_976#INS-W_4", 1674 | "INS-W_977#INS-W_4", 1675 | "INS-W_978#INS-W_4", 1676 | "INS-W_979#INS-W_4", 1677 | "INS-W_980#INS-W_4", 1678 | "INS-W_981#INS-W_4", 1679 | "INS-W_982#INS-W_4", 1680 | "INS-W_983#INS-W_4", 1681 | "INS-W_984#INS-W_4", 1682 | "INS-W_985#INS-W_4", 1683 | "INS-W_986#INS-W_4", 1684 | "INS-W_987#INS-W_4", 1685 | "INS-W_988#INS-W_4", 1686 | "INS-W_989#INS-W_4", 1687 | "INS-W_991#INS-W_4", 1688 | "INS-W_993#INS-W_4", 1689 | "INS-W_994#INS-W_4", 1690 | "INS-W_995#INS-W_4", 1691 | "INS-W_997#INS-W_4", 1692 | "INS-W_998#INS-W_4", 1693 | "INS-W_999#INS-W_4" 1694 | ] 1695 | }, 1696 | "INS-W_4": 1697 | { 1698 | "INS-W_1": 1699 | [ 1700 | "INS-W_002#INS-W_1", 1701 | "INS-W_009#INS-W_1", 1702 | "INS-W_015#INS-W_1", 1703 | "INS-W_020#INS-W_1", 1704 | "INS-W_023#INS-W_1", 1705 | "INS-W_042#INS-W_1", 1706 | "INS-W_046#INS-W_1", 1707 | "INS-W_048#INS-W_1", 1708 | "INS-W_055#INS-W_1", 1709 | "INS-W_058#INS-W_1", 1710 | "INS-W_063#INS-W_1", 1711 | "INS-W_068#INS-W_1", 1712 | "INS-W_076#INS-W_1", 1713 | "INS-W_082#INS-W_1", 1714 | "INS-W_084#INS-W_1", 1715 | "INS-W_087#INS-W_1", 1716 | "INS-W_094#INS-W_1", 1717 | "INS-W_095#INS-W_1", 1718 | "INS-W_096#INS-W_1", 1719 | "INS-W_098#INS-W_1", 1720 | "INS-W_099#INS-W_1", 1721 | "INS-W_108#INS-W_1", 1722 | "INS-W_109#INS-W_1", 1723 | "INS-W_110#INS-W_1", 1724 | "INS-W_117#INS-W_1", 1725 | "INS-W_125#INS-W_1", 1726 | "INS-W_136#INS-W_1", 1727 | "INS-W_149#INS-W_1", 1728 | "INS-W_153#INS-W_1", 1729 | "INS-W_156#INS-W_1", 1730 | "INS-W_158#INS-W_1", 1731 | "INS-W_169#INS-W_1", 1732 | "INS-W_176#INS-W_1", 1733 | "INS-W_191#INS-W_1", 1734 | "INS-W_193#INS-W_1", 1735 | "INS-W_196#INS-W_1", 1736 | "INS-W_198#INS-W_1", 1737 | "INS-W_209#INS-W_1" 1738 | ], 1739 | "INS-W_2": 1740 | [ 1741 | "INS-W_304#INS-W_2", 1742 | "INS-W_305#INS-W_2", 1743 | "INS-W_306#INS-W_2", 1744 | "INS-W_307#INS-W_2", 1745 | "INS-W_310#INS-W_2", 1746 | "INS-W_311#INS-W_2", 1747 | "INS-W_316#INS-W_2", 1748 | "INS-W_317#INS-W_2", 1749 | "INS-W_326#INS-W_2", 1750 | "INS-W_327#INS-W_2", 1751 | "INS-W_328#INS-W_2", 1752 | "INS-W_330#INS-W_2", 1753 | "INS-W_331#INS-W_2", 1754 | "INS-W_333#INS-W_2", 1755 | "INS-W_340#INS-W_2", 1756 | "INS-W_341#INS-W_2", 1757 | "INS-W_342#INS-W_2", 1758 | "INS-W_343#INS-W_2", 1759 | "INS-W_345#INS-W_2", 1760 | "INS-W_347#INS-W_2", 1761 | "INS-W_348#INS-W_2", 1762 | "INS-W_351#INS-W_2", 1763 | 
"INS-W_353#INS-W_2", 1764 | "INS-W_354#INS-W_2", 1765 | "INS-W_355#INS-W_2", 1766 | "INS-W_356#INS-W_2", 1767 | "INS-W_357#INS-W_2", 1768 | "INS-W_359#INS-W_2", 1769 | "INS-W_365#INS-W_2", 1770 | "INS-W_367#INS-W_2", 1771 | "INS-W_369#INS-W_2", 1772 | "INS-W_370#INS-W_2", 1773 | "INS-W_373#INS-W_2", 1774 | "INS-W_374#INS-W_2", 1775 | "INS-W_375#INS-W_2", 1776 | "INS-W_376#INS-W_2", 1777 | "INS-W_380#INS-W_2", 1778 | "INS-W_383#INS-W_2", 1779 | "INS-W_388#INS-W_2", 1780 | "INS-W_391#INS-W_2", 1781 | "INS-W_395#INS-W_2", 1782 | "INS-W_398#INS-W_2", 1783 | "INS-W_399#INS-W_2", 1784 | "INS-W_403#INS-W_2", 1785 | "INS-W_404#INS-W_2", 1786 | "INS-W_405#INS-W_2", 1787 | "INS-W_406#INS-W_2", 1788 | "INS-W_408#INS-W_2", 1789 | "INS-W_412#INS-W_2", 1790 | "INS-W_413#INS-W_2", 1791 | "INS-W_416#INS-W_2", 1792 | "INS-W_418#INS-W_2", 1793 | "INS-W_421#INS-W_2", 1794 | "INS-W_430#INS-W_2", 1795 | "INS-W_436#INS-W_2", 1796 | "INS-W_439#INS-W_2", 1797 | "INS-W_440#INS-W_2", 1798 | "INS-W_445#INS-W_2", 1799 | "INS-W_446#INS-W_2", 1800 | "INS-W_447#INS-W_2", 1801 | "INS-W_449#INS-W_2", 1802 | "INS-W_450#INS-W_2", 1803 | "INS-W_452#INS-W_2", 1804 | "INS-W_453#INS-W_2", 1805 | "INS-W_454#INS-W_2", 1806 | "INS-W_466#INS-W_2", 1807 | "INS-W_467#INS-W_2", 1808 | "INS-W_473#INS-W_2", 1809 | "INS-W_474#INS-W_2", 1810 | "INS-W_475#INS-W_2", 1811 | "INS-W_476#INS-W_2", 1812 | "INS-W_478#INS-W_2", 1813 | "INS-W_480#INS-W_2", 1814 | "INS-W_481#INS-W_2", 1815 | "INS-W_483#INS-W_2", 1816 | "INS-W_484#INS-W_2", 1817 | "INS-W_486#INS-W_2", 1818 | "INS-W_488#INS-W_2", 1819 | "INS-W_492#INS-W_2", 1820 | "INS-W_494#INS-W_2", 1821 | "INS-W_501#INS-W_2", 1822 | "INS-W_502#INS-W_2", 1823 | "INS-W_506#INS-W_2", 1824 | "INS-W_507#INS-W_2", 1825 | "INS-W_511#INS-W_2", 1826 | "INS-W_513#INS-W_2", 1827 | "INS-W_524#INS-W_2", 1828 | "INS-W_529#INS-W_2", 1829 | "INS-W_530#INS-W_2", 1830 | "INS-W_540#INS-W_2", 1831 | "INS-W_542#INS-W_2", 1832 | "INS-W_543#INS-W_2", 1833 | "INS-W_550#INS-W_2", 1834 | "INS-W_559#INS-W_2", 1835 | "INS-W_560#INS-W_2", 1836 | "INS-W_561#INS-W_2", 1837 | "INS-W_563#INS-W_2", 1838 | "INS-W_564#INS-W_2", 1839 | "INS-W_566#INS-W_2" 1840 | ], 1841 | "INS-W_3": 1842 | [ 1843 | "INS-W_601#INS-W_3", 1844 | "INS-W_602#INS-W_3", 1845 | "INS-W_603#INS-W_3", 1846 | "INS-W_604#INS-W_3", 1847 | "INS-W_605#INS-W_3", 1848 | "INS-W_606#INS-W_3", 1849 | "INS-W_607#INS-W_3", 1850 | "INS-W_608#INS-W_3", 1851 | "INS-W_609#INS-W_3", 1852 | "INS-W_610#INS-W_3", 1853 | "INS-W_611#INS-W_3", 1854 | "INS-W_612#INS-W_3", 1855 | "INS-W_613#INS-W_3", 1856 | "INS-W_614#INS-W_3", 1857 | "INS-W_615#INS-W_3", 1858 | "INS-W_616#INS-W_3", 1859 | "INS-W_617#INS-W_3", 1860 | "INS-W_618#INS-W_3", 1861 | "INS-W_619#INS-W_3", 1862 | "INS-W_620#INS-W_3", 1863 | "INS-W_621#INS-W_3", 1864 | "INS-W_622#INS-W_3", 1865 | "INS-W_623#INS-W_3", 1866 | "INS-W_624#INS-W_3", 1867 | "INS-W_625#INS-W_3", 1868 | "INS-W_626#INS-W_3", 1869 | "INS-W_628#INS-W_3", 1870 | "INS-W_632#INS-W_3", 1871 | "INS-W_633#INS-W_3", 1872 | "INS-W_636#INS-W_3", 1873 | "INS-W_637#INS-W_3", 1874 | "INS-W_641#INS-W_3", 1875 | "INS-W_643#INS-W_3", 1876 | "INS-W_644#INS-W_3", 1877 | "INS-W_645#INS-W_3", 1878 | "INS-W_646#INS-W_3", 1879 | "INS-W_647#INS-W_3", 1880 | "INS-W_649#INS-W_3", 1881 | "INS-W_650#INS-W_3", 1882 | "INS-W_651#INS-W_3", 1883 | "INS-W_652#INS-W_3", 1884 | "INS-W_653#INS-W_3", 1885 | "INS-W_654#INS-W_3", 1886 | "INS-W_655#INS-W_3", 1887 | "INS-W_657#INS-W_3", 1888 | "INS-W_658#INS-W_3", 1889 | "INS-W_659#INS-W_3", 1890 | "INS-W_660#INS-W_3", 1891 | 
"INS-W_661#INS-W_3", 1892 | "INS-W_663#INS-W_3", 1893 | "INS-W_664#INS-W_3", 1894 | "INS-W_665#INS-W_3", 1895 | "INS-W_666#INS-W_3", 1896 | "INS-W_669#INS-W_3", 1897 | "INS-W_670#INS-W_3", 1898 | "INS-W_672#INS-W_3", 1899 | "INS-W_673#INS-W_3", 1900 | "INS-W_674#INS-W_3", 1901 | "INS-W_676#INS-W_3", 1902 | "INS-W_677#INS-W_3", 1903 | "INS-W_678#INS-W_3", 1904 | "INS-W_680#INS-W_3", 1905 | "INS-W_682#INS-W_3", 1906 | "INS-W_683#INS-W_3", 1907 | "INS-W_684#INS-W_3", 1908 | "INS-W_685#INS-W_3", 1909 | "INS-W_686#INS-W_3", 1910 | "INS-W_687#INS-W_3", 1911 | "INS-W_688#INS-W_3", 1912 | "INS-W_691#INS-W_3", 1913 | "INS-W_693#INS-W_3", 1914 | "INS-W_694#INS-W_3", 1915 | "INS-W_700#INS-W_3", 1916 | "INS-W_706#INS-W_3", 1917 | "INS-W_707#INS-W_3", 1918 | "INS-W_708#INS-W_3", 1919 | "INS-W_709#INS-W_3", 1920 | "INS-W_710#INS-W_3", 1921 | "INS-W_711#INS-W_3", 1922 | "INS-W_714#INS-W_3", 1923 | "INS-W_718#INS-W_3", 1924 | "INS-W_719#INS-W_3", 1925 | "INS-W_720#INS-W_3", 1926 | "INS-W_721#INS-W_3", 1927 | "INS-W_722#INS-W_3", 1928 | "INS-W_723#INS-W_3", 1929 | "INS-W_724#INS-W_3", 1930 | "INS-W_725#INS-W_3", 1931 | "INS-W_726#INS-W_3", 1932 | "INS-W_727#INS-W_3", 1933 | "INS-W_728#INS-W_3", 1934 | "INS-W_729#INS-W_3", 1935 | "INS-W_732#INS-W_3", 1936 | "INS-W_734#INS-W_3", 1937 | "INS-W_737#INS-W_3", 1938 | "INS-W_738#INS-W_3", 1939 | "INS-W_740#INS-W_3", 1940 | "INS-W_742#INS-W_3", 1941 | "INS-W_743#INS-W_3", 1942 | "INS-W_747#INS-W_3", 1943 | "INS-W_748#INS-W_3", 1944 | "INS-W_750#INS-W_3", 1945 | "INS-W_751#INS-W_3", 1946 | "INS-W_752#INS-W_3", 1947 | "INS-W_753#INS-W_3", 1948 | "INS-W_757#INS-W_3" 1949 | ], 1950 | "INS-W_4": 1951 | [ 1952 | "INS-W_1001#INS-W_4", 1953 | "INS-W_1002#INS-W_4", 1954 | "INS-W_1003#INS-W_4", 1955 | "INS-W_1004#INS-W_4", 1956 | "INS-W_1005#INS-W_4", 1957 | "INS-W_1006#INS-W_4", 1958 | "INS-W_1007#INS-W_4", 1959 | "INS-W_1008#INS-W_4", 1960 | "INS-W_1009#INS-W_4", 1961 | "INS-W_1010#INS-W_4", 1962 | "INS-W_1011#INS-W_4", 1963 | "INS-W_1012#INS-W_4", 1964 | "INS-W_1013#INS-W_4", 1965 | "INS-W_1014#INS-W_4", 1966 | "INS-W_1015#INS-W_4", 1967 | "INS-W_1016#INS-W_4", 1968 | "INS-W_1017#INS-W_4", 1969 | "INS-W_1018#INS-W_4", 1970 | "INS-W_1019#INS-W_4", 1971 | "INS-W_1024#INS-W_4", 1972 | "INS-W_1026#INS-W_4", 1973 | "INS-W_1028#INS-W_4", 1974 | "INS-W_1036#INS-W_4", 1975 | "INS-W_1037#INS-W_4", 1976 | "INS-W_1038#INS-W_4", 1977 | "INS-W_1039#INS-W_4", 1978 | "INS-W_1040#INS-W_4", 1979 | "INS-W_1044#INS-W_4", 1980 | "INS-W_1061#INS-W_4", 1981 | "INS-W_1080#INS-W_4", 1982 | "INS-W_1200#INS-W_4", 1983 | "INS-W_1201#INS-W_4", 1984 | "INS-W_1203#INS-W_4", 1985 | "INS-W_1204#INS-W_4", 1986 | "INS-W_1205#INS-W_4", 1987 | "INS-W_1206#INS-W_4", 1988 | "INS-W_1210#INS-W_4", 1989 | "INS-W_1211#INS-W_4", 1990 | "INS-W_1212#INS-W_4", 1991 | "INS-W_1213#INS-W_4", 1992 | "INS-W_1216#INS-W_4", 1993 | "INS-W_1220#INS-W_4", 1994 | "INS-W_1221#INS-W_4", 1995 | "INS-W_900#INS-W_4", 1996 | "INS-W_901#INS-W_4", 1997 | "INS-W_902#INS-W_4", 1998 | "INS-W_903#INS-W_4", 1999 | "INS-W_904#INS-W_4", 2000 | "INS-W_905#INS-W_4", 2001 | "INS-W_906#INS-W_4", 2002 | "INS-W_907#INS-W_4", 2003 | "INS-W_908#INS-W_4", 2004 | "INS-W_909#INS-W_4", 2005 | "INS-W_910#INS-W_4", 2006 | "INS-W_911#INS-W_4", 2007 | "INS-W_912#INS-W_4", 2008 | "INS-W_913#INS-W_4", 2009 | "INS-W_914#INS-W_4", 2010 | "INS-W_915#INS-W_4", 2011 | "INS-W_916#INS-W_4", 2012 | "INS-W_917#INS-W_4", 2013 | "INS-W_918#INS-W_4", 2014 | "INS-W_922#INS-W_4", 2015 | "INS-W_923#INS-W_4", 2016 | "INS-W_924#INS-W_4", 2017 | "INS-W_925#INS-W_4", 2018 | 
"INS-W_926#INS-W_4", 2019 | "INS-W_927#INS-W_4", 2020 | "INS-W_928#INS-W_4", 2021 | "INS-W_929#INS-W_4", 2022 | "INS-W_931#INS-W_4", 2023 | "INS-W_932#INS-W_4", 2024 | "INS-W_933#INS-W_4", 2025 | "INS-W_934#INS-W_4", 2026 | "INS-W_935#INS-W_4", 2027 | "INS-W_936#INS-W_4", 2028 | "INS-W_937#INS-W_4", 2029 | "INS-W_938#INS-W_4", 2030 | "INS-W_939#INS-W_4", 2031 | "INS-W_940#INS-W_4", 2032 | "INS-W_941#INS-W_4", 2033 | "INS-W_942#INS-W_4", 2034 | "INS-W_943#INS-W_4", 2035 | "INS-W_944#INS-W_4", 2036 | "INS-W_945#INS-W_4", 2037 | "INS-W_946#INS-W_4", 2038 | "INS-W_947#INS-W_4", 2039 | "INS-W_948#INS-W_4", 2040 | "INS-W_950#INS-W_4", 2041 | "INS-W_951#INS-W_4", 2042 | "INS-W_953#INS-W_4", 2043 | "INS-W_954#INS-W_4", 2044 | "INS-W_955#INS-W_4", 2045 | "INS-W_956#INS-W_4", 2046 | "INS-W_957#INS-W_4", 2047 | "INS-W_958#INS-W_4", 2048 | "INS-W_959#INS-W_4", 2049 | "INS-W_960#INS-W_4", 2050 | "INS-W_961#INS-W_4", 2051 | "INS-W_962#INS-W_4", 2052 | "INS-W_963#INS-W_4", 2053 | "INS-W_964#INS-W_4", 2054 | "INS-W_965#INS-W_4", 2055 | "INS-W_966#INS-W_4", 2056 | "INS-W_968#INS-W_4", 2057 | "INS-W_969#INS-W_4", 2058 | "INS-W_970#INS-W_4", 2059 | "INS-W_971#INS-W_4", 2060 | "INS-W_972#INS-W_4", 2061 | "INS-W_973#INS-W_4", 2062 | "INS-W_974#INS-W_4", 2063 | "INS-W_975#INS-W_4", 2064 | "INS-W_976#INS-W_4", 2065 | "INS-W_977#INS-W_4", 2066 | "INS-W_978#INS-W_4", 2067 | "INS-W_979#INS-W_4", 2068 | "INS-W_980#INS-W_4", 2069 | "INS-W_981#INS-W_4", 2070 | "INS-W_982#INS-W_4", 2071 | "INS-W_983#INS-W_4", 2072 | "INS-W_984#INS-W_4", 2073 | "INS-W_985#INS-W_4", 2074 | "INS-W_986#INS-W_4", 2075 | "INS-W_987#INS-W_4", 2076 | "INS-W_988#INS-W_4", 2077 | "INS-W_989#INS-W_4", 2078 | "INS-W_991#INS-W_4", 2079 | "INS-W_992#INS-W_4", 2080 | "INS-W_993#INS-W_4", 2081 | "INS-W_994#INS-W_4", 2082 | "INS-W_995#INS-W_4", 2083 | "INS-W_996#INS-W_4", 2084 | "INS-W_997#INS-W_4", 2085 | "INS-W_998#INS-W_4", 2086 | "INS-W_999#INS-W_4" 2087 | ] 2088 | }, 2089 | "INS-D_1": 2090 | { 2091 | "INS-D_1": 2092 | [ 2093 | "INS-D_0002#INS-D_1", 2094 | "INS-D_0005#INS-D_1", 2095 | "INS-D_0006#INS-D_1", 2096 | "INS-D_0007#INS-D_1", 2097 | "INS-D_0012#INS-D_1", 2098 | "INS-D_0013#INS-D_1", 2099 | "INS-D_0014#INS-D_1", 2100 | "INS-D_0015#INS-D_1", 2101 | "INS-D_0022#INS-D_1", 2102 | "INS-D_0023#INS-D_1", 2103 | "INS-D_0024#INS-D_1", 2104 | "INS-D_0025#INS-D_1", 2105 | "INS-D_0026#INS-D_1", 2106 | "INS-D_0027#INS-D_1", 2107 | "INS-D_0028#INS-D_1", 2108 | "INS-D_0030#INS-D_1", 2109 | "INS-D_0031#INS-D_1", 2110 | "INS-D_0032#INS-D_1", 2111 | "INS-D_0033#INS-D_1", 2112 | "INS-D_0036#INS-D_1", 2113 | "INS-D_0039#INS-D_1", 2114 | "INS-D_0045#INS-D_1", 2115 | "INS-D_0046#INS-D_1", 2116 | "INS-D_0047#INS-D_1", 2117 | "INS-D_0050#INS-D_1", 2118 | "INS-D_0052#INS-D_1", 2119 | "INS-D_0053#INS-D_1", 2120 | "INS-D_0054#INS-D_1", 2121 | "INS-D_0055#INS-D_1", 2122 | "INS-D_0059#INS-D_1", 2123 | "INS-D_0060#INS-D_1", 2124 | "INS-D_0061#INS-D_1", 2125 | "INS-D_0062#INS-D_1", 2126 | "INS-D_0064#INS-D_1", 2127 | "INS-D_0068#INS-D_1", 2128 | "INS-D_0069#INS-D_1", 2129 | "INS-D_0071#INS-D_1", 2130 | "INS-D_0073#INS-D_1", 2131 | "INS-D_0077#INS-D_1", 2132 | "INS-D_0081#INS-D_1", 2133 | "INS-D_0082#INS-D_1", 2134 | "INS-D_0084#INS-D_1", 2135 | "INS-D_0087#INS-D_1", 2136 | "INS-D_0089#INS-D_1", 2137 | "INS-D_0091#INS-D_1", 2138 | "INS-D_0092#INS-D_1", 2139 | "INS-D_0096#INS-D_1", 2140 | "INS-D_0097#INS-D_1", 2141 | "INS-D_0098#INS-D_1", 2142 | "INS-D_0099#INS-D_1", 2143 | "INS-D_0100#INS-D_1", 2144 | "INS-D_0101#INS-D_1", 2145 | "INS-D_0102#INS-D_1", 2146 | 
"INS-D_0103#INS-D_1", 2147 | "INS-D_0105#INS-D_1", 2148 | "INS-D_0108#INS-D_1", 2149 | "INS-D_0111#INS-D_1", 2150 | "INS-D_0115#INS-D_1" 2151 | ], 2152 | "INS-D_2": 2153 | [ 2154 | "INS-D_0002#INS-D_2", 2155 | "INS-D_0005#INS-D_2", 2156 | "INS-D_0006#INS-D_2", 2157 | "INS-D_0007#INS-D_2", 2158 | "INS-D_0012#INS-D_2", 2159 | "INS-D_0013#INS-D_2", 2160 | "INS-D_0014#INS-D_2", 2161 | "INS-D_0015#INS-D_2", 2162 | "INS-D_0022#INS-D_2", 2163 | "INS-D_0023#INS-D_2", 2164 | "INS-D_0024#INS-D_2", 2165 | "INS-D_0025#INS-D_2", 2166 | "INS-D_0026#INS-D_2", 2167 | "INS-D_0027#INS-D_2", 2168 | "INS-D_0028#INS-D_2", 2169 | "INS-D_0030#INS-D_2", 2170 | "INS-D_0031#INS-D_2", 2171 | "INS-D_0032#INS-D_2", 2172 | "INS-D_0033#INS-D_2", 2173 | "INS-D_0036#INS-D_2", 2174 | "INS-D_0039#INS-D_2", 2175 | "INS-D_0045#INS-D_2", 2176 | "INS-D_0046#INS-D_2", 2177 | "INS-D_0047#INS-D_2", 2178 | "INS-D_0050#INS-D_2", 2179 | "INS-D_0052#INS-D_2", 2180 | "INS-D_0053#INS-D_2", 2181 | "INS-D_0054#INS-D_2", 2182 | "INS-D_0055#INS-D_2", 2183 | "INS-D_0059#INS-D_2", 2184 | "INS-D_0060#INS-D_2", 2185 | "INS-D_0061#INS-D_2", 2186 | "INS-D_0062#INS-D_2", 2187 | "INS-D_0064#INS-D_2", 2188 | "INS-D_0068#INS-D_2", 2189 | "INS-D_0069#INS-D_2", 2190 | "INS-D_0071#INS-D_2", 2191 | "INS-D_0073#INS-D_2", 2192 | "INS-D_0077#INS-D_2", 2193 | "INS-D_0081#INS-D_2", 2194 | "INS-D_0082#INS-D_2", 2195 | "INS-D_0084#INS-D_2", 2196 | "INS-D_0087#INS-D_2", 2197 | "INS-D_0089#INS-D_2", 2198 | "INS-D_0091#INS-D_2", 2199 | "INS-D_0092#INS-D_2", 2200 | "INS-D_0096#INS-D_2", 2201 | "INS-D_0097#INS-D_2", 2202 | "INS-D_0098#INS-D_2", 2203 | "INS-D_0099#INS-D_2", 2204 | "INS-D_0100#INS-D_2", 2205 | "INS-D_0101#INS-D_2", 2206 | "INS-D_0102#INS-D_2", 2207 | "INS-D_0103#INS-D_2", 2208 | "INS-D_0105#INS-D_2", 2209 | "INS-D_0108#INS-D_2", 2210 | "INS-D_0111#INS-D_2", 2211 | "INS-D_0115#INS-D_2" 2212 | ] 2213 | }, 2214 | "INS-D_2": 2215 | { 2216 | "INS-D_1": 2217 | [ 2218 | "INS-D_0002#INS-D_1", 2219 | "INS-D_0005#INS-D_1", 2220 | "INS-D_0006#INS-D_1", 2221 | "INS-D_0007#INS-D_1", 2222 | "INS-D_0012#INS-D_1", 2223 | "INS-D_0013#INS-D_1", 2224 | "INS-D_0014#INS-D_1", 2225 | "INS-D_0015#INS-D_1", 2226 | "INS-D_0022#INS-D_1", 2227 | "INS-D_0023#INS-D_1", 2228 | "INS-D_0024#INS-D_1", 2229 | "INS-D_0025#INS-D_1", 2230 | "INS-D_0026#INS-D_1", 2231 | "INS-D_0027#INS-D_1", 2232 | "INS-D_0028#INS-D_1", 2233 | "INS-D_0030#INS-D_1", 2234 | "INS-D_0031#INS-D_1", 2235 | "INS-D_0032#INS-D_1", 2236 | "INS-D_0033#INS-D_1", 2237 | "INS-D_0036#INS-D_1", 2238 | "INS-D_0039#INS-D_1", 2239 | "INS-D_0045#INS-D_1", 2240 | "INS-D_0046#INS-D_1", 2241 | "INS-D_0047#INS-D_1", 2242 | "INS-D_0050#INS-D_1", 2243 | "INS-D_0052#INS-D_1", 2244 | "INS-D_0053#INS-D_1", 2245 | "INS-D_0054#INS-D_1", 2246 | "INS-D_0055#INS-D_1", 2247 | "INS-D_0059#INS-D_1", 2248 | "INS-D_0060#INS-D_1", 2249 | "INS-D_0061#INS-D_1", 2250 | "INS-D_0062#INS-D_1", 2251 | "INS-D_0064#INS-D_1", 2252 | "INS-D_0068#INS-D_1", 2253 | "INS-D_0069#INS-D_1", 2254 | "INS-D_0071#INS-D_1", 2255 | "INS-D_0073#INS-D_1", 2256 | "INS-D_0077#INS-D_1", 2257 | "INS-D_0081#INS-D_1", 2258 | "INS-D_0082#INS-D_1", 2259 | "INS-D_0084#INS-D_1", 2260 | "INS-D_0087#INS-D_1", 2261 | "INS-D_0089#INS-D_1", 2262 | "INS-D_0091#INS-D_1", 2263 | "INS-D_0092#INS-D_1", 2264 | "INS-D_0096#INS-D_1", 2265 | "INS-D_0097#INS-D_1", 2266 | "INS-D_0098#INS-D_1", 2267 | "INS-D_0099#INS-D_1", 2268 | "INS-D_0100#INS-D_1", 2269 | "INS-D_0101#INS-D_1", 2270 | "INS-D_0102#INS-D_1", 2271 | "INS-D_0103#INS-D_1", 2272 | "INS-D_0105#INS-D_1", 2273 | "INS-D_0108#INS-D_1", 
2274 | "INS-D_0111#INS-D_1", 2275 | "INS-D_0115#INS-D_1" 2276 | ], 2277 | "INS-D_2": 2278 | [ 2279 | "INS-D_0002#INS-D_2", 2280 | "INS-D_0005#INS-D_2", 2281 | "INS-D_0006#INS-D_2", 2282 | "INS-D_0007#INS-D_2", 2283 | "INS-D_0012#INS-D_2", 2284 | "INS-D_0013#INS-D_2", 2285 | "INS-D_0014#INS-D_2", 2286 | "INS-D_0015#INS-D_2", 2287 | "INS-D_0022#INS-D_2", 2288 | "INS-D_0023#INS-D_2", 2289 | "INS-D_0024#INS-D_2", 2290 | "INS-D_0025#INS-D_2", 2291 | "INS-D_0026#INS-D_2", 2292 | "INS-D_0027#INS-D_2", 2293 | "INS-D_0028#INS-D_2", 2294 | "INS-D_0030#INS-D_2", 2295 | "INS-D_0031#INS-D_2", 2296 | "INS-D_0032#INS-D_2", 2297 | "INS-D_0033#INS-D_2", 2298 | "INS-D_0036#INS-D_2", 2299 | "INS-D_0039#INS-D_2", 2300 | "INS-D_0045#INS-D_2", 2301 | "INS-D_0046#INS-D_2", 2302 | "INS-D_0047#INS-D_2", 2303 | "INS-D_0050#INS-D_2", 2304 | "INS-D_0052#INS-D_2", 2305 | "INS-D_0053#INS-D_2", 2306 | "INS-D_0054#INS-D_2", 2307 | "INS-D_0055#INS-D_2", 2308 | "INS-D_0059#INS-D_2", 2309 | "INS-D_0060#INS-D_2", 2310 | "INS-D_0061#INS-D_2", 2311 | "INS-D_0062#INS-D_2", 2312 | "INS-D_0064#INS-D_2", 2313 | "INS-D_0068#INS-D_2", 2314 | "INS-D_0069#INS-D_2", 2315 | "INS-D_0071#INS-D_2", 2316 | "INS-D_0073#INS-D_2", 2317 | "INS-D_0077#INS-D_2", 2318 | "INS-D_0081#INS-D_2", 2319 | "INS-D_0082#INS-D_2", 2320 | "INS-D_0084#INS-D_2", 2321 | "INS-D_0087#INS-D_2", 2322 | "INS-D_0089#INS-D_2", 2323 | "INS-D_0091#INS-D_2", 2324 | "INS-D_0092#INS-D_2", 2325 | "INS-D_0096#INS-D_2", 2326 | "INS-D_0097#INS-D_2", 2327 | "INS-D_0098#INS-D_2", 2328 | "INS-D_0099#INS-D_2", 2329 | "INS-D_0100#INS-D_2", 2330 | "INS-D_0101#INS-D_2", 2331 | "INS-D_0102#INS-D_2", 2332 | "INS-D_0103#INS-D_2", 2333 | "INS-D_0105#INS-D_2", 2334 | "INS-D_0108#INS-D_2", 2335 | "INS-D_0111#INS-D_2", 2336 | "INS-D_0115#INS-D_2" 2337 | ] 2338 | } 2339 | } 2340 | } --------------------------------------------------------------------------------
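For reference, the mapping above is a plain two-level JSON object: the outer key names one dataset wave, the inner key names another, and the value is the list of "<pid>#<dataset>" identifiers shared between the two. The snippet below is a minimal, illustrative sketch of reading that file, assuming it sits at data/additional_user_setup/overlapping_pids.json relative to the project root; the helper name overlapping_pids and the lookup path are assumptions, not the repository's own API.

import json
import os

# Hypothetical location of the mapping shown above; the project's own code may
# resolve this path differently (e.g., via utils/path_definitions.py).
OVERLAP_JSON = os.path.join("data", "additional_user_setup", "overlapping_pids.json")

def overlapping_pids(ds_key_a: str, ds_key_b: str) -> list:
    """Return the "<pid>#<dataset>" entries recorded under ds_key_a for ds_key_b."""
    with open(OVERLAP_JSON) as f:
        mapping = json.load(f)
    # Two-level lookup: outer dataset key, then inner dataset key; missing keys
    # simply yield an empty list rather than raising.
    return mapping.get(ds_key_a, {}).get(ds_key_b, [])

if __name__ == "__main__":
    pids = overlapping_pids("INS-D_2", "INS-D_1")
    print(f"{len(pids)} overlapping participants, e.g. {pids[:3]}")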