├── code ├── tools │ ├── __init__.py │ ├── plot.py │ ├── py_op.py │ ├── segmentation.py │ ├── measures.py │ ├── parse.py │ └── utils.py ├── loaddata │ ├── __init__.py │ ├── dataloader.py │ └── data_function.py ├── compare.py ├── models │ ├── __init__.py │ └── lstm.py ├── test.py ├── loss.py ├── main.py └── function.py ├── analysis ├── .gitignore ├── check_result.py ├── analyse_feature.py ├── analyse_comparison.py ├── stat.py ├── analyse_variation_trend.py ├── missing_rate.py └── analyse_test_files.py ├── requirement.txt ├── preprocessing ├── gen_feature_time.py ├── gen_label.py ├── gen_master_feature.py ├── gen_vital_feature.py └── gen_feature_order.py ├── run.sh ├── file ├── index_group_dict.json ├── similar.json ├── group_index_dict.json ├── label.csv ├── index_feature_list.json ├── feature_index_dict.json ├── range_dict.json ├── master.csv └── task1 │ └── val.json ├── README.md ├── run.py └── .gitignore /code/tools/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /code/loaddata/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /analysis/.gitignore: -------------------------------------------------------------------------------- 1 | *.csv 2 | -------------------------------------------------------------------------------- /requirement.txt: -------------------------------------------------------------------------------- 1 | tqdm 2 | torch 3 | torchvision 4 | sklearn 5 | 6 | -------------------------------------------------------------------------------- /code/compare.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | for seed in [1, 40, 500, 100, 2019]: 4 | for task in ['case1', 'task1', 'task2']: 5 | cmd = 'python main.py --task {:s} --seed {:d}'.format(task, seed) 6 | os.system(cmd) 7 | -------------------------------------------------------------------------------- /code/models/__init__.py: -------------------------------------------------------------------------------- 1 | # coding=utf8 2 | ######################################################################### 3 | # File Name: models/__init__.py 4 | # Author: ccyin 5 | # mail: ccyin04@gmail.com 6 | # Created Time: Mon 19 Aug 2019 02:16:51 AM CST 7 | ######################################################################### 8 | -------------------------------------------------------------------------------- /preprocessing/gen_feature_time.py: -------------------------------------------------------------------------------- 1 | # coding=utf8 2 | 3 | import os 4 | import sys 5 | sys.path.append('../code') 6 | 7 | import tools 8 | from tools import parse, py_op 9 | args = parse.args 10 | 11 | 12 | 13 | def gen_patient_time_dict(): 14 | vital_file = args.vital_file 15 | patient_time_dict = dict() 16 | for i_line,line in enumerate(open(vital_file)): 17 | if 'event_time' not in line: 18 | patient, time = line.strip().split(',')[:2] 19 | patient_time_dict[patient] = max(patient_time_dict.get(patient, 0), float(time)) 20 | py_op.mywritejson(os.path.join(args.result_dir, 'patient_time_dict.json'), patient_time_dict) 21 | 22 | 23 | 24 | def main(): 25 | gen_patient_time_dict() 26 | 27 | 28 | 29 | 30 | 31 | if __name__ == '__main__': 32 | main() 33 | -------------------------------------------------------------------------------- 
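A minimal usage sketch for the patient_time_dict.json written by gen_feature_time.py above (illustrative only, not a file in the repository; it assumes the dict lives under the result directory created in the README, and the fixed offset of 4 mirrors the `t - patient_time_dict[p] - 4` shift applied later in gen_vital_feature.py):

# sketch: consuming result/patient_time_dict.json (hypothetical helper, not part of the repo)
import json

def load_patient_time_dict(path='../result/patient_time_dict.json'):
    # patient_time_dict.json maps each patient id to the largest event_time
    # found in the vital file (see gen_patient_time_dict above)
    with open(path) as f:
        return json.load(f)

def to_relative_time(patient, event_time, patient_time_dict, offset=4):
    # express an absolute event_time relative to the patient's last recorded
    # event minus a fixed offset, matching the `t - patient_time_dict[p] - 4`
    # alignment used in gen_vital_feature.py
    return float(event_time) - patient_time_dict[patient] - offset

if __name__ == '__main__':
    # toy dict instead of the real json, so the sketch runs stand-alone
    demo = {'A100001': 30.0}
    print(to_relative_time('A100001', 20.0, demo))  # prints -14.0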
/preprocessing/gen_label.py: -------------------------------------------------------------------------------- 1 | # coding=utf8 2 | 3 | import os 4 | import sys 5 | import json 6 | sys.path.append('../code') 7 | 8 | import tools 9 | from tools import parse, py_op 10 | args = parse.args 11 | 12 | 13 | 14 | 15 | def gen_patient_label_dict(): 16 | patient_label_dict = dict() 17 | label_file = args.label_file 18 | for i_line,line in enumerate(open(label_file)): 19 | if i_line != 0: 20 | data = line.strip().split(',') 21 | patient = data[0] 22 | label = data[-1] 23 | patient_label_dict[patient] = int(label) 24 | py_op.mywritejson(os.path.join(args.result_dir, 'patient_label_dict.json'), patient_label_dict) 25 | 26 | 27 | 28 | 29 | 30 | 31 | def main(): 32 | gen_patient_label_dict() 33 | 34 | 35 | 36 | 37 | 38 | if __name__ == '__main__': 39 | main() 40 | -------------------------------------------------------------------------------- /code/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | from tools import parse, py_op 4 | args = parse.args 5 | 6 | 7 | for seed in [1, 40, 500, 100, 2019]: 8 | for task in ['case1', 'task1', 'task2']: 9 | cmd = 'python main.py --phase test --final 0 --batch-size 8 --task {:s} --seed {:d} --resume ../data/models/{:s}-snm-{:d}-snr-{:d}-value-{:d}-trend-{:d}-cat-{:d}-lt-{:d}-size-{:d}-seed-{:d}-{:s}'.format(task, seed, task, 10 | # cmd = 'python main.py --phase valid --batch-size 8 --task {:s} --seed {:d} --resume ../data/models/{:s}-snm-{:d}-snr-{:d}-value-{:d}-trend-{:d}-cat-{:d}-lt-{:d}-size-{:d}-seed-{:d}-{:s}'.format(task, seed, task, 11 | args.split_num, args.split_nor, args.use_value, args.use_trend, 12 | args.use_cat, args.last_time, args.embed_size, args.seed, 'best.ckpt') 13 | print cmd 14 | os.system(cmd) 15 | print 16 | print 17 | print 18 | break 19 | 20 | -------------------------------------------------------------------------------- /run.sh: -------------------------------------------------------------------------------- 1 | ######################################################################### 2 | # File Name: run.sh 3 | # Author: ccyin 4 | # mail: ccyin04@gmail.com 5 | # Created Time: Thu 12 Sep 2019 06:46:05 AM UTC 6 | ######################################################################### 7 | #!/bin/bash 8 | 9 | # task1 10 | # python run.py --label-file /home/yin/data/sepsis2_task1_training/sepsis2_task1_label_training.csv --vital-file /home/yin/data/sepsis2_task1_training/sepsis2_task1_vital_training.csv --master-file /home/yin/data/sepsis2_task1_training/sepsis2_task1_master_training.csv --task task1 11 | 12 | # task2 13 | python run.py --label-file /home/yin/data/sepsis2_task2_training/sepsis2_task2_label_training.csv --vital-file /home/yin/data/sepsis2_task2_training/sepsis2_task2_vital_training.csv --master-file /home/yin/data/sepsis2_task2_training/sepsis2_task2_master_training.csv --task task2 14 | 15 | # cd code 16 | # python main.py --task task1 17 | 18 | 19 | -------------------------------------------------------------------------------- /file/index_group_dict.json: -------------------------------------------------------------------------------- 1 | { 2 | "128": 125, 3 | "129": 125, 4 | "3": 3, 5 | "4": 3, 6 | "11": 11, 7 | "12": 11, 8 | "13": 11, 9 | "25": 25, 10 | "26": 25, 11 | "27": 25, 12 | "29": 29, 13 | "30": 29, 14 | "39": 39, 15 | "52": 52, 16 | "53": 52, 17 | "54": 54, 18 | "55": 54, 19 | "57": 57, 20 | "58": 57, 21 | "59": 59, 22 | "60": 59, 23 | 
"61": 59, 24 | "74": 74, 25 | "75": 75, 26 | "76": 74, 27 | "77": 75, 28 | "78": 74, 29 | "81": 81, 30 | "82": 81, 31 | "88": 88, 32 | "90": 88, 33 | "91": 39, 34 | "92": 92, 35 | "93": 92, 36 | "94": 94, 37 | "95": 94, 38 | "96": 96, 39 | "97": 96, 40 | "100": 94, 41 | "108": 108, 42 | "110": 108, 43 | "115": 115, 44 | "116": 115, 45 | "118": 118, 46 | "120": 118, 47 | "121": 118, 48 | "125": 125, 49 | "126": 125, 50 | "127": 125 51 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # DII-Challenge-2019 2 | The early identification of sepsis cases. 3 | 4 | # build the env 5 | 6 | pip install -r requirement.txt 7 | 8 | # data preprocessing 9 | 10 | - creat result folder for data preprocessing results 11 | 12 | mkdir result 13 | mkdir data 14 | mkdir data/models 15 | 16 | - generate json files 17 | 18 | cd preprocessing 19 | python gen_master_feature.py --master-file ../file/master.csv 20 | python gen_feature_time.py --vital-file ../file/vital.csv # only for task1 21 | python gen_vital_feature.py --vital-file ../file/vital.csv 22 | python gen_label_feature.py --label-file ../file/label.csv 23 | 24 | # train and validate the model, the best model will saved in ../data/models/ 25 | 26 | python main.py --task case1 # for task1 case1 27 | python main.py --task task1 # for task1 case2 28 | python main.py --task task2 # for task2 29 | 30 | # You can also run the code by: 31 | 32 | python run.py --label-file ../file/label.csv --vital-file ../file/vital.csv --master-file ../master.csv --task case1 33 | 34 | 35 | -------------------------------------------------------------------------------- /code/tools/plot.py: -------------------------------------------------------------------------------- 1 | # coding=utf8 2 | import matplotlib.pyplot as plt 3 | import numpy as np 4 | 5 | def plot_multi_graph(image_list, name_list, save_path=None, show=False): 6 | graph_place = int(np.sqrt(len(name_list) - 1)) + 1 7 | for i, (image, name) in enumerate(zip(image_list, name_list)): 8 | ax1 = plt.subplot(graph_place,graph_place,i+1) 9 | ax1.set_title(name) 10 | # plt.imshow(image,cmap='gray') 11 | plt.imshow(image) 12 | plt.axis('off') 13 | if save_path: 14 | plt.savefig(save_path) 15 | pass 16 | if show: 17 | plt.show() 18 | 19 | def plot_multi_line(x_list, y_list, name_list, save_path=None, show=False): 20 | graph_place = int(np.sqrt(len(name_list) - 1)) + 1 21 | for i, (x, y, name) in enumerate(zip(x_list, y_list, name_list)): 22 | ax1 = plt.subplot(graph_place,graph_place,i+1) 23 | ax1.set_title(name) 24 | plt.plot(x,y) 25 | # plt.imshow(image,cmap='gray') 26 | if save_path: 27 | plt.savefig(save_path) 28 | if show: 29 | plt.show() 30 | 31 | 32 | -------------------------------------------------------------------------------- /file/similar.json: -------------------------------------------------------------------------------- 1 | [ 2 | [ 3 | 11, 4 | 12, 5 | 13 6 | ], 7 | [ 8 | 25, 9 | 26, 10 | 27 11 | ], 12 | [ 13 | 29, 14 | 30 15 | ], 16 | [ 17 | 52, 18 | 53 19 | ], 20 | [ 21 | 54, 22 | 55 23 | ], 24 | [ 25 | 57, 26 | 58 27 | ], 28 | [ 29 | 59, 30 | 60, 31 | 61 32 | ], 33 | [ 34 | 75, 35 | 77 36 | ], 37 | [ 38 | 74, 39 | 76, 40 | 78 41 | ], 42 | [ 43 | 81, 44 | 82 45 | ], 46 | [ 47 | 92, 48 | 93 49 | ], 50 | [ 51 | 94, 52 | 95, 53 | 100 54 | ], 55 | [ 56 | 96, 57 | 97 58 | ], 59 | [ 60 | 108, 61 | 110 62 | ], 63 | [ 64 | 115, 65 | 116 66 | ], 67 | [ 68 | 118, 69 | 120, 70 | 121 71 | 
], 72 | [ 73 | 125, 74 | 126, 75 | 127, 76 | 128, 77 | 129 78 | ], 79 | [ 80 | 3, 81 | 4 82 | ], 83 | [ 84 | 88, 85 | 90 86 | ], 87 | [ 88 | 39, 89 | 91 90 | ] 91 | ] 92 | -------------------------------------------------------------------------------- /analysis/check_result.py: -------------------------------------------------------------------------------- 1 | # coding=utf8 2 | 3 | import os 4 | import sys 5 | import json 6 | sys.path.append('../code') 7 | 8 | import tools 9 | from tools import parse, py_op 10 | import numpy as np 11 | 12 | args = parse.args 13 | 14 | def check_result(): 15 | def get_patient(task): 16 | if task == 'case1': 17 | fo = '/home/yin/comparison/onset_case1/onset_case1_training_seed{:d}/'.format(args.seed) 18 | elif task == 'task1': 19 | fo = '/home/yin/comparison/onset_case2/onset_case2_training_seed{:d}/'.format(args.seed) 20 | elif task == 'task2': 21 | fo = '/home/yin/comparison/mortality/mortality_training_seed{:d}/'.format(args.seed) 22 | result_csv = os.path.join(fo, 'final_result.csv') 23 | pids = set() 24 | for i,line in enumerate(open(result_csv)): 25 | assert i == len(pids) 26 | pid = line.split(',')[0] 27 | pids.add(pid) 28 | return pids 29 | test_patient_dict = json.load(open('../file/test_patient_dict.json')) 30 | for task in ['case1', 'task1', 'task2']: 31 | test = set(test_patient_dict[task]) 32 | pids = get_patient(task) 33 | print len(pids), len(test), len(test & pids) 34 | # if len(test) > len(pids): 35 | # print test - pids 36 | 37 | check_result() 38 | 39 | -------------------------------------------------------------------------------- /file/group_index_dict.json: -------------------------------------------------------------------------------- 1 | { 2 | "96": [ 3 | 96, 4 | 97 5 | ], 6 | "3": [ 7 | 3, 8 | 4 9 | ], 10 | "75": [ 11 | 75, 12 | 77 13 | ], 14 | "118": [ 15 | 118, 16 | 120, 17 | 121 18 | ], 19 | "39": [ 20 | 39, 21 | 91 22 | ], 23 | "74": [ 24 | 74, 25 | 76, 26 | 78 27 | ], 28 | "11": [ 29 | 11, 30 | 12, 31 | 13 32 | ], 33 | "108": [ 34 | 108, 35 | 110 36 | ], 37 | "125": [ 38 | 125, 39 | 126, 40 | 127, 41 | 128, 42 | 129 43 | ], 44 | "81": [ 45 | 81, 46 | 82 47 | ], 48 | "115": [ 49 | 115, 50 | 116 51 | ], 52 | "52": [ 53 | 52, 54 | 53 55 | ], 56 | "54": [ 57 | 54, 58 | 55 59 | ], 60 | "57": [ 61 | 57, 62 | 58 63 | ], 64 | "88": [ 65 | 88, 66 | 90 67 | ], 68 | "25": [ 69 | 25, 70 | 26, 71 | 27 72 | ], 73 | "59": [ 74 | 59, 75 | 60, 76 | 61 77 | ], 78 | "92": [ 79 | 92, 80 | 93 81 | ], 82 | "29": [ 83 | 29, 84 | 30 85 | ], 86 | "94": [ 87 | 94, 88 | 95, 89 | 100 90 | ] 91 | } -------------------------------------------------------------------------------- /analysis/analyse_feature.py: -------------------------------------------------------------------------------- 1 | # coding=utf8 2 | 3 | import os 4 | import sys 5 | sys.path.append('../code') 6 | 7 | import tools 8 | from tools import parse, py_op 9 | import numpy as np 10 | 11 | args = parse.args 12 | 13 | # def myprint(): 14 | # pass 15 | 16 | def ana_feat_dist(task): 17 | feature_count_dict = py_op.myreadjson(os.path.join(args.file_dir, 'feature_count_dict.{:s}.json'.format(args.task))) 18 | normal_range_order_dict = py_op.myreadjson(os.path.join(args.file_dir, 'normal_range_order_dict.{:s}.json'.format(args.task))) 19 | feature_index_dict = py_op.myreadjson(os.path.join(args.file_dir, 'feature_index_dict.json'.format(args.task))) 20 | cnt_list = [] 21 | print normal_range_order_dict.keys() 22 | for k,c in feature_count_dict.items(): 23 | if c > 1 and 'event_time' != k: 24 | 
idx = feature_index_dict[k] 25 | if str(idx) not in normal_range_order_dict: 26 | continue 27 | mn, mx = normal_range_order_dict[str(idx)] 28 | print mn, mx 29 | a = int(mn * c) 30 | b = int((mx - mn) * c) 31 | c = int((1 - mx) * c) 32 | cnt_list += [a, b, c] 33 | print sorted(cnt_list) 34 | print len(cnt_list) 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | def main(): 44 | ana_feat_dist('task2') 45 | 46 | 47 | if __name__ == '__main__': 48 | os.system('clear') 49 | main() 50 | -------------------------------------------------------------------------------- /run.py: -------------------------------------------------------------------------------- 1 | 2 | import os 3 | import sys 4 | sys.path.append('code/tools') 5 | import parse 6 | args = parse.args 7 | 8 | 9 | 10 | os.system('mkdir result') 11 | os.system('mkdir data') 12 | os.system('mkdir data/models') 13 | 14 | cmd = ''' 15 | cd preprocessing 16 | python gen_master_feature.py --master-file {:s} 17 | '''.format(args.master_file) 18 | # python gen_master_feature.py --master-file ../file/master.csv 19 | print cmd 20 | os.system(cmd) 21 | 22 | cmd = ''' 23 | cd preprocessing 24 | python gen_feature_time.py --vital-file {:s} 25 | '''.format(args.vital_file) 26 | # python gen_feature_time.py --vital-file ../file/vital.csv 27 | print cmd 28 | os.system(cmd) 29 | 30 | cmd = ''' 31 | cd preprocessing 32 | python gen_feature_order.py --vital-file {:s} 33 | '''.format(args.vital_file) 34 | # python gen_vital_feature.py --vital-file ../file/vital.csv 35 | print cmd 36 | os.system(cmd) 37 | 38 | 39 | 40 | cmd = ''' 41 | cd preprocessing 42 | python gen_vital_feature.py --vital-file {:s} 43 | '''.format(args.vital_file) 44 | # python gen_vital_feature.py --vital-file ../file/vital.csv 45 | print cmd 46 | os.system(cmd) 47 | 48 | 49 | cmd = ''' 50 | cd preprocessing 51 | python gen_label.py --label-file {:s} 52 | '''.format(args.label_file) 53 | # python gen_label.py --label-file ../file/label.csv 54 | print cmd 55 | os.system(cmd) 56 | 57 | cmd = ''' 58 | cd code 59 | python main.py --task {:s} 60 | '''.format(args.task) 61 | print cmd 62 | os.system(cmd) 63 | 64 | 65 | -------------------------------------------------------------------------------- /preprocessing/gen_master_feature.py: -------------------------------------------------------------------------------- 1 | # coding=utf8 2 | 3 | import os 4 | import sys 5 | sys.path.append('../code') 6 | 7 | import tools 8 | from tools import parse, py_op 9 | args = parse.args 10 | 11 | 12 | def gen_master_feature_list(): 13 | 14 | 15 | # master information 16 | master_file = args.master_file 17 | m_set = set() 18 | for i_line,line in enumerate(open(master_file)): 19 | if i_line != 0: 20 | data = line.strip().split(',') 21 | for i,d in enumerate(data[1:]): 22 | m_set.add(str(i) + d) 23 | return sorted(m_set) 24 | 25 | 26 | 27 | def gen_patient_master_dict(master_list): 28 | patient_master_dict = dict() 29 | # master information 30 | master_file = args.master_file 31 | master_set = [set() for _ in range(6)] 32 | for i_line,line in enumerate(open(master_file)): 33 | if i_line != 0: 34 | data = line.strip().split(',') 35 | patient = data[0] 36 | feature = ['0' for _ in range(43)] 37 | for i,d in enumerate(data[1:]): 38 | m = str(i) + d 39 | idx = master_list.index(m) 40 | feature[idx] = '1' 41 | patient_master_dict[patient] = ''.join(feature) 42 | py_op.mywritejson(os.path.join(args.result_dir, 'patient_master_dict.json'), patient_master_dict) 43 | 44 | def main(): 45 | master_list = 
gen_master_feature_list() 46 | gen_patient_master_dict(master_list) 47 | 48 | 49 | 50 | 51 | 52 | if __name__ == '__main__': 53 | main() 54 | -------------------------------------------------------------------------------- /analysis/analyse_comparison.py: -------------------------------------------------------------------------------- 1 | 2 | # coding=utf8 3 | 4 | import os 5 | import sys 6 | import json 7 | sys.path.append('../code') 8 | 9 | import tools 10 | from tools import parse, py_op 11 | import numpy as np 12 | 13 | args = parse.args 14 | 15 | # def myprint(): 16 | # pass 17 | 18 | def ana_patient(): 19 | fo = '/home/yin/comparison' 20 | for task in os.listdir(fo): 21 | print '\n', fo 22 | task_dir = os.path.join(fo, task) 23 | task_dir = os.path.join(task_dir, os.listdir(task_dir)[-1]) 24 | for fi in os.listdir(task_dir): 25 | patients = py_op.myreadjson(os.path.join(task_dir, fi)) 26 | print fi, len(patients) 27 | 28 | 29 | def read_result(task='task2'): 30 | if task == 'case1': 31 | fo = '/home/yin/comparison/onset_case1/onset_case1_training_seed{:d}/'.format(args.seed) 32 | elif task == 'task1': 33 | fo = '/home/yin/comparison/onset_case2/onset_case2_training_seed{:d}/'.format(args.seed) 34 | elif task == 'task2': 35 | fo = '/home/yin/comparison/mortality/mortality_training_seed{:d}/'.format(args.seed) 36 | test_dict = json.load(open(os.path.join(fo, 'test.json'))) 37 | pset = set() 38 | for line in open(os.path.join(fo, 'result.csv')): 39 | p = line.split(',')[0] 40 | pset.add(p) 41 | print set(test_dict) - pset 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | def main(): 50 | ana_patient() 51 | read_result() 52 | 53 | 54 | if __name__ == '__main__': 55 | os.system('clear') 56 | main() 57 | -------------------------------------------------------------------------------- /file/label.csv: -------------------------------------------------------------------------------- 1 | adm_id,sepsis2 2 | A100001,0 3 | A100002,0 4 | A100004,0 5 | A100008,0 6 | A100006,0 7 | A100019,1 8 | A100029,0 9 | A100090,0 10 | A100092,1 11 | A100099,0 12 | A100094,1 13 | A100098,1 14 | A100096,1 15 | A100097,0 16 | A100049,0 17 | A100044,1 18 | A100048,1 19 | A100087,0 20 | A100061,0 21 | A100064,0 22 | A100067,1 23 | A100071,0 24 | A100074,0 25 | A100077,0 26 | A100078,0 27 | A100079,1 28 | A100089,1 29 | A100088,1 30 | A100086,1 31 | A100087,1 32 | A100089,0 33 | A100091,0 34 | A100092,0 35 | A100098,0 36 | A100099,1 37 | A100108,1 38 | A100109,0 39 | A100119,0 40 | A100128,0 41 | A100190,1 42 | A100191,1 43 | A100194,0 44 | A100196,0 45 | A100140,1 46 | A100149,0 47 | A100146,0 48 | A100148,1 49 | A100189,0 50 | A100160,0 51 | A100161,0 52 | A100171,0 53 | A100179,0 54 | A100178,0 55 | A100181,0 56 | A100182,0 57 | A100184,0 58 | A100186,0 59 | A100199,0 60 | A100194,1 61 | A100199,0 62 | A100208,0 63 | A100209,0 64 | A100217,1 65 | A100218,1 66 | A100219,1 67 | A100220,1 68 | A100221,1 69 | A100222,0 70 | A100299,0 71 | A100297,0 72 | A100240,0 73 | A100242,1 74 | A100244,1 75 | A100247,1 76 | A100282,1 77 | A100288,0 78 | A100289,1 79 | A100269,1 80 | A100268,0 81 | A100271,0 82 | A100279,1 83 | A100280,1 84 | A100286,0 85 | A100290,0 86 | A100292,0 87 | A100298,1 88 | A100299,0 89 | A100900,0 90 | A100901,0 91 | A100904,0 92 | A100910,0 93 | A100918,1 94 | A100921,0 95 | A100929,0 96 | A100928,1 97 | A100929,0 98 | A100990,0 99 | A100991,0 100 | A100998,0 101 | A100998,0 102 | -------------------------------------------------------------------------------- /.gitignore: 
-------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | MANIFEST 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | .pytest_cache/ 49 | 50 | # Translations 51 | *.mo 52 | *.pot 53 | 54 | # Django stuff: 55 | *.log 56 | local_settings.py 57 | db.sqlite3 58 | 59 | # Flask stuff: 60 | instance/ 61 | .webassets-cache 62 | 63 | # Scrapy stuff: 64 | .scrapy 65 | 66 | # Sphinx documentation 67 | docs/_build/ 68 | 69 | # PyBuilder 70 | target/ 71 | 72 | # Jupyter Notebook 73 | .ipynb_checkpoints 74 | 75 | # pyenv 76 | .python-version 77 | 78 | # celery beat schedule file 79 | celerybeat-schedule 80 | 81 | # SageMath parsed files 82 | *.sage.py 83 | 84 | # Environments 85 | .env 86 | .venv 87 | env/ 88 | venv/ 89 | ENV/ 90 | env.bak/ 91 | venv.bak/ 92 | 93 | # Spyder project settings 94 | .spyderproject 95 | .spyproject 96 | 97 | # Rope project settings 98 | .ropeproject 99 | 100 | # mkdocs documentation 101 | /site 102 | 103 | # mypy 104 | .mypy_cache/ 105 | 106 | 107 | 108 | 109 | tmp.* 110 | result/ 111 | result.*/ 112 | .idea/ 113 | data/ 114 | tmp/ 115 | log.txt 116 | out.txt 117 | log.* 118 | log/ 119 | out.* 120 | 121 | -------------------------------------------------------------------------------- /analysis/stat.py: -------------------------------------------------------------------------------- 1 | # coding=utf8 2 | 3 | import os 4 | import sys 5 | sys.path.append('../code') 6 | 7 | import tools 8 | from tools import parse, py_op 9 | import numpy as np 10 | 11 | 12 | args = parse.args 13 | 14 | # def myprint(): 15 | # pass 16 | 17 | def ana_feat_dist(task): 18 | n_split = 100 19 | feature_label_count = np.zeros((143, 2, n_split)) 20 | patient_time_record_dict = py_op.myreadjson(os.path.join(args.result_dir, 'json_data', '{:s}.json'.format(args.task))) 21 | patient_label_dict = py_op.myreadjson(os.path.join(args.file_dir, 'patient_label_dict.{:s}.json'.format(args.task))) 22 | [ [ [0. for _ in range(n_split)], [0. 
for _ in range(n_split)] ] for i in range(143) ] 23 | for ip, (p, t_dict) in enumerate(patient_time_record_dict.items()): 24 | if ip % 10000 == 0: 25 | print ip, len(patient_time_record_dict) 26 | 27 | label = patient_label_dict[p] 28 | for t, vs in t_dict.items(): 29 | for v in vs: 30 | feature, value = v 31 | idx = int(value * n_split) 32 | feature_label_count[feature, label, idx] += 1 33 | for f in range(143): 34 | for l in range(2): 35 | feature_label_count[feature, label] /= feature_label_count[feature, label].sum() 36 | np.save('../file/feature_label_count.npy', feature_label_count) 37 | 38 | 39 | 40 | def draw_pic(): 41 | def avg(ys, n = 50): 42 | nys = [] 43 | for i,y in enumerate(ys): 44 | st = max(0, i - n) 45 | en = min(len(ys), i + n + 1) 46 | nys.append(np.mean(ys[st:en])) 47 | 48 | return nys 49 | 50 | import matplotlib.pyplot as plt 51 | flc = np.load('../file/feature_label_count.npy') 52 | for f in range(143): 53 | lc = flc[f] 54 | x = range(len(lc[0])) 55 | plt.plot(x,avg(lc[0]),'b') 56 | plt.plot(x,avg(lc[1]),'r') 57 | plt.savefig('../result/fig/{:d}.png'.format(f)) 58 | plt.clf() 59 | if f > 10: 60 | break 61 | 62 | 63 | 64 | 65 | 66 | 67 | def main(): 68 | # analyze_features('task1') 69 | # ana_feat_dist('task1') 70 | draw_pic() 71 | 72 | 73 | if __name__ == '__main__': 74 | os.system('clear') 75 | main() 76 | -------------------------------------------------------------------------------- /preprocessing/gen_vital_feature.py: -------------------------------------------------------------------------------- 1 | # coding=utf8 2 | 3 | import os 4 | import sys 5 | import json 6 | sys.path.append('../code') 7 | 8 | import tools 9 | from tools import parse, py_op 10 | args = parse.args 11 | 12 | 13 | def gen_json_data(): 14 | vital_file = args.vital_file 15 | patient_time_record_dict = dict() 16 | feature_index_dict = py_op.myreadjson(os.path.join(args.file_dir, 'feature_index_dict.json')) 17 | feature_value_order_dict = py_op.myreadjson(os.path.join(args.file_dir, 'feature_value_order_dict.json')) 18 | feature_value_order_dict = { str(feature_index_dict[k]):v for k,v in feature_value_order_dict.items() if 'event' not in k} 19 | index_group_dict = py_op.myreadjson(os.path.join(args.file_dir, 'index_group_dict.json')) 20 | patient_time_dict = py_op.myreadjson(os.path.join(args.result_dir, 'patient_time_dict.json')) 21 | mx_time = - 100 22 | for i_line, line in enumerate(open(vital_file)): 23 | if i_line % 10000 == 0: 24 | print 'line', i_line 25 | if 'event_time' not in line: 26 | data = line.strip().split(',') 27 | patient, time = data[:2] 28 | time = int(float(time)) 29 | mx_time = max(mx_time, time) 30 | if patient not in patient_time_record_dict: 31 | patient_time_record_dict[patient] = dict() 32 | if time not in patient_time_record_dict[patient]: 33 | patient_time_record_dict[patient][time] = dict() 34 | 35 | data = data[2:] 36 | vs = dict() 37 | for idx, val in enumerate(data): 38 | if len(val) == 0: 39 | continue 40 | if str(idx) in index_group_dict: 41 | idx = index_group_dict[str(idx)] 42 | value_order = feature_value_order_dict[str(idx)] 43 | vs[idx] = value_order[val] 44 | patient_time_record_dict[patient][time].update(vs) 45 | 46 | new_d = dict() 47 | for p, tr in patient_time_record_dict.items(): 48 | new_d[p] = dict() 49 | for t, vs in tr.items(): 50 | if mx_time > 0: 51 | t = int(t - patient_time_dict[p] - 4) 52 | if t < - 102: 53 | continue 54 | nvs = [] 55 | for k in sorted(vs.keys()): 56 | nvs.append([k, vs[k]]) 57 | new_d[p][t] = nvs 58 | with 
open(os.path.join(args.result_dir, 'patient_time_record_dict.json'), 'w') as f: 59 | # f.write(json.dumps(new_d, indent=4)) 60 | f.write(json.dumps(new_d)) 61 | 62 | 63 | 64 | def main(): 65 | gen_json_data() 66 | 67 | 68 | 69 | 70 | 71 | if __name__ == '__main__': 72 | main() 73 | -------------------------------------------------------------------------------- /analysis/analyse_variation_trend.py: -------------------------------------------------------------------------------- 1 | # coding=utf8 2 | 3 | import os 4 | import sys 5 | import json 6 | sys.path.append('../code') 7 | 8 | import tools 9 | from tools import parse, py_op 10 | args = parse.args 11 | 12 | 13 | 14 | def analyse_variation_trend(task='task1'): 15 | ''' 16 | generate new vital file 17 | ''' 18 | feature_variation_trend_dict = dict() 19 | 20 | feature_value_order_dict = py_op.myreadjson(os.path.join(args.file_dir, 'feature_value_order_dict.{:s}.json'.format(task))) 21 | 22 | patient_time_dict = py_op.myreadjson(os.path.join(args.file_dir, 'patient_time_dict.json')) 23 | 24 | task_dir = os.path.join(args.data_dir, 'sepsis2_{:s}_training'.format(task)) 25 | vital_file = os.path.join(task_dir, 'sepsis2_{:s}_vital_training.csv'.format(task)) 26 | vital_dict = { } # key-valuelist-dict 27 | 28 | last_patient = '' 29 | feature_time_value_dict = dict() 30 | for i_line,line in enumerate(open(vital_file)): 31 | if i_line % 10000 == 0: 32 | print i_line 33 | if i_line: 34 | ctt_list = line.strip().split(',')[2:] 35 | new_ctt = line.strip().split(',')[:2] 36 | if task == 'task1': 37 | patient, time = new_ctt 38 | new_time = float(time) - patient_time_dict[patient] - 4.0 39 | new_ctt = [patient, str(new_time)] 40 | 41 | patient, time = new_ctt 42 | time = int(float(time)) 43 | 44 | if patient != last_patient: 45 | for feature, tv in feature_time_value_dict.items(): 46 | if len(tv) > 4: 47 | ts = sorted(tv.keys()) 48 | vs = [tv[t] for t in ts] 49 | feature_variation_trend_dict[feature] = feature_variation_trend_dict.get(feature, []) + [[ts, vs]] 50 | if i_line >= 500000: 51 | break 52 | 53 | feature_time_value_dict = dict() 54 | last_patient = patient 55 | 56 | for idx, value in enumerate(ctt_list): 57 | if len(value.strip()): 58 | value = float(value.strip()) 59 | if idx not in feature_time_value_dict: 60 | feature_time_value_dict[idx] = { } 61 | feature_time_value_dict[idx][time] = value 62 | 63 | 64 | 65 | # py_op.mywritejson(os.path.join(args.file_dir, 'feature_variation_trend_dict.json'), feature_variation_trend_dict) 66 | with open (os.path.join(args.file_dir, 'feature_variation_trend_dict.json'), 'w') as f: 67 | f.write(json.dumps(feature_variation_trend_dict)) 68 | 69 | 70 | 71 | 72 | 73 | def draw_pic(): 74 | import numpy as np 75 | import matplotlib.pyplot as plt 76 | flc = np.load('../file/feature_label_count.npy') 77 | fvt = py_op.myreadjson(os.path.join(args.file_dir, 'feature_variation_trend_dict.json')) 78 | 79 | for f in range(143): 80 | vt = fvt[str(f)] 81 | print vt 82 | for i, (t, v) in enumerate(vt): 83 | plt.plot(t,v) 84 | if i > 10: 85 | break 86 | plt.savefig('../result/variation_trend/{:d}.png'.format(f)) 87 | plt.clf() 88 | 89 | 90 | 91 | 92 | def main(): 93 | # analyse_variation_trend() 94 | draw_pic() 95 | pass 96 | 97 | 98 | 99 | 100 | 101 | if __name__ == '__main__': 102 | os.system('clear') 103 | main() 104 | -------------------------------------------------------------------------------- /preprocessing/gen_feature_order.py: -------------------------------------------------------------------------------- 1 
| # coding=utf8 2 | 3 | import os 4 | import sys 5 | sys.path.append('../code') 6 | 7 | import tools 8 | from tools import parse, py_op 9 | args = parse.args 10 | 11 | 12 | def gen_feature_order_dict(): 13 | ''' 14 | generate the order of value for each feature 15 | ''' 16 | 17 | feature_value_order_dict = dict() 18 | 19 | # vital information 20 | vital_file = args.vital_file 21 | vital_dict = { } # key-valuelist-dict 22 | for i_line,line in enumerate(open(vital_file)): 23 | if i_line % 10000 == 0: 24 | print i_line 25 | # if i_line > 10000: 26 | # break 27 | if i_line == 0: 28 | new_line = '' 29 | vis = 0 30 | for c in line: 31 | if c == '"': 32 | vis = (vis + 1) % 2 33 | if vis == 1 and c == ',': 34 | c = ';' 35 | new_line += c 36 | line = new_line 37 | col_list = line.strip().split(',')[1:] 38 | for col in col_list: 39 | vital_dict[col] = [] 40 | else: 41 | ctt_list = line.strip().split(',')[1:] 42 | assert len(ctt_list) == len(col_list) 43 | for col,ctt in zip(col_list, ctt_list): 44 | if len(ctt): 45 | vital_dict[col].append(float(ctt)) 46 | # if i_line > 10000: 47 | # break 48 | # if i_line % 10000 == 0: 49 | # print i_line 50 | 51 | 52 | # add group info 53 | groups = py_op.myreadjson(os.path.join(args.file_dir, 'similar.json')) 54 | feature_index_dict = py_op.myreadjson(os.path.join(args.file_dir, 'feature_index_dict.json')) 55 | index_feature_list = py_op.myreadjson(os.path.join(args.file_dir, 'index_feature_list.json')) 56 | for g in groups: 57 | for k in g: 58 | mg = min(g) 59 | if k != mg: 60 | kf = index_feature_list[k] 61 | mf = index_feature_list[mg] 62 | vital_dict[mf] = vital_dict[mf] + vital_dict[kf] 63 | vital_dict.pop(kf) 64 | print 'features', len(vital_dict) 65 | 66 | # feature_count_dict = { k: len(v) for k,v in vital_dict.items() } 67 | # py_op.mywritejson(os.path.join(args.file_dir, 'feature_count_dict.json'), feature_count_dict) 68 | 69 | 70 | 71 | ms_list = [] 72 | for col in col_list: 73 | if col not in vital_dict: 74 | continue 75 | value_list = sorted(vital_dict[col]) 76 | value_order_dict = dict() 77 | value_minorder_dict = dict() 78 | value_maxorder_dict = dict() 79 | for i_value, value in enumerate(value_list): 80 | if value not in value_minorder_dict: 81 | value_minorder_dict[value] = i_value 82 | if value == value_list[-1]: 83 | value_maxorder_dict[value] = len(value_list) - 1 84 | break 85 | if value != value_list[i_value+1]: 86 | value_maxorder_dict[value] = i_value 87 | for value in value_maxorder_dict: 88 | value_order_dict[value] = (value_maxorder_dict[value] + value_minorder_dict[value]) / 2.0 / len(value_list) 89 | feature_value_order_dict[col] = value_order_dict 90 | py_op.mywritejson(os.path.join(args.file_dir, 'feature_value_order_dict.json'), feature_value_order_dict) 91 | 92 | 93 | def main(): 94 | gen_feature_order_dict() 95 | 96 | 97 | 98 | 99 | 100 | if __name__ == '__main__': 101 | os.system('clear') 102 | main() 103 | -------------------------------------------------------------------------------- /code/loss.py: -------------------------------------------------------------------------------- 1 | # coding=utf8 2 | ######################################################################### 3 | # File Name: classify_loss.py 4 | # Author: ccyin 5 | # mail: ccyin04@gmail.com 6 | # Created Time: 2019年06月11日 星期二 14时33分59秒 7 | ######################################################################### 8 | 9 | import sys 10 | sys.path.append('classification/') 11 | 12 | # torch 13 | import numpy as np 14 | import torch 15 | import torchvision 16 | 
import torch.nn as nn 17 | import torch.nn.functional as F 18 | from torch.autograd import Variable 19 | import torch.backends.cudnn as cudnn 20 | from torch.utils.data import DataLoader 21 | 22 | 23 | class Loss(nn.Module): 24 | def __init__(self): 25 | super(Loss, self).__init__() 26 | self.classify_loss = nn.BCELoss() 27 | self.sigmoid = nn.Sigmoid() 28 | self.regress_loss = nn.SmoothL1Loss() 29 | 30 | def forward(self, font_output, font_target, use_hard_mining=False): 31 | batch_size = font_output.size(0) 32 | # font_output = font_output.cpu() 33 | # font_target = font_target.cpu() 34 | 35 | 36 | 37 | # font_target = font_target.unsqueeze(-1).expand(font_output.size()).contiguous() 38 | font_output = self.sigmoid(font_output) 39 | # font_loss = self.classify_loss(font_output, font_target) 40 | # return [font_loss, font_loss, font_loss] 41 | 42 | 43 | font_output = font_output.view(-1) 44 | font_target = font_target.view(-1) 45 | pos_index = font_target == 1 46 | neg_index = font_target == 0 47 | 48 | assert font_output.size() == font_target.size() 49 | assert pos_index.size() == font_target.size() 50 | assert neg_index.size() == font_target.size() 51 | 52 | # print font_output.size(), font_target.size() 53 | 54 | 55 | # pos 56 | # print pos_index.dtype 57 | # print pos_index.size() 58 | # print pos_index 59 | pos_target = font_target[pos_index] 60 | pos_output = font_output[pos_index] 61 | # pos_output = font_output.cpu()[pos_index.cpu()].cuda() 62 | # pos_target = font_target.cpu()[pos_index.cpu()].cuda() 63 | if use_hard_mining: 64 | num_hard_pos = max(2, int(0.2 * batch_size)) 65 | if len(pos_output) > num_hard_pos: 66 | pos_output, pos_target = hard_mining(pos_output, pos_target, num_hard_pos, largest=False, start=int(num_hard_pos/4)) 67 | if len(pos_output): 68 | pos_loss = self.classify_loss(pos_output, pos_target) * 0.5 69 | else: 70 | pos_loss = 0 71 | 72 | 73 | # neg 74 | neg_output = font_output[neg_index] 75 | neg_target = font_target[neg_index] 76 | if use_hard_mining: 77 | num_hard_neg = max(num_hard_pos, 2) 78 | if len(neg_output) > num_hard_neg: 79 | neg_output, neg_target = hard_mining(neg_output, neg_target, num_hard_neg, largest=True, start=int(num_hard_pos/4)) 80 | if len(neg_output): 81 | neg_loss = self.classify_loss(neg_output, neg_target) * 0.5 82 | else: 83 | neg_loss = 0 84 | 85 | font_loss = pos_loss + neg_loss 86 | return [font_loss, pos_loss, neg_loss] 87 | # return [font_loss.cuda(), pos_loss, neg_loss] 88 | 89 | 90 | def hard_mining(neg_output, neg_labels, num_hard, largest=True, start=0): 91 | # num_hard = min(max(num_hard, 10), len(neg_output)) 92 | _, idcs = torch.topk(neg_output, min(num_hard, len(neg_output)), largest=largest) 93 | start = 0 94 | idcs = idcs[start:] 95 | neg_output = torch.index_select(neg_output, 0, idcs) 96 | neg_labels = torch.index_select(neg_labels, 0, idcs) 97 | return neg_output, neg_labels 98 | -------------------------------------------------------------------------------- /code/tools/py_op.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | 此文件用于常用python函数的使用 4 | """ 5 | import os 6 | import json 7 | import traceback 8 | from collections import OrderedDict 9 | import random 10 | 11 | import sys 12 | # reload(sys) 13 | # sys.setdefaultencoding('utf-8') 14 | 15 | ################################################################################ 16 | ### pre define variables 17 | #:: enumerate 18 | #:: raw_input 19 | #:: listdir 20 | #:: sorted 21 | ### pre 
define function 22 | def mywritejson(save_path,content): 23 | content = json.dumps(content,indent=4,ensure_ascii=False) 24 | with open(save_path,'w') as f: 25 | f.write(content) 26 | 27 | def myreadjson(load_path): 28 | with open(load_path,'r') as f: 29 | return json.loads(f.read()) 30 | 31 | def mywritefile(save_path,content): 32 | with open(save_path,'w') as f: 33 | f.write(content) 34 | 35 | def myreadfile(load_path): 36 | with open(load_path,'r') as f: 37 | return f.read() 38 | 39 | def myprint(content): 40 | print(json.dumps(content,indent=4,ensure_ascii=False)) 41 | 42 | def rm(fi): 43 | os.system('rm ' + fi) 44 | 45 | def mystrip(s): 46 | return ''.join(s.split()) 47 | 48 | def mysorteddict(d,key = lambda s:s, reverse=False): 49 | dordered = OrderedDict() 50 | for k in sorted(d.keys(),key = key,reverse=reverse): 51 | dordered[k] = d[k] 52 | return dordered 53 | 54 | def mysorteddictfile(src,obj): 55 | mywritejson(obj,mysorteddict(myreadjson(src))) 56 | 57 | def myfuzzymatch(srcs,objs,grade=80): 58 | matchDict = OrderedDict() 59 | for src in srcs: 60 | for obj in objs: 61 | value = fuzz.partial_ratio(src,obj) 62 | if value > grade: 63 | try: 64 | matchDict[src].append(obj) 65 | except: 66 | matchDict[src] = [obj] 67 | return matchDict 68 | 69 | def mydumps(x): 70 | return json.dumps(content,indent=4,ensure_ascii=False) 71 | 72 | def get_random_list(l,num=-1,isunique=0): 73 | if isunique: 74 | l = set(l) 75 | if num < 0: 76 | num = len(l) 77 | if isunique and num > len(l): 78 | return 79 | lnew = [] 80 | l = list(l) 81 | while(num>len(lnew)): 82 | x = l[int(random.random()*len(l))] 83 | if isunique and x in lnew: 84 | continue 85 | lnew.append(x) 86 | return lnew 87 | 88 | def fuzz_list(node1_list,node2_list,score_baseline=66,proposal_num=10,string_map=None): 89 | node_dict = { } 90 | for i,node1 in enumerate(node1_list): 91 | match_score_dict = { } 92 | for node2 in node2_list: 93 | if node1 != node2: 94 | if string_map is not None: 95 | n1 = string_map(node1) 96 | n2 = string_map(node2) 97 | score = fuzz.partial_ratio(n1,n2) 98 | if n1 == n2: 99 | node2_list.remove(node2) 100 | else: 101 | score = fuzz.partial_ratio(node1,node2) 102 | if score > score_baseline: 103 | match_score_dict[node2] = score 104 | else: 105 | node2_list.remove(node2) 106 | node2_sort = sorted(match_score_dict.keys(), key=lambda k:match_score_dict[k],reverse=True) 107 | node_dict[node1] = [[n,match_score_dict[n]] for n in node2_sort[:proposal_num]] 108 | print(i,len(node1_list)) 109 | return node_dict, node2_list 110 | 111 | def swap(a,b): 112 | return b, a 113 | 114 | def mkdir(d): 115 | path = d.split('/') 116 | for i in range(len(path)): 117 | d = '/'.join(path[:i+1]) 118 | if not os.path.exists(d): 119 | os.mkdir(d) 120 | 121 | -------------------------------------------------------------------------------- /file/index_feature_list.json: -------------------------------------------------------------------------------- 1 | [ 2 | "A/G Ratio", 3 | "ALT/SGPT", 4 | "AST/SGOT", 5 | "Albumin Quant", 6 | "\"Albumin; Serum\"", 7 | "\"Alk Phos; Serum\"", 8 | "\"Amylase; Serum\"", 9 | "Anion Gap", 10 | "BMI", 11 | "BNP-B Type Natriuretic Peptide", 12 | "BUN/Creat Ratio", 13 | "Base Excess", 14 | "Base Excess Calc", 15 | "\"Base Excess Calc; Arterial\"", 16 | "Baso %", 17 | "Baso Abs", 18 | "Baso Abs Cnt Bld", 19 | "\"Bilirubin Direct; Serum\"", 20 | "Bilirubin Serum Quant", 21 | "Bilirubin Total Bld mCnc", 22 | "Blood Urea Nitrogen", 23 | "Body Surface Area", 24 | "Braden Scale", 25 | "\"CK/CPK; Total; Serum\"", 26 | 
"CKMB", 27 | "CO2", 28 | "\"CO2 Total; Arterial\"", 29 | "\"CO2 Total; Serum\"", 30 | "Calcium Ionized", 31 | "Calcium Quant", 32 | "\"Calcium; Serum\"", 33 | "Chol HDL", 34 | "\"Chol Total; Serum\"", 35 | "\"Cl; Serum\"", 36 | "\"Creatinine; Serum Quant\"", 37 | "DBP", 38 | "Diff Bands", 39 | "\"Diff Bands; Abs\"", 40 | "Diff Blast", 41 | "Diff Nucleated RBC", 42 | "\"Diff; Eosinophil %\"", 43 | "\"Diff; Eosinophil Abs Quant Bld\"", 44 | "\"Diff; Eosinophil Count %\"", 45 | "\"Diff; Eosinophil Count; Abs\"", 46 | "\"Diff; Granulocyte %\"", 47 | "ETCO2", 48 | "FLACC Pain Scale", 49 | "FiO2", 50 | "GFR/BSA Pred Black SerPl MDRD ArVRat", 51 | "GFR/BSA Pred non black SerPl MDRD ArVRat", 52 | "Glasgow Coma Score", 53 | "\"Globulin; Serum\"", 54 | "Glomerular Filtration Rate", 55 | "\"Glomerular Filtration Rate; Est\"", 56 | "Glucose Stick/Meter WBlood POC", 57 | "Glucose WBlood Quant", 58 | "\"Glucose; Serum Quant\"", 59 | "HCO3", 60 | "\"HCO3; Arterial\"", 61 | "HR", 62 | "HR Apical", 63 | "HR Monitored", 64 | "Hct", 65 | "Height", 66 | "Hgb", 67 | "Hgb A1C", 68 | "INR Platelet Poor Plasma", 69 | "\"Imm Granulocytes Cnt Bld Auto; %\"", 70 | "\"Imm Granulocytes Cnt Bld Auto; Abs\"", 71 | "International Normalized Ratio", 72 | "\"Iron; Serum\"", 73 | "Lactic Acid Blood", 74 | "Lactic Dehydrogenase", 75 | "\"Lipase; Serum\"", 76 | "Lymph %", 77 | "Lymph Abs Cnt", 78 | "\"Lymphocytes Cnt Bld Auto;%\"", 79 | "\"Lymphocytes Cnt Bld Auto;Abs\"", 80 | "Lymphocytes NFr Bld Auto", 81 | "MAP", 82 | "MCH Concentration", 83 | "Magnesium", 84 | "\"Magnesium; Serum/Plasma\"", 85 | "Mean Corpuscular Hemoglobin", 86 | "Mean Corpuscular Volume", 87 | "Mean Platelet Volume", 88 | "Mono %", 89 | "Mono Abs Cnt Bld", 90 | "Neutrophil %", 91 | "Neutrophil Abs Bld Cnt", 92 | "Neutrophil Seg", 93 | "\"Nucleated RBC Ratio; Blood Auto\"", 94 | "PCO2", 95 | "\"PCO2; Arterial\"", 96 | "PO2", 97 | "\"PO2; Arterial\"", 98 | "PT", 99 | "PT Time PPP", 100 | "PTT", 101 | "PTT/APTT PPP", 102 | "PaO2", 103 | "Pain Score", 104 | "Peak inspiratory pressure", 105 | "Peep", 106 | "\"Phosphorus; Serum\"", 107 | "Platelet Count", 108 | "\"Potassium; Serum\"", 109 | "\"Protein Total; Serum\"", 110 | "Pulse", 111 | "Pulse Oximetry", 112 | "Pulse Peripheral", 113 | "QRS Duration", 114 | "QT Interval", 115 | "RBC", 116 | "Red Blood Cell Distribution Width", 117 | "Resp Rt", 118 | "Resp Rt Tot", 119 | "SBP", 120 | "SO2", 121 | "SPO2", 122 | "SaO2", 123 | "\"SaO2 %; Arterial\"", 124 | "\"Sodium; Serum\"", 125 | "Specific Gravity Urine", 126 | "TCO2", 127 | "Temp", 128 | "Temp Axillary", 129 | "Temp Oral", 130 | "Temp Temporal Artery", 131 | "Temp Tympanic", 132 | "Thyroid Stimulating Hormone", 133 | "Tidal Volume", 134 | "\"Triglyceride; Serum\"", 135 | "Troponin I", 136 | "Troponin T S/Pl", 137 | "UA Red Blood Cell", 138 | "UA Urobilinogen", 139 | "UA WBC", 140 | "\"Vitamin B12; Serum\"", 141 | "WBC", 142 | "Weight", 143 | "\"pH; Arterial\"", 144 | "\"pH; Urine\"" 145 | ] -------------------------------------------------------------------------------- /analysis/missing_rate.py: -------------------------------------------------------------------------------- 1 | # coding=utf8 2 | 3 | import os 4 | import sys 5 | import json 6 | import numpy as np 7 | sys.path.append('../code') 8 | 9 | import tools 10 | from tools import parse 11 | args = parse.args 12 | 13 | # def myprint(): 14 | # pass 15 | 16 | def analyze_time(task='task1'): 17 | ''' 18 | Analyze features of vital data 19 | ''' 20 | # wf = open(os.path.join(args.result_dir, 'task')) 21 
| wf = open(task + '.csv', 'w') 22 | def myprint(s): 23 | wf.write(s + '\n') 24 | 25 | task_dir = os.path.join(args.data_dir, 'sepsis2_{:s}_training'.format(task)) 26 | 27 | 28 | # vital information 29 | vital_file = os.path.join(task_dir, 'sepsis2_{:s}_vital_training.csv'.format(task)) 30 | patient_time_dict = dict() 31 | for i_line,line in enumerate(open(vital_file)): 32 | if i_line % 10000 == 0: 33 | print i_line 34 | if i_line: 35 | patient, time = line.strip().split(',')[:2] 36 | patient_time_dict[patient] = max(patient_time_dict.get(patient, -10000000), int(float(time))) 37 | time_list = patient_time_dict.values() 38 | print max(time_list), min(time_list) 39 | x = range(max(time_list) + 1) 40 | y = [0 for _ in x] 41 | for t in time_list: 42 | y[t] += 1 43 | with open('../file/time_dist.json', 'w') as f: 44 | f.write(json.dumps([x,y], indent=4)) 45 | 46 | def draw_time_dist(): 47 | import matplotlib.pyplot as plt 48 | time_dist = json.load(open('../file/time_dist.json')) 49 | x,y = time_dist 50 | for xi,yi in zip(x, y): 51 | print xi, yi 52 | plt.plot(x, y) 53 | plt.savefig('../result/fig/time.png') 54 | 55 | 56 | 57 | 58 | def analyze_features(task): 59 | ''' 60 | Analyze features of vital data 61 | ''' 62 | # wf = open(os.path.join(args.result_dir, 'task')) 63 | wf = open(task + '.csv', 'w') 64 | def myprint(s): 65 | wf.write(s + '\n') 66 | 67 | task_dir = os.path.join(args.data_dir, 'sepsis2_{:s}_training'.format(task)) 68 | 69 | 70 | # vital information 71 | vital_file = os.path.join(task_dir, 'sepsis2_{:s}_vital_training.csv'.format(task)) 72 | vital_dict = { } # key-valuelist-dict 73 | for i_line,line in enumerate(open(vital_file)): 74 | if i_line == 0: 75 | new_line = '' 76 | vis = 0 77 | for c in line: 78 | if c == '"': 79 | vis = (vis + 1) % 2 80 | if vis == 1 and c == ',': 81 | c = ';' 82 | new_line += c 83 | line = new_line 84 | col_list = line.strip().split(',')[1:] 85 | for col in col_list: 86 | vital_dict[col] = [] 87 | else: 88 | ctt_list = line.strip().split(',')[1:] 89 | assert len(ctt_list) == len(col_list) 90 | for col,ctt in zip(col_list, ctt_list): 91 | if len(ctt): 92 | vital_dict[col].append(float(ctt)) 93 | # if i_line > 10000: 94 | # break 95 | 96 | ms_list = [] 97 | myprint('{:s}:\t vital info: \n'.format(task)) 98 | myprint('Feature, Missing Rate, Min, 25%, 75%, Max') 99 | for col in col_list: 100 | value_list = sorted(vital_dict[col]) 101 | if len(value_list) == 0: 102 | continue 103 | fn = len(value_list) / 4 104 | myprint('{:s}, {:d}%, {:3.2f}, {:3.2f},{:3.2f},{:3.2f}'.format(col.replace(';', ','), (i_line - len(value_list))*100/i_line, value_list[0], value_list[fn], value_list[fn*3], value_list[-1])) 105 | 106 | ms_list.append((i_line - len(value_list))*100.0/i_line) 107 | 108 | ms_list = sorted(ms_list) 109 | myprint('\nMissing Rate') 110 | myprint('\nMissing Rate Min: {:d}%'.format(int(ms_list[0]))) 111 | myprint('\nMissing Rate Max: {:d}%'.format(int(ms_list[-1]))) 112 | myprint('\nMissing Rate Mean: {:d}%'.format(int(sum(ms_list)/len(ms_list)))) 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | def main(): 122 | # analyze_time() 123 | draw_time_dist() 124 | # analyze_features('task1') 125 | # analyze_features('task2') 126 | 127 | 128 | if __name__ == '__main__': 129 | os.system('clear') 130 | main() 131 | -------------------------------------------------------------------------------- /file/feature_index_dict.json: -------------------------------------------------------------------------------- 1 | { 2 | "\"Diff; Eosinophil %\"": 40, 3 | "PaO2": 
100, 4 | "BMI": 8, 5 | "Thyroid Stimulating Hormone": 130, 6 | "FiO2": 47, 7 | "\"Cl; Serum\"": 33, 8 | "\"Protein Total; Serum\"": 107, 9 | "Troponin I": 133, 10 | "GFR/BSA Pred non black SerPl MDRD ArVRat": 49, 11 | "Neutrophil %": 88, 12 | "ETCO2": 45, 13 | "Temp Axillary": 126, 14 | "\"Albumin; Serum\"": 4, 15 | "Calcium Quant": 29, 16 | "Mean Corpuscular Volume": 84, 17 | "\"Iron; Serum\"": 70, 18 | "\"Triglyceride; Serum\"": 132, 19 | "Diff Blast": 38, 20 | "Peep": 103, 21 | "Mean Corpuscular Hemoglobin": 83, 22 | "\"Vitamin B12; Serum\"": 138, 23 | "QRS Duration": 111, 24 | "Platelet Count": 105, 25 | "Magnesium": 81, 26 | "Blood Urea Nitrogen": 20, 27 | "\"Sodium; Serum\"": 122, 28 | "Pain Score": 101, 29 | "MAP": 79, 30 | "\"Base Excess Calc; Arterial\"": 13, 31 | "Neutrophil Seg": 90, 32 | "Hgb A1C": 65, 33 | "A/G Ratio": 0, 34 | "\"HCO3; Arterial\"": 58, 35 | "\"pH; Urine\"": 142, 36 | "Mono Abs Cnt Bld": 87, 37 | "\"PCO2; Arterial\"": 93, 38 | "DBP": 35, 39 | "Hct": 62, 40 | "HR": 59, 41 | "\"Magnesium; Serum/Plasma\"": 82, 42 | "INR Platelet Poor Plasma": 66, 43 | "Bilirubin Serum Quant": 18, 44 | "\"SaO2 %; Arterial\"": 121, 45 | "Glomerular Filtration Rate": 52, 46 | "\"Nucleated RBC Ratio; Blood Auto\"": 91, 47 | "\"Glucose; Serum Quant\"": 56, 48 | "Troponin T S/Pl": 134, 49 | "UA WBC": 137, 50 | "AST/SGOT": 2, 51 | "SaO2": 120, 52 | "PT": 96, 53 | "CKMB": 24, 54 | "Chol HDL": 31, 55 | "Base Excess": 11, 56 | "\"Potassium; Serum\"": 106, 57 | "Diff Bands": 36, 58 | "Baso Abs": 15, 59 | "Anion Gap": 7, 60 | "PTT": 98, 61 | "PT Time PPP": 97, 62 | "Lymph Abs Cnt": 75, 63 | "Body Surface Area": 21, 64 | "Baso Abs Cnt Bld": 16, 65 | "HR Apical": 60, 66 | "\"Globulin; Serum\"": 51, 67 | "\"Bilirubin Direct; Serum\"": 17, 68 | "Pulse Peripheral": 110, 69 | "Lactic Dehydrogenase": 72, 70 | "Base Excess Calc": 12, 71 | "Resp Rt Tot": 116, 72 | "UA Urobilinogen": 136, 73 | "Red Blood Cell Distribution Width": 114, 74 | "Bilirubin Total Bld mCnc": 19, 75 | "Weight": 140, 76 | "Temp Tympanic": 129, 77 | "Mean Platelet Volume": 85, 78 | "\"Glomerular Filtration Rate; Est\"": 53, 79 | "HR Monitored": 61, 80 | "\"Calcium; Serum\"": 30, 81 | "Braden Scale": 22, 82 | "BNP-B Type Natriuretic Peptide": 9, 83 | "Lymph %": 74, 84 | "\"Imm Granulocytes Cnt Bld Auto; Abs\"": 68, 85 | "ALT/SGPT": 1, 86 | "\"Diff; Eosinophil Count; Abs\"": 43, 87 | "HCO3": 57, 88 | "UA Red Blood Cell": 135, 89 | "\"Diff Bands; Abs\"": 37, 90 | "Tidal Volume": 131, 91 | "\"Diff; Eosinophil Count %\"": 42, 92 | "Temp Oral": 127, 93 | "\"CK/CPK; Total; Serum\"": 23, 94 | "MCH Concentration": 80, 95 | "\"Lymphocytes Cnt Bld Auto;%\"": 76, 96 | "Resp Rt": 115, 97 | "Albumin Quant": 3, 98 | "International Normalized Ratio": 69, 99 | "Specific Gravity Urine": 123, 100 | "Diff Nucleated RBC": 39, 101 | "\"Lymphocytes Cnt Bld Auto;Abs\"": 77, 102 | "\"Creatinine; Serum Quant\"": 34, 103 | "Glucose WBlood Quant": 55, 104 | "\"pH; Arterial\"": 141, 105 | "Glasgow Coma Score": 50, 106 | "SBP": 117, 107 | "CO2": 25, 108 | "Baso %": 14, 109 | "BUN/Creat Ratio": 10, 110 | "Lactic Acid Blood": 71, 111 | "Pulse": 108, 112 | "Height": 63, 113 | "\"Alk Phos; Serum\"": 5, 114 | "PO2": 94, 115 | "PCO2": 92, 116 | "\"Diff; Granulocyte %\"": 44, 117 | "\"Lipase; Serum\"": 73, 118 | "GFR/BSA Pred Black SerPl MDRD ArVRat": 48, 119 | "Lymphocytes NFr Bld Auto": 78, 120 | "Glucose Stick/Meter WBlood POC": 54, 121 | "FLACC Pain Scale": 46, 122 | "WBC": 139, 123 | "Hgb": 64, 124 | "Temp Temporal Artery": 128, 125 | "\"CO2 Total; Arterial\"": 
26, 126 | "SPO2": 119, 127 | "SO2": 118, 128 | "Calcium Ionized": 28, 129 | "\"CO2 Total; Serum\"": 27, 130 | "TCO2": 124, 131 | "Temp": 125, 132 | "\"Phosphorus; Serum\"": 104, 133 | "\"PO2; Arterial\"": 95, 134 | "\"Chol Total; Serum\"": 32, 135 | "Pulse Oximetry": 109, 136 | "QT Interval": 112, 137 | "\"Diff; Eosinophil Abs Quant Bld\"": 41, 138 | "Peak inspiratory pressure": 102, 139 | "RBC": 113, 140 | "\"Amylase; Serum\"": 6, 141 | "\"Imm Granulocytes Cnt Bld Auto; %\"": 67, 142 | "Neutrophil Abs Bld Cnt": 89, 143 | "Mono %": 86, 144 | "PTT/APTT PPP": 99 145 | } -------------------------------------------------------------------------------- /code/tools/segmentation.py: -------------------------------------------------------------------------------- 1 | # coding=utf8 2 | import matplotlib.pyplot as plt 3 | from scipy import ndimage as ndi 4 | from skimage import morphology,color,data 5 | from skimage import filters 6 | import numpy as np 7 | import skimage 8 | import os 9 | from skimage import measure 10 | 11 | 12 | 13 | def watershed(image, label=None): 14 | denoised = filters.rank.median(image, morphology.disk(2)) #过滤噪声 15 | #将梯度值低于10的作为开始标记点 16 | markers = filters.rank.gradient(denoised, morphology.disk(5)) < 10 17 | markers = ndi.label(markers)[0] 18 | 19 | gradient = filters.rank.gradient(denoised, morphology.disk(2)) #计算梯度 20 | labels =morphology.watershed(gradient, markers, mask=image) #基于梯度的分水岭算法 21 | 22 | fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(6, 6)) 23 | axes = axes.ravel() 24 | ax0, ax1, ax2, ax3 = axes 25 | 26 | ax0.imshow(image, cmap=plt.cm.gray, interpolation='nearest') 27 | ax0.set_title("Original") 28 | # ax1.imshow(gradient, cmap=plt.cm.spectral, interpolation='nearest') 29 | ax1.imshow(gradient, cmap=plt.cm.gray, interpolation='nearest') 30 | ax1.set_title("Gradient") 31 | if label is not None: 32 | # ax2.imshow(markers, cmap=plt.cm.spectral, interpolation='nearest') 33 | ax2.imshow(label, cmap=plt.cm.gray, interpolation='nearest') 34 | else: 35 | ax2.imshow(markers, cmap=plt.cm.spectral, interpolation='nearest') 36 | ax2.set_title("Markers") 37 | ax3.imshow(labels, cmap=plt.cm.spectral, interpolation='nearest') 38 | ax3.set_title("Segmented") 39 | 40 | for ax in axes: 41 | ax.axis('off') 42 | 43 | fig.tight_layout() 44 | plt.show() 45 | 46 | def plot_4(image, gradient,label,segmentation, save_path=None): 47 | fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(6, 6)) 48 | axes = axes.ravel() 49 | ax0, ax1, ax2, ax3 = axes 50 | ax0.imshow(image, cmap=plt.cm.gray, interpolation='nearest') 51 | ax0.set_title("Original") 52 | ax1.imshow(gradient, cmap=plt.cm.gray, interpolation='nearest') 53 | ax1.set_title("Gradient") 54 | ax2.imshow(label, cmap=plt.cm.gray, interpolation='nearest') 55 | ax2.set_title("label") 56 | ax3.imshow(segmentation, cmap=plt.cm.spectral, interpolation='nearest') 57 | ax3.set_title("Segmented") 58 | 59 | for ax in axes: 60 | ax.axis('off') 61 | 62 | fig.tight_layout() 63 | if save_path: 64 | print save_path 65 | plt.savefig(save_path) 66 | else: 67 | plt.show() 68 | 69 | def fill(image): 70 | ''' 71 | 填充图片内部空白 72 | 临时写的函数 73 | 建议后期替换 74 | ''' 75 | label_img = measure.label(image, background=1) 76 | props = measure.regionprops(label_img) 77 | max_area = np.array([p.area for p in props]).max() 78 | for i,prop in enumerate(props): 79 | if prop.area < max_area: 80 | image[prop.coords[:,0],prop.coords[:,1]] = 1 81 | return image 82 | 83 | 84 | 85 | def my_watershed(image, label=None, min_gray=480, max_gray=708, min_gradient=5, 
show=False, save_path='/tmp/x.jpg'): 86 | image = image - min_gray 87 | image[image>max_gray] = 0 88 | image[image< 10] = 0 89 | image = image * 5 90 | 91 | denoised = filters.rank.median(image, morphology.disk(2)) #过滤噪声 92 | #将梯度值低于10的作为开始标记点 93 | markers = filters.rank.gradient(denoised, morphology.disk(5)) < 10 94 | markers = ndi.label(markers)[0] 95 | 96 | gradient = filters.rank.gradient(denoised, morphology.disk(2)) #计算梯度 97 | labels = gradient > min_gradient 98 | 99 | mask = gradient > min_gradient 100 | label_img = measure.label(mask, background=0) 101 | props = measure.regionprops(label_img) 102 | pred = np.zeros_like(gradient) 103 | for i,prop in enumerate(props): 104 | if prop.area > 50: 105 | region = np.array(prop.coords) 106 | vx,vy = region.var(0) 107 | v = vx + vy 108 | if v < 200: 109 | pred[prop.coords[:,0],prop.coords[:,1]] = 1 110 | 111 | # 填充边缘内部空白 112 | pred = fill(pred) 113 | 114 | if show: 115 | plot_4(image, gradient, label, pred) 116 | else: 117 | plot_4(image, gradient, label, pred, save_path) 118 | 119 | return pred 120 | 121 | def segmentation(image_npy, label_npy,save_path): 122 | print image_npy 123 | image = np.load(image_npy) 124 | label = np.load(label_npy) 125 | if np.sum(label) == 0: 126 | return 127 | min_gray,max_gray = 480, 708 128 | my_watershed(image,label,min_gray, max_gray,show=False, save_path=save_path) 129 | 130 | def main(): 131 | data_dir = '/home/yin/all/PVL_DATA/preprocessed/2D/' 132 | save_dir = '/home/yin/all/PVL_DATA/tool_result/' 133 | os.system('rm -r ' + save_dir) 134 | os.system('mkdir ' + save_dir) 135 | for patient in os.listdir(data_dir): 136 | patient_dir = os.path.join(data_dir, patient) 137 | for f in os.listdir(patient_dir): 138 | if 'roi.npy' in f: 139 | label_npy = os.path.join(patient_dir,f) 140 | image_npy = label_npy.replace('.roi.npy','.npy') 141 | segmentation(image_npy,label_npy, os.path.join(save_dir,label_npy.strip('/').replace('/','.').replace('npy','jpg'))) 142 | 143 | if __name__ == '__main__': 144 | # image =color.rgb2gray(data.camera()) 145 | # watershed(image) 146 | main() 147 | image_npy = '/home/yin/all/PVL_DATA/preprocessed/2D/JD_chen_xi/23.npy' 148 | image_npy = '/home/yin/all/PVL_DATA/preprocessed/2D/JD_chen_xi/14.npy' 149 | image_npy = '/home/yin/all/PVL_DATA/preprocessed/2D/JD_zhang_yu_chen/23.npy' 150 | label_npy = image_npy.replace('.npy','.roi.npy') 151 | segmentation(image_npy,label_npy) 152 | 153 | 154 | -------------------------------------------------------------------------------- /file/range_dict.json: -------------------------------------------------------------------------------- 1 | { 2 | "0": [ 3 | 2.1408783595733656e-06, 4 | 0.6331411912807262 5 | ], 6 | "1": [ 7 | 0.00030185237464440054, 8 | 0.7064923267231049 9 | ], 10 | "2": [ 11 | 0.0, 12 | 0.6873068031850349 13 | ], 14 | "3": [ 15 | 1.409416783335056e-05, 16 | 0.9995959671887773 17 | ], 18 | "5": [ 19 | 5.201181708484168e-06, 20 | 0.7626700786418674 21 | ], 22 | "6": [ 23 | 8.216926869350862e-05, 24 | 0.7195562859490551 25 | ], 26 | "7": [ 27 | 0.0006374377240118317, 28 | 0.9764595366834227 29 | ], 30 | "9": [ 31 | 5.201181708484168e-06, 32 | 0.09978150036416605 33 | ], 34 | "10": [ 35 | 0.0, 36 | 0.6081311150381449 37 | ], 38 | "139": [ 39 | 3.704074918183522e-05, 40 | 0.9991066642844381 41 | ], 42 | "141": [ 43 | 9.576601545776155e-05, 44 | 0.8080398386624305 45 | ], 46 | "14": [ 47 | 0.0, 48 | 0.9977341120216955 49 | ], 50 | "15": [ 51 | 0.0, 52 | 0.9615260926765092 53 | ], 54 | "16": [ 55 | 0.0, 56 | 0.954532787113675 57 | ], 58 | 
"17": [ 59 | 0.0, 60 | 0.6675518300486306 61 | ], 62 | "19": [ 63 | 5.94198248303564e-06, 64 | 0.8106884380904845 65 | ], 66 | "24": [ 67 | 3.704074918183522e-05, 68 | 0.4963916680334591 69 | ], 70 | "25": [ 71 | 0.00037555546389783157, 72 | 0.8493154253436984 73 | ], 74 | "29": [ 75 | 2.1408783595733656e-06, 76 | 0.9873902264621128 77 | ], 78 | "32": [ 79 | 0.0, 80 | 0.860348817986444 81 | ], 82 | "34": [ 83 | 0.0, 84 | 0.6961955561538268 85 | ], 86 | "36": [ 87 | 0.0, 88 | 0.4556723716381418 89 | ], 90 | "37": [ 91 | 0.0, 92 | 0.5864628820960699 93 | ], 94 | "38": [ 95 | 0.3665187623049572, 96 | 0.9994300911854104 97 | ], 98 | "39": [ 99 | 0.3665187623049572, 100 | 0.9336851564702565 101 | ], 102 | "40": [ 103 | 0.00030185237464440054, 104 | 0.9595347363471897 105 | ], 106 | "41": [ 107 | 0.00030185237464440054, 108 | 0.9743654299512485 109 | ], 110 | "42": [ 111 | 0.00030185237464440054, 112 | 0.963389981736815 113 | ], 114 | "43": [ 115 | 0.00030185237464440054, 116 | 0.9784114785439815 117 | ], 118 | "47": [ 119 | 0.00030185237464440054, 120 | 0.16784077830137462 121 | ], 122 | "51": [ 123 | 0.0, 124 | 0.6953446451663466 125 | ], 126 | "54": [ 127 | 5.867282079364769e-06, 128 | 0.12904891357493498 129 | ], 130 | "56": [ 131 | 0.0, 132 | 0.2465314184449531 133 | ], 134 | "57": [ 135 | 0.0005563177168288809, 136 | 0.9050371058515982 137 | ], 138 | "62": [ 139 | 0.0, 140 | 0.9833723255267994 141 | ], 142 | "64": [ 143 | 0.0, 144 | 0.9886675715773856 145 | ], 146 | "65": [ 147 | 3.989786147462496e-05, 148 | 0.48037025215448453 149 | ], 150 | "66": [ 151 | 9.294544102611767e-05, 152 | 0.5904359141184125 153 | ], 154 | "11": [ 155 | 0.0, 156 | 0.7587090809765937 157 | ], 158 | "70": [ 159 | 0.00017140898183064793, 160 | 0.9856873500171409 161 | ], 162 | "71": [ 163 | 0.0, 164 | 0.6692467511304844 165 | ], 166 | "73": [ 167 | 0.00036075036075036075, 168 | 0.5109026775693443 169 | ], 170 | "80": [ 171 | 0.0, 172 | 0.991848166161043 173 | ], 174 | "81": [ 175 | 0.0, 176 | 0.9671630800236418 177 | ], 178 | "142": [ 179 | 0.0, 180 | 0.9872549019607844 181 | ], 182 | "86": [ 183 | 0.0020309249490896514, 184 | 0.7670254634076725 185 | ], 186 | "87": [ 187 | 0.004873457306160868, 188 | 0.7577730715165927 189 | ], 190 | "88": [ 191 | 0.0002893779018172932, 192 | 0.3268555554126529 193 | ], 194 | "89": [ 195 | 0.0009076453768093194, 196 | 0.5870213535517597 197 | ], 198 | "92": [ 199 | 0.000434063662670525, 200 | 0.7363414634146341 201 | ], 202 | "94": [ 203 | 0.0007796595486637502, 204 | 0.524550612899034 205 | ], 206 | "96": [ 207 | 3.094212584781425e-05, 208 | 0.4343655626516164 209 | ], 210 | "103": [ 211 | 5.794716377606898e-06, 212 | 0.758317935528977 213 | ], 214 | "104": [ 215 | 1.3242051458611968e-05, 216 | 0.8501661877458055 217 | ], 218 | "105": [ 219 | 2.2025655483507188e-05, 220 | 0.9336851564702565 221 | ], 222 | "106": [ 223 | 2.0303744010395516e-06, 224 | 0.9660602615122229 225 | ], 226 | "107": [ 227 | 0.0, 228 | 0.9798158226975184 229 | ], 230 | "108": [ 231 | 2.2610091579567086e-05, 232 | 0.6405256324520905 233 | ], 234 | "109": [ 235 | 5.794716377606898e-06, 236 | 0.030839480561623913 237 | ], 238 | "113": [ 239 | 0.0012990299070572342, 240 | 0.9924001102481034 241 | ], 242 | "115": [ 243 | 0.0008461129881567939, 244 | 0.3023995262038783 245 | ], 246 | "117": [ 247 | 4.603994463179883e-05, 248 | 0.7392891357381594 249 | ], 250 | "119": [ 251 | 9.901537031227363e-06, 252 | 0.6331411912807262 253 | ], 254 | "122": [ 255 | 0.0, 256 | 0.9635646740682264 257 | ], 258 | "124": [ 259 | 
0.0015201285993849596, 260 | 0.9219143136706738 261 | ], 262 | "125": [ 263 | 3.008190299455317e-06, 264 | 0.8108493391340156 265 | ] 266 | } -------------------------------------------------------------------------------- /code/tools/measures.py: -------------------------------------------------------------------------------- 1 | # coding=utf8 2 | import os 3 | import numpy as np 4 | from sklearn import metrics 5 | from PIL import Image 6 | import traceback 7 | 8 | def stati_class_number_true_flase(label, pred): 9 | label = np.array(label) 10 | pred = np.array(pred) 11 | 12 | cls_list = set(label) | set(pred) 13 | d = dict() 14 | for cls in cls_list: 15 | d[cls] = dict() 16 | d[cls]['number'] = np.sum(label==cls) 17 | d[cls]['true'] = np.sum(label[label==cls]==pred[label==cls]) 18 | d[cls]['pred'] = np.sum(pred==cls) 19 | return d 20 | 21 | def stati_class_number_true_flase_multi_label_margin(labels, preds): 22 | 23 | d = dict() 24 | for label, pred in zip(labels, preds): 25 | label = set(label[label>=0]) 26 | for cls in range(len(pred)): 27 | if cls not in d: 28 | d[cls] = dict() 29 | d[cls]['number'] = 0 30 | d[cls]['true'] = 0 31 | d[cls]['pred'] = 0 32 | if cls in label: 33 | d[cls]['number'] += 1 34 | if pred[cls] > 0.5: 35 | d[cls]['true'] += 1 36 | if pred[cls] > 0.5: 37 | d[cls]['pred'] += 1 38 | return d 39 | 40 | def stati_class_number_true_flase_bce(labels, preds): 41 | d = dict() 42 | labels = labels.astype(np.int64).reshape(-1) 43 | preds = preds.reshape(-1) > 0 44 | index = labels >= 0 45 | labels = labels[index] 46 | preds = preds[index] 47 | 48 | preds_num = preds.sum(0) 49 | true_num = (labels+preds==2).sum(0) 50 | for cls in range(2): 51 | d[cls] = dict() 52 | d[cls]['number'] = (labels==cls).sum() 53 | d[cls]['true'] = (labels+preds==2*cls).sum() 54 | d[cls]['pred'] = (labels==cls).sum() 55 | return d 56 | 57 | def measures(d_list): 58 | # 合并每一个预测的结果 59 | d_all = dict() 60 | for d in d_list: 61 | for cls in d.keys(): 62 | if cls not in d_all: 63 | d_all[cls] = dict() 64 | for k in d[cls].keys(): 65 | if k not in d_all[cls]: 66 | d_all[cls][k] = 0 67 | d_all[cls][k] += d[cls][k] 68 | m = dict() 69 | number = sum([d_all[cls]['number'] for cls in d_all.keys()]) 70 | for cls in d_all: 71 | m[cls] = dict() 72 | m[cls]['number'] = d_all[cls]['number'] 73 | m[cls]['true'] = d_all[cls]['true'] 74 | m[cls]['pred'] = d_all[cls]['pred'] 75 | m[cls]['ratio'] = d_all[cls]['number'] / (float(number) + 10e-10) 76 | m[cls]['accuracy'] = d_all[cls]['true'] / (float(d_all[cls]['number']) + 10e-10) 77 | m[cls]['precision'] = d_all[cls]['true'] /(float(d_all[cls]['pred']) + 10e-10) 78 | return m 79 | 80 | def print_measures(m, s = 'measures'): 81 | print s 82 | accuracy = 0 83 | for cls in sorted(m.keys()): 84 | print '\tclass: {:d}\taccuracy:{:.6f}\tprecision:{:.6f}\tratio:{:.6f}\t\tN/T/P:{:d}/{:d}/{:d}\ 85 | '.format(cls, m[cls]['accuracy'],m[cls]['precision'],m[cls]['ratio'],m[cls]['number'],m[cls]['true'],m[cls]['pred']) 86 | accuracy += m[cls]['accuracy'] * m[cls]['ratio'] 87 | print '\tacc:{:.6f}'.format(accuracy) 88 | return accuracy 89 | 90 | def mse(pred_image, image): 91 | pred_image = pred_image.reshape(-1).astype(np.float32) 92 | image = image.reshape(-1).astype(np.float32) 93 | mse_err = metrics.mean_squared_error(pred_image,image) 94 | return mse_err 95 | 96 | def psnr(pred_image, image): 97 | return 10 * np.log10(255*255/mse(pred_image,image)) 98 | 99 | 100 | def psnr_pred(stain_vis=20, end= 10000): 101 | clean_dir = '../../data/AI/testB/' 102 | psnr_list = [] 103 | f = 
open('../../data/result.csv','w') 104 | for i,clean in enumerate(os.listdir(clean_dir)): 105 | clean = os.path.join(clean_dir, clean) 106 | clean_file = clean 107 | pred = clean.replace('.jpg','.png').replace('data','data/test_clean') 108 | stain = clean.replace('trainB','trainA').replace('testB','testA').replace('.jpg','_.jpg') 109 | 110 | try: 111 | pred = np.array(Image.open(pred).resize((250,250))).astype(np.float32) 112 | clean = np.array(Image.open(clean).resize((250,250))).astype(np.float32) 113 | stain = np.array(Image.open(stain).resize((250,250))).astype(np.float32) 114 | 115 | # diff = np.abs(stain - pred) 116 | # vis = 20 117 | # pred[diff>vis] = stain[diff>vis] 118 | 119 | 120 | # pred[stain>gray_vis] = stain[stain>gray_vis] 121 | 122 | if end < 1000: 123 | diff = np.abs(clean - stain) 124 | # stain[diff>stain_vis] = pred[diff>stain_vis] 125 | stain[diff>stain_vis] = clean[diff>stain_vis] 126 | 127 | psnr_pred = psnr(clean, pred) 128 | psnr_stain = psnr(clean, stain) 129 | psnr_list.append([psnr_stain, psnr_pred]) 130 | except: 131 | continue 132 | if i>end: 133 | break 134 | print i, min(end, 1000) 135 | 136 | f.write(clean_file.split('/')[-1].split('.')[0]) 137 | f.write(',') 138 | f.write(str(psnr_stain)) 139 | f.write(',') 140 | f.write(str(psnr_pred)) 141 | f.write(',') 142 | f.write(str(psnr_pred/psnr_stain - 1)) 143 | f.write('\n') 144 | # print 'prediction', np.mean(psnr_list) 145 | psnr_list = np.array(psnr_list) 146 | psnr_mean = ((psnr_list[:,1] - psnr_list[:,0]) / psnr_list[:,0]).mean() 147 | if end > 1000: 148 | print 'stained image PSNR', psnr_list[:,0].mean() 149 | print 'predicted image PSNR', psnr_list[:,1].mean() 150 | print 'gain ratio', psnr_mean 151 | f.write(str(psnr_mean)) 152 | f.close() 153 | return psnr_list[:,0].mean() 154 | 155 | def main(): 156 | pmax = [0.,0.] 157 | for vis in range(1, 30): 158 | p = psnr_pred(vis, 10) 159 | print vis, p 160 | if p > pmax[1]: 161 | pmax = [vis, p] 162 | print '...' 
163 | # print 256,psnr_pred(256) 164 | print pmax 165 | # print 10 * np.log10(255*255/metrics.mean_squared_error([3],[9])) 166 | 167 | 168 | if __name__ == '__main__': 169 | psnr_pred(4000) 170 | # main() 171 | # for v in range(1,10): 172 | # print v, 10 * np.log10(255*255/v/v) 173 | -------------------------------------------------------------------------------- /code/tools/parse.py: -------------------------------------------------------------------------------- 1 | # coding=utf8 2 | 3 | import argparse 4 | 5 | parser = argparse.ArgumentParser(description='DII Challenge 2019') 6 | 7 | parser.add_argument( 8 | '--data-dir', 9 | type=str, 10 | default='/home/yin/data/', 11 | help='data directory' 12 | ) 13 | parser.add_argument( 14 | '--result-dir', 15 | type=str, 16 | default='../result/', 17 | help='result directory' 18 | ) 19 | parser.add_argument( 20 | '--file-dir', 21 | type=str, 22 | default='../file/', 23 | help='useful file directory' 24 | ) 25 | parser.add_argument( 26 | '--vital-file', 27 | type=str, 28 | default='../file/vital.csv', 29 | help='vital information' 30 | ) 31 | parser.add_argument( 32 | '--master-file', 33 | type=str, 34 | default='../file/master.csv', 35 | help='master information' 36 | ) 37 | parser.add_argument( 38 | '--label-file', 39 | type=str, 40 | default='../file/label.csv', 41 | help='label' 42 | ) 43 | parser.add_argument( 44 | '--model', 45 | '-m', 46 | type=str, 47 | default='lstm', 48 | help='model' 49 | ) 50 | parser.add_argument( 51 | '--embed-size', 52 | metavar='EMBED SIZE', 53 | type=int, 54 | default=256, 55 | help='embed size' 56 | ) 57 | parser.add_argument( 58 | '--rnn-size', 59 | metavar='rnn SIZE', 60 | type=int, 61 | help='rnn size' 62 | ) 63 | parser.add_argument( 64 | '--hidden-size', 65 | metavar='hidden SIZE', 66 | type=int, 67 | help='hidden size' 68 | ) 69 | parser.add_argument( 70 | '--split-num', 71 | metavar='split num', 72 | type=int, 73 | default=5, 74 | help='split num' 75 | ) 76 | parser.add_argument( 77 | '--split-nor', 78 | metavar='split normal range', 79 | type=int, 80 | default=3, 81 | help='split num' 82 | ) 83 | parser.add_argument( 84 | '--num-layers', 85 | metavar='num layers', 86 | type=int, 87 | default=2, 88 | help='num layers' 89 | ) 90 | parser.add_argument( 91 | '--num-code', 92 | metavar='num codes', 93 | type=int, 94 | default=1200, 95 | help='num code' 96 | ) 97 | parser.add_argument( 98 | '--use-glp', 99 | metavar='use global pooling operation', 100 | type=int, 101 | default=1, 102 | help='use global pooling operation' 103 | ) 104 | parser.add_argument( 105 | '--use-visit', 106 | metavar='use visit as input', 107 | type=int, 108 | default=1, 109 | help='use visit as input' 110 | ) 111 | parser.add_argument( 112 | '--use-value', 113 | metavar='use value embedding as input', 114 | type=int, 115 | default=1, 116 | help='use value embedding as input' 117 | ) 118 | parser.add_argument( 119 | '--use-cat', 120 | metavar='use cat for time and value embedding', 121 | type=int, 122 | default=1, 123 | help='use cat or add' 124 | ) 125 | parser.add_argument( 126 | '--use-trend', 127 | metavar='use feature variation trend', 128 | type=int, 129 | default=1, 130 | help='use trend' 131 | ) 132 | parser.add_argument( 133 | '--avg-time', 134 | metavar='avg time for trend, hours', 135 | type=int, 136 | default=4, 137 | help='avg time for trend' 138 | ) 139 | parser.add_argument( 140 | '--seed', 141 | metavar='seed', 142 | type=int, 143 | default=1, 144 | help='seed' 145 | ) 146 | parser.add_argument( 147 | '--set', 148 | 
metavar='split set for training', 149 | type=int, 150 | default=0, 151 | help='split set' 152 | ) 153 | parser.add_argument( 154 | '--last-time', 155 | metavar='last-time for task2', 156 | type=int, 157 | default=-4, 158 | help='last time' 159 | ) 160 | parser.add_argument( 161 | '--final', 162 | metavar='final test to submit', 163 | type=int, 164 | default=0, 165 | help='final' 166 | ) 167 | 168 | 169 | 170 | 171 | parser.add_argument('--phase', 172 | default='train', 173 | type=str, 174 | metavar='S', 175 | help='pretrain/train/test phase') 176 | parser.add_argument( 177 | '--batch-size', 178 | '-b', 179 | metavar='BATCH SIZE', 180 | type=int, 181 | default=32, 182 | help='batch size' 183 | ) 184 | parser.add_argument('--save-dir', 185 | default='../../data', 186 | type=str, 187 | metavar='S', 188 | help='save dir') 189 | parser.add_argument('--resume', 190 | default='', 191 | type=str, 192 | metavar='S', 193 | help='start from checkpoints') 194 | parser.add_argument('--task', 195 | default='task1', 196 | type=str, 197 | metavar='S', 198 | help='start from checkpoints') 199 | 200 | ##### 201 | parser.add_argument('-j', 202 | '--workers', 203 | default=8, 204 | type=int, 205 | metavar='N', 206 | help='number of data loading workers (default: 32)') 207 | parser.add_argument('--lr', 208 | '--learning-rate', 209 | default=0.0001, 210 | type=float, 211 | metavar='LR', 212 | help='initial learning rate') 213 | parser.add_argument('--epochs', 214 | default=20, 215 | type=int, 216 | metavar='N', 217 | help='number of total epochs to run') 218 | parser.add_argument('--save-freq', 219 | default='5', 220 | type=int, 221 | metavar='S', 222 | help='save frequency') 223 | parser.add_argument('--save-pred-freq', 224 | default='10', 225 | type=int, 226 | metavar='S', 227 | help='save pred clean frequency') 228 | parser.add_argument('--val-freq', 229 | default='5', 230 | type=int, 231 | metavar='S', 232 | help='val frequency') 233 | args = parser.parse_args() 234 | -------------------------------------------------------------------------------- /analysis/analyse_test_files.py: -------------------------------------------------------------------------------- 1 | 2 | # coding=utf8 3 | 4 | import os 5 | import sys 6 | import json 7 | sys.path.append('../code') 8 | 9 | import tools 10 | from tools import parse, py_op 11 | import numpy as np 12 | 13 | args = parse.args 14 | 15 | # def myprint(): 16 | # pass 17 | 18 | def ana_time(task='task2'): 19 | if task == 'task2': 20 | vital_file = '/home/yin/contestdata2/DII_sepsis2_task2_evaluation/sepsis2_task2_evaluation_vital.csv' 21 | elif task == 'case1': 22 | vital_file = '/home/yin/contestdata2/DII_sepsis2_task1_evaluation/sepsis2_task1_evaluation_case1_vital.csv' 23 | else: 24 | vital_file = '/home/yin/contestdata2/DII_sepsis2_task1_evaluation/sepsis2_task1_evaluation_case2_vital.csv' 25 | patient_time_dict = dict() 26 | for line in open(vital_file): 27 | data = line.split(',') 28 | patient, time = data[:2] 29 | if time != 'event_time': 30 | patient_time_dict[patient] = patient_time_dict.get(patient, []) + [float(time)] 31 | mx, mn = -100, 100 32 | for p,ts in patient_time_dict.items(): 33 | if min(ts) > 5: 34 | print p 35 | mx = max(mx, min(ts)) 36 | mn = min(mn, max(ts)) 37 | print mx, mn 38 | 39 | def ana_patient(): 40 | def get_patients(task): 41 | if task == 'task2': 42 | master_file = '/home/yin/contestdata2/DII_sepsis2_task2_evaluation/sepsis2_task2_evaluation_master.csv' 43 | elif task == 'case1': 44 | master_file = 
'/home/yin/contestdata2/DII_sepsis2_task1_evaluation/sepsis2_task1_evaluation_case1_master.csv' 45 | else: 46 | master_file = '/home/yin/contestdata2/DII_sepsis2_task1_evaluation/sepsis2_task1_evaluation_case2_master.csv' 47 | pids = set() 48 | for i,line in enumerate(open(master_file)): 49 | if i == 0: 50 | # print line 51 | continue 52 | pid = line.split(',')[0] 53 | pids.add(pid) 54 | return pids 55 | pids_case1 = get_patients('case1') 56 | pids_case2 = get_patients('case2') 57 | pids_task2 = get_patients('task2') 58 | print 'case1', len(pids_case1), len(pids_case1 & pids_case2) 59 | print 'case2', len(pids_case2) 60 | print 'task2', len(pids_task2), len(pids_task2 & pids_case2) 61 | print pids_task2 & pids_case2 62 | test_patient_dict = { 63 | 'case1': sorted(pids_case1), 64 | 'task1': sorted(pids_case2), 65 | 'task2': sorted(pids_task2) 66 | } 67 | py_op.mywritejson(os.path.join(args.file_dir, 'test_patient_dict.json'), test_patient_dict) 68 | 69 | def get_patient_line_dict(): 70 | def get_data(task): 71 | if task == 'task2': 72 | vital_file = '/home/yin/contestdata2/DII_sepsis2_task2_evaluation/sepsis2_task2_evaluation_vital.csv' 73 | elif task == 'case1': 74 | vital_file = '/home/yin/contestdata2/DII_sepsis2_task1_evaluation/sepsis2_task1_evaluation_case1_vital.csv' 75 | else: 76 | vital_file = '/home/yin/contestdata2/DII_sepsis2_task1_evaluation/sepsis2_task1_evaluation_case2_vital.csv' 77 | patient_data = dict() 78 | for line in open(vital_file): 79 | if 'event' not in line: 80 | data = line.strip().split(',') 81 | patient = data[0] 82 | line = ','.join(data[2:]) 83 | patient_data[patient] = patient_data.get(patient, []) + [line] 84 | for p, d in patient_data.items(): 85 | if len(d) < 4: 86 | print task, p, len(d) 87 | return patient_data 88 | task_patient_data = dict() 89 | for k in ['case1', 'case2', 'task2']: 90 | print k 91 | task_patient_data[k] = get_data(k) 92 | print 'write' 93 | with open('../result/task_patient_data.json', 'w') as f: 94 | f.write(json.dumps(task_patient_data)) 95 | 96 | 97 | def ana_data_similar(): 98 | def get_master(task): 99 | if task == 'task2': 100 | master_file = '/home/yin/contestdata2/DII_sepsis2_task2_evaluation/sepsis2_task2_evaluation_master.csv' 101 | elif task == 'case1': 102 | master_file = '/home/yin/contestdata2/DII_sepsis2_task1_evaluation/sepsis2_task1_evaluation_case1_master.csv' 103 | else: 104 | master_file = '/home/yin/contestdata2/DII_sepsis2_task1_evaluation/sepsis2_task1_evaluation_case2_master.csv' 105 | master_pid_dict = dict() 106 | for i,line in enumerate(open(master_file)): 107 | if i == 0: 108 | continue 109 | pid = line.split(',')[0] 110 | # master = line.replace(pid+',', '') 111 | master = line[len(pid) + 1:] 112 | master = ''.join(master.split()) 113 | master_pid_dict[master] = master_pid_dict.get(master, []) + [pid] 114 | return master_pid_dict 115 | task_master_pid_dict = dict() 116 | task_patient_data = py_op.myreadjson('../result/task_patient_data.json') 117 | for k in ['case1', 'case2', 'task2']: 118 | task_master_pid_dict[k] = get_master(k) 119 | 120 | kf = 'case1' 121 | ks = 'task2' 122 | ks = 'case2' 123 | master_set = set(task_master_pid_dict[kf]) & set(task_master_pid_dict[ks]) 124 | cset = set() 125 | n = 0 126 | for master in master_set: 127 | pc = task_master_pid_dict[kf][master] 128 | pt = task_master_pid_dict[ks][master] 129 | if len(pc) + len(pt) >= 2: 130 | for ppc in pc: 131 | n += 1 132 | for ppt in pt: 133 | ppc_data = set(task_patient_data[kf][ppc]) 134 | ppt_data = set(task_patient_data[ks][ppt]) 
135 | same = 0 136 | for cline in ppc_data: 137 | for tline in ppt_data: 138 | if cline == tline: 139 | # print ppc, ppt 140 | # cset.add(ppc) 141 | # print cline 142 | # print tline 143 | same += 1 144 | if same > 5: 145 | print same, len(ppc_data), len(ppt_data) 146 | cset.add(ppc) 147 | print len(cset), n 148 | 149 | def main(): 150 | # ana_time('case1') 151 | ana_patient() 152 | # get_patient_line_dict() 153 | # ana_data_similar() 154 | 155 | 156 | if __name__ == '__main__': 157 | os.system('clear') 158 | main() 159 | -------------------------------------------------------------------------------- /code/loaddata/dataloader.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | 3 | """ 4 | Read images and corresponding labels. 5 | """ 6 | 7 | import numpy as np 8 | import os 9 | import sys 10 | import json 11 | import torch 12 | from torch.utils.data import Dataset 13 | 14 | sys.path.append('loaddata') 15 | import data_function 16 | 17 | 18 | class DataSet(Dataset): 19 | def __init__(self, 20 | patient_list, 21 | patient_time_record_dict, 22 | patient_label_dict, 23 | patient_master_dict, 24 | phase='train', # phase 25 | split_num=5, # split feature value into different parts 26 | args=None # 全局参数 27 | ): 28 | 29 | self.patient_list = patient_list 30 | self.patient_time_record_dict = patient_time_record_dict 31 | self.patient_label_dict = patient_label_dict 32 | self.patient_master_dict = patient_master_dict 33 | self.phase = phase 34 | self.split_num = split_num 35 | self.split_nor = args.split_nor 36 | self.split_nn = args.split_nn 37 | self.args = args 38 | if args.task == 'task2': 39 | self.length = 49 40 | else: 41 | self.length = 98 42 | 43 | 44 | def get_visit_info(self, time_record_dict): 45 | # times = sorted([float(t) for t in time_record_dict.keys()]) 46 | times = sorted(time_record_dict.keys(), key=lambda s:float(s)) 47 | # for t in time_record_dict: 48 | # time_record_dict[str(float(t))] = time_record_dict[t] 49 | visit_list = [] 50 | value_list = [] 51 | mask_list = [] 52 | time_list = [] 53 | 54 | n_code = 72 55 | import traceback 56 | 57 | # trend 58 | trend_list = [] 59 | previous_value = [[[],[]] for _ in range(143)] 60 | change_th = 0.02 61 | start_time = - self.args.avg_time * 2 62 | end_time = -1 63 | 64 | for time in times : 65 | if float(time) <= -4 - self.length: 66 | continue 67 | if self.args.task == 'task2': 68 | if float(time) > self.args.last_time: 69 | continue 70 | time = str(time) 71 | records = time_record_dict[time] 72 | feature_index = [r[0] for r in records] 73 | feature_value = [float(r[1]) for r in records] 74 | 75 | # embed feature value 76 | feature_index = np.array(feature_index) 77 | feature_value = np.array(feature_value) 78 | feature = feature_index * self.split_nn + feature_value * self.split_num 79 | 80 | # trend 81 | trend = np.zeros(n_code, dtype=np.int64) 82 | i_v = 0 83 | for idx, val in zip(feature_index, feature_value): 84 | # delete val with time less than start_time 85 | ptimes = previous_value[idx][0] 86 | lip = 0 87 | for ip, pt in enumerate(ptimes): 88 | if pt >= float(time) + start_time: 89 | lip = ip 90 | break 91 | 92 | avg_val = None 93 | if len(previous_value[idx][0]) == 1: 94 | avg_val = previous_value[idx][1][-1] 95 | 96 | previous_value[idx] = [ 97 | previous_value[idx][0][lip:], 98 | previous_value[idx][1][lip:]] 99 | 100 | # trend value 101 | if len(previous_value[idx][0]): 102 | avg_val = np.mean(previous_value[idx][1]) 103 | if avg_val is not None: 104 | if val < 
avg_val - change_th: 105 | delta = 0 106 | elif val > avg_val + change_th: 107 | delta = 1 108 | else: 109 | delta = 2 110 | trend[i_v] = idx * 3 + delta + 1 111 | 112 | # add new val 113 | previous_value[idx][0].append(float(time)) 114 | previous_value[idx][1].append(float(val)) 115 | 116 | i_v += 1 117 | 118 | 119 | 120 | 121 | 122 | visit = np.zeros(n_code, dtype=np.int64) 123 | mask = np.zeros(n_code, dtype=np.int64) 124 | i_v = 0 125 | for feat, idx, val in zip(feature, feature_index, feature_value): 126 | 127 | # order 128 | mask[i_v] = 1 129 | visit[i_v] = int(feat + 1) 130 | i_v += 1 131 | 132 | 133 | 134 | 135 | value = np.zeros((2, n_code ), dtype=np.int64) 136 | value[0][: len(feature_index)] = feature_index + 1 137 | value[1][: len(feature_index)] = (feature_value * 100).astype(np.int64) 138 | value_list.append(value) 139 | 140 | visit_list.append(visit) 141 | mask_list.append(mask) 142 | time_list.append(float(time)) 143 | trend_list.append(trend) 144 | 145 | if self.args.task == 'task2': 146 | num_len = self.length + self.args.last_time 147 | # print 'task2', num_len, self.args.last_time 148 | else: 149 | num_len = self.length 150 | # print 'task1' 151 | # print 'num_len', num_len 152 | # print len(visit_list) 153 | assert len(visit_list) <= num_len 154 | visit = np.zeros(n_code, dtype=np.int64) 155 | trend = np.zeros(n_code, dtype=np.int64) 156 | value = np.zeros((2, n_code), dtype=np.int64) 157 | while len(visit_list) < num_len: 158 | visit_list.append(visit) 159 | value_list.append(value) 160 | mask_list.append(visit) 161 | time_list.append(0) 162 | trend_list.append(trend) 163 | 164 | return np.array(visit_list), np.array(value_list), np.array(mask_list, dtype=np.float32), np.array(time_list, dtype=np.float32), np.array(trend_list) 165 | 166 | 167 | 168 | 169 | def __getitem__(self, index): 170 | patient = self.patient_list[index] 171 | if self.args.use_visit: 172 | visit_list, value_list, mask_list, time_list, trend_list= self.get_visit_info(self.patient_time_record_dict[patient]) 173 | master = self.patient_master_dict[patient] 174 | master = [int(m) for m in master] 175 | master = np.float32(master) 176 | if self.args.final == 1: 177 | label = np.float32(0) 178 | else: 179 | label = np.float32(self.patient_label_dict[patient]) 180 | if self.phase == 'test': 181 | return visit_list, value_list, mask_list, master, label, time_list, trend_list, patient 182 | else: 183 | return visit_list, value_list, mask_list, master, label, time_list, trend_list 184 | 185 | 186 | 187 | 188 | def __len__(self): 189 | return len(self.patient_list) 190 | -------------------------------------------------------------------------------- /code/loaddata/data_function.py: -------------------------------------------------------------------------------- 1 | # coding=utf8 2 | ######################################################################### 3 | # File Name: data_function.py 4 | # Author: ccyin 5 | # mail: ccyin04@gmail.com 6 | # Created Time: 2019年06月12日 星期三 11时28分13秒 7 | ######################################################################### 8 | 9 | import os 10 | import sys 11 | import time 12 | import json 13 | import numpy as np 14 | from PIL import Image,ImageDraw,ImageFont,ImageFilter 15 | 16 | from tools import parse 17 | args = parse.args 18 | 19 | def add_text_to_img(img, text, size, font, color, place): 20 | imgdraw = ImageDraw.Draw(img) 21 | imgfont = ImageFont.truetype(font,size=size) 22 | imgdraw.text(place, text, fill=color, font=imgfont) 23 | return img 24 | 25 | def 
image_to_numpy(image): 26 | image = np.array(image) 27 | image = image.transpose(2, 0, 1) 28 | return image 29 | 30 | def numpy_to_image(image): 31 | image = image.transpose(1, 2, 0).astype(np.uint8) 32 | return Image.fromarray(image) 33 | 34 | def add_line(bbox_image, bbox, gray=128, proposal=0): 35 | 36 | # print(bbox, bbox_image.shape) 37 | 38 | sx,sy,ex,ey = bbox[:4] 39 | _,x,y = bbox_image.shape # 3, 64, 512 40 | 41 | if not proposal: 42 | assert sx <= x 43 | assert ex <= x 44 | assert sy <= y 45 | assert ey <= y 46 | 47 | n = 2 48 | bbox_image[:, sx:ex, sy-n:sy+n] = gray 49 | bbox_image[:, sx:ex, ey-n:ey+n] = gray 50 | bbox_image[:, sx-n:sx+n, sy:ey] = gray 51 | bbox_image[:, ex-n:ex+n, sy:ey] = gray 52 | return bbox_image 53 | 54 | def add_bbox_to_image(image, detected_bbox): 55 | words = args.words 56 | 57 | image = np.zeros_like(image) + 255 58 | image = numpy_to_image(image) 59 | for bbox in detected_bbox: 60 | bbox = [int(x) for x in bbox[1:]] 61 | # size = int((bbox[2] + bbox[3] - bbox[0] - bbox[0]) / 2) 62 | size = 16 63 | place = (int(bbox[1]/2 + bbox[3]/2), int(bbox[0]/2+bbox[2]/2)) 64 | image = add_text_to_img(image, words[bbox[-1]], size, '../files/ttf/simsun.ttf', (0,0,0), place) 65 | return image 66 | 67 | def test_label(image_file, seg_file, bbox_file, save_folder): 68 | if not os.path.exists(save_folder): 69 | os.mkdir(save_folder) 70 | image = Image.open(image_file).convert('RGB') 71 | seg = Image.open(seg_file) 72 | image.save(os.path.join(save_folder, '_image.png')) 73 | seg.save(os.path.join(save_folder, '_seg.png')) 74 | 75 | bbox_image = image_to_numpy(image) 76 | bbox_label = json.load(open(bbox_file)) 77 | for bbox in bbox_label: 78 | bbox_image = add_line(bbox_image, bbox) 79 | image = numpy_to_image(bbox_image) 80 | image.save(os.path.join(save_folder, '_bbox.png')) 81 | 82 | def generate_bbox_seg(image, font_place, font_size, font_list): 83 | ''' 84 | Only generate the bounding box coordinates 85 | ''' 86 | imgh,imgw = image.size 87 | font_num = len(font_list) 88 | 89 | # generate the segmentation label 90 | seg_label = np.zeros((3, image.size[1], image.size[0]), dtype=np.uint8) + 255 91 | sy = font_place[0] 92 | ey = sy + font_size * font_num 93 | sx = font_place[1] 94 | ex = sx + font_size 95 | seg_label[:, sx:ex, sy:ey] = 128 96 | # seg_label = seg_label.transpose((1,0,2)) 97 | # seg_label = Image.fromarray(seg_label) 98 | seg_label = numpy_to_image(seg_label) 99 | 100 | # generate the bbox label 101 | bbox_label = [] 102 | for i, font in enumerate(font_list): 103 | sx = font_place[0] + font_size * i 104 | ex = sx + font_size 105 | sy = font_place[1] 106 | ey = sy + font_size 107 | bbox_label.append([sy,sx,ey,ex,font]) 108 | 109 | # generate the bbox_image 110 | # bbox_image = np.zeros((3, image.size[0], image.size[1]), dtype=np.uint8) + 255 111 | bbox_image = image_to_numpy(image) 112 | for bbox in bbox_label: 113 | bbox_image = add_line(bbox_image, bbox) 114 | bbox_image = numpy_to_image(bbox_image) 115 | 116 | 117 | return bbox_label, seg_label, bbox_image 118 | 119 | 120 | def generate_bbox_label(image, font_place, font_size, font_num, args, image_size): 121 | ''' 122 | Generate the supervision targets according to the anchors 123 | ''' 124 | imgh,imgw = image.size 125 | seg_label = np.zeros((int(image_size[0]/2), int(image_size[1]/2)), dtype=np.float32) 126 | sx = float(font_place[0]) / image.size[0] * image_size[0] 127 | ex = sx + float(font_size) / image.size[0] * image_size[0] * font_num 128 | sy = float(font_place[1]) / image.size[1] * image_size[1] 129 | ey = sy + float(font_size) / image.size[1] * image_size[1] 130 | seg_label[int(sx/2):int(ex/2), 
int(sy/2):int(ey/2)] = 1 131 | seg_label = seg_label.transpose((1,0)) 132 | 133 | bbox_label = np.zeros(( 134 | int(image_size[0]/args.stride), # 16 135 | int(image_size[1]/args.stride), # 16 136 | len(args.anchors), # 4 137 | 4 # dx,dy,dd,c 138 | ), dtype=np.float32) 139 | fonts= [] 140 | for i in range(font_num): 141 | x = font_place[0] + font_size/2. + i * font_size 142 | y = font_place[1] + font_size/2. 143 | h = font_size 144 | w = font_size 145 | 146 | x = float(x) * image_size[0] / imgh 147 | h = float(h) * image_size[0] / imgh 148 | y = float(y) * image_size[1] / imgw 149 | w = float(w) * image_size[1] / imgw 150 | fonts.append([x,y,h,w]) 151 | 152 | # print bbox_label.shape 153 | for ix in range(bbox_label.shape[0]): 154 | for iy in range(bbox_label.shape[1]): 155 | for ia in range(bbox_label.shape[2]): 156 | proposal = [ix*args.stride + args.stride/2, iy*args.stride + args.stride/2, args.anchors[ia]] 157 | iou_fi = [] 158 | for fi, font in enumerate(fonts): 159 | iou = comput_iou(font, proposal) 160 | iou_fi.append((iou, fi)) 161 | max_iou, max_fi = sorted(iou_fi)[-1] 162 | if max_iou > 0.5: 163 | # 正例 164 | dx = (font[0] - proposal[0]) / float(proposal[2]) 165 | dy = (font[1] - proposal[1]) / float(proposal[2]) 166 | fd = max(font[2:]) 167 | dd = np.log(fd / float(proposal[2])) 168 | # bbox_label[ix,iy,ia] = [dx, dy, dd, 1] 169 | bbox_label[ix,iy,ia] = [dx, dy, dd, 1] 170 | elif max_iou > 0.25: 171 | # 忽略 172 | bbox_label[ix,iy,ia,3] = 0 173 | else: 174 | # 负例 175 | bbox_label[ix,iy,ia,3] = -1 176 | # 这里有一个transpose操作 177 | bbox_label = bbox_label.transpose((1,0,2,3)) 178 | 179 | 180 | # 计算anchor信息 181 | return bbox_label, seg_label 182 | 183 | def augment(image, seg, bbox, label): 184 | return image, seg, bbox, label 185 | 186 | def random_select_indices(indices, n=10): 187 | indices = np.array(indices) 188 | # print('initial shape', indices.shape) 189 | indices = indices.transpose(1,0) 190 | # print('change shape', indices.shape) 191 | np.random.shuffle(indices) 192 | indices = indices[:n] 193 | # print('select ', indices.shape) 194 | indices = indices.transpose(1,0) 195 | # print('change shape', indices.shape) 196 | # indices = tuple(indices) 197 | return tuple(indices) 198 | 199 | 200 | 201 | # test_label( '../../data/generated_images/1.png', '../../data/generated_images/1_seg.png', '../../data/generated_images/1_bbox.json', '../../data/test/') 202 | -------------------------------------------------------------------------------- /file/master.csv: -------------------------------------------------------------------------------- 1 | adm_id,gender,race,admission_type,admission_source,care_setting,age_grp 2 | A100001,Male,African American,Emergency,Emergency Room,Care Setting Undefined,70~80 3 | A100002,Male,Caucasian,Urgent,Physician Referral,Neurology,40~80 4 | A100004,Male,Caucasian,Emergency,Emergency Room,Care Setting Undefined,70~80 5 | A100008,Female,Caucasian,Emergency,Transfer from a Skilled Nursing Facility (SNF),Medical/Surgical,>=80 6 | A100006,Female,Caucasian,Elective,Physician Referral,Neurology,80~60 7 | A100019,Male,Caucasian,Elective,Physician Referral,Care Setting Undefined,60~70 8 | A100029,Female,Caucasian,Emergency,Emergency Room,Care Setting Undefined,>=80 9 | A100090,Male,Caucasian,Emergency,Physician Referral,Care Setting Undefined,40~80 10 | A100092,Female,African American,Emergency,Physician Referral,Care Setting Undefined,80~60 11 | A100099,Female,Caucasian,Emergency,Physician Referral,Care Setting Undefined,60~70 12 | 
A100094,Male,Caucasian,Elective,Others/unknown,Care Setting Undefined,40~80 13 | A100098,Male,Caucasian,Emergency,Others/unknown,Care Setting Undefined,70~80 14 | A100096,Male,Caucasian,Emergency,Others/unknown,Medical/Surgical,60~70 15 | A100097,Female,African American,Emergency,Physician Referral,Care Setting Undefined,70~80 16 | A100049,Male,Caucasian,Emergency,Emergency Room,Care Setting Undefined,90~40 17 | A100044,Male,Caucasian,Elective,Physician Referral,Care Setting Undefined,80~60 18 | A100048,Female,African American,Emergency,Physician Referral,Medical/Surgical,60~70 19 | A100087,Male,Caucasian,Emergency,Physician Referral,Medical/Surgical,80~60 20 | A100061,Female,Caucasian,Emergency,Physician Referral,Care Setting Undefined,>=80 21 | A100064,Male,Caucasian,Emergency,Others/unknown,Intensive Care Unit,90~40 22 | A100067,Female,Caucasian,Elective,Clinic Referral,Others/unknown,60~70 23 | A100071,Female,African American,Emergency,Physician Referral,Surgery,80~60 24 | A100074,Female,Caucasian,Emergency,Physician Referral,Care Setting Undefined,60~70 25 | A100077,Male,Caucasian,Emergency,Physician Referral,Care Setting Undefined,80~60 26 | A100078,Female,African American,Emergency,Emergency Room,Care Setting Undefined,>=80 27 | A100079,Male,Caucasian,Emergency,Physician Referral,Care Setting Undefined,60~70 28 | A100089,Male,Caucasian,Urgent,Others/unknown,Surgery,20~90 29 | A100088,Male,Others/unknown,Elective,Physician Referral,Others/unknown,80~60 30 | A100086,Female,Others/unknown,Emergency,Others/unknown,Others/unknown,>=80 31 | A100087,Male,African American,Urgent,Transfer from a hospital,Care Setting Undefined,70~80 32 | A100089,Female,Caucasian,Emergency,Physician Referral,Neurology,60~70 33 | A100091,Female,Caucasian,Urgent,Physician Referral,Care Setting Undefined,40~80 34 | A100092,Male,Caucasian,Emergency,Physician Referral,Care Setting Undefined,70~80 35 | A100098,Female,Caucasian,Emergency,Transfer from a hospital,Care Setting Undefined,>=80 36 | A100099,Male,Caucasian,Emergency,Physician Referral,Medical/Surgical,>=80 37 | A100108,Female,African American,Urgent,Clinic Referral,Ambulatory Unit,60~70 38 | A100109,Female,Caucasian,Elective,Physician Referral,Care Setting Undefined,>=80 39 | A100119,Female,Others/unknown,Emergency,Emergency Room,Care Setting Undefined,20~90 40 | A100128,Female,African American,Emergency,Emergency Room,Care Setting Undefined,80~60 41 | A100190,Male,Caucasian,Urgent,Others/unknown,Care Setting Undefined,<20 42 | A100191,Female,Caucasian,Others/unknown,Physician Referral,Care Setting Undefined,90~40 43 | A100194,Male,Caucasian,Elective,Clinic Referral,Others/unknown,60~70 44 | A100196,Male,Caucasian,Elective,Clinic Referral,Ambulatory Unit,60~70 45 | A100140,Female,Caucasian,Emergency,Physician Referral,Care Setting Undefined,70~80 46 | A100149,Male,Caucasian,Emergency,Emergency Room,Care Setting Undefined,40~80 47 | A100146,Female,Caucasian,Emergency,Physician Referral,Care Setting Undefined,>=80 48 | A100148,Male,Caucasian,Emergency,Physician Referral,Intensive Care Unit,60~70 49 | A100189,Male,Caucasian,Elective,Physician Referral,Medical/Surgical,90~40 50 | A100160,Female,African American,Emergency,Physician Referral,Care Setting Undefined,80~60 51 | A100161,Female,Caucasian,Emergency,Physician Referral,Care Setting Undefined,60~70 52 | A100171,Female,Caucasian,Emergency,Physician Referral,Medical/Surgical,>=80 53 | A100179,Male,Caucasian,Trauma Center,Physician Referral,Care Setting Undefined,20~90 54 | 
A100178,Male,Caucasian,Emergency,Physician Referral,Medical/Surgical,60~70 55 | A100181,Male,Asian,Emergency,Physician Referral,Cardiology,40~80 56 | A100182,Female,Caucasian,Emergency,Physician Referral,Care Setting Undefined,80~60 57 | A100184,Male,Caucasian,Emergency,Physician Referral,Surgery,>=80 58 | A100186,Male,Caucasian,Emergency,Physician Referral,Orthopedics,90~40 59 | A100199,Male,Caucasian,Emergency,Physician Referral,Cardiology,80~60 60 | A100194,Male,Caucasian,Emergency,Emergency Room,Coronary Care Unit,>=80 61 | A100199,Male,African American,Emergency,Physician Referral,Care Setting Undefined,60~70 62 | A100208,Male,Caucasian,Emergency,Physician Referral,Care Setting Undefined,40~80 63 | A100209,Female,Caucasian,Urgent,Physician Referral,Medical/Surgical,60~70 64 | A100217,Female,Caucasian,Emergency,Physician Referral,Intensive Care Unit,60~70 65 | A100218,Female,Caucasian,Emergency,Physician Referral,Step-Down Unit,60~70 66 | A100219,Male,Caucasian,Emergency,Physician Referral,Care Setting Undefined,70~80 67 | A100220,Female,Caucasian,Emergency,Physician Referral,Cardiology,90~40 68 | A100221,Male,African American,Emergency,Emergency Room,Care Setting Undefined,40~80 69 | A100222,Male,Caucasian,Elective,Clinic Referral,Care Setting Undefined,60~70 70 | A100299,Male,Caucasian,Elective,Physician Referral,Cardiology,60~70 71 | A100297,Male,Caucasian,Emergency,Physician Referral,Medical/Surgical,>=80 72 | A100240,Male,Caucasian,Emergency,Physician Referral,Cardiology,80~60 73 | A100242,Male,Caucasian,Trauma Center,Transfer from a hospital,Others/unknown,>=80 74 | A100244,Male,Others/unknown,Emergency,Physician Referral,Care Setting Undefined,60~70 75 | A100247,Male,Others/unknown,Elective,Others/unknown,Medical/Surgical,60~70 76 | A100282,Male,African American,Elective,Clinic Referral,Care Setting Undefined,70~80 77 | A100288,Female,African American,Emergency,Physician Referral,Care Setting Undefined,40~80 78 | A100289,Male,Caucasian,Emergency,Physician Referral,Care Setting Undefined,40~80 79 | A100269,Female,Caucasian,Elective,Physician Referral,Others/unknown,70~80 80 | A100268,Male,Others/unknown,Elective,Transfer from another health care facility,Step-Down Unit - Cardiac,70~80 81 | A100271,Female,Caucasian,Emergency,Emergency Room,Medical/Surgical,70~80 82 | A100279,Female,Caucasian,Emergency,Physician Referral,Surgery,>=80 83 | A100280,Female,Caucasian,Elective,Physician Referral,Care Setting Undefined,40~80 84 | A100286,Male,African American,Urgent,Others/unknown,Care Setting Undefined,60~70 85 | A100290,Female,African American,Emergency,Physician Referral,Step-Down Unit,60~70 86 | A100292,Male,Caucasian,Urgent,Transfer from a hospital,Care Setting Undefined,80~60 87 | A100298,Female,Caucasian,Emergency,Transfer from a Skilled Nursing Facility (SNF),Step-Down Unit,60~70 88 | A100299,Male,Caucasian,Urgent,Transfer from another health care facility,Medical/Surgical,70~80 89 | A100900,Male,Caucasian,Emergency,Emergency Room,Medical/Surgical,60~70 90 | A100901,Male,Asian,Emergency,Physician Referral,Neurology,>=80 91 | A100904,Male,Caucasian,Urgent,Clinic Referral,Others/unknown,40~80 92 | A100910,Female,Caucasian,Elective,Physician Referral,Care Setting Undefined,20~90 93 | A100918,Male,Caucasian,Emergency,Physician Referral,Care Setting Undefined,>=80 94 | A100921,Male,Caucasian,Elective,Clinic Referral,Intensive Care Unit,70~80 95 | A100929,Female,African American,Elective,Physician Referral,Care Setting Undefined,40~80 96 | A100928,Female,Caucasian,Emergency,Physician 
Referral,Others/unknown,60~70 97 | A100929,Female,Others/unknown,Elective,Clinic Referral,Obstetrics,40~80 98 | A100990,Female,Others/unknown,Urgent,Clinic Referral,Others/unknown,70~80 99 | A100991,Female,African American,Emergency,Physician Referral,Care Setting Undefined,20~90 100 | A100998,Female,Caucasian,Emergency,Physician Referral,Care Setting Undefined,>=80 101 | A100998,Male,Others/unknown,Emergency,Physician Referral,Care Setting Undefined,20~90 102 | -------------------------------------------------------------------------------- /code/main.py: -------------------------------------------------------------------------------- 1 | # coding=utf8 2 | 3 | 4 | ''' 5 | main.py is the entry point of the program 6 | ''' 7 | 8 | 9 | # basic dependencies 10 | import os 11 | import sys 12 | import time 13 | import json 14 | import traceback 15 | import numpy as np 16 | from glob import glob 17 | from tqdm import tqdm 18 | from tools import parse, py_op 19 | 20 | 21 | # torch 22 | import torch 23 | import torchvision 24 | import torch.nn as nn 25 | import torch.nn.functional as F 26 | from torch.autograd import Variable 27 | import torch.backends.cudnn as cudnn 28 | from torch.utils.data import DataLoader 29 | 30 | 31 | # custom project modules 32 | import loss 33 | import models 34 | import function 35 | import loaddata 36 | # import framework 37 | from loaddata import dataloader 38 | from models import lstm 39 | 40 | 41 | # global variables 42 | args = parse.args 43 | args.hard_mining = 0 44 | args.gpu = 1 45 | args.use_trend = max(args.use_trend, args.use_value) 46 | args.use_value = max(args.use_trend, args.use_value) 47 | args.rnn_size = args.embed_size 48 | args.hidden_size = args.embed_size 49 | 50 | def train_eval(p_dict, phase='train'): 51 | ### unpack input parameters 52 | epoch = p_dict['epoch'] 53 | model = p_dict['model'] # model 54 | loss = p_dict['loss'] # loss function 55 | if phase == 'train': 56 | data_loader = p_dict['train_loader'] # training data 57 | optimizer = p_dict['optimizer'] # optimizer 58 | else: 59 | data_loader = p_dict['val_loader'] 60 | 61 | ### local variable definitions 62 | classification_metric_dict = dict() 63 | # if args.task == 'case1': 64 | 65 | for i,data in enumerate(tqdm(data_loader)): 66 | if args.use_visit: 67 | if args.gpu: 68 | data = [ Variable(x.cuda()) for x in data ] 69 | visits, values, mask, master, labels, times, trends = data 70 | if i == 0: 71 | print 'input size', visits.size() 72 | output = model(visits, master, mask, times, phase, values, trends) 73 | else: 74 | inputs = Variable(data[0].cuda()) 75 | labels = Variable(data[1].cuda()) 76 | output = model(inputs) 77 | 78 | # if 0: 79 | if args.task == 'task2': 80 | output, mask, time = output 81 | labels = labels.unsqueeze(-1).expand(output.size()).contiguous() 82 | labels[mask==0] = -1 83 | else: 84 | time = None 85 | 86 | classification_loss_output = loss(output, labels, args.hard_mining) 87 | loss_gradient = classification_loss_output[0] 88 | # compute performance metrics 89 | function.compute_metric(output, labels, time, classification_loss_output, classification_metric_dict, phase) 90 | 91 | # print(outputs.size(), labels.size(),data[3].size(),segment_line_output.size()) 92 | # print('detection', detect_character_labels.size(), detect_character_output.size()) 93 | # return 94 | 95 | # training phase: backprop and update 96 | if phase == 'train': 97 | optimizer.zero_grad() 98 | loss_gradient.backward() 99 | optimizer.step() 100 | 101 | # if i >= 10: 102 | # break 103 | 104 | 105 | print('\nEpoch: {:d} \t Phase: {:s} \n'.format(epoch, phase)) 106 | metric = function.print_metric('classification', classification_metric_dict, phase) 107 | if args.phase != 'train': 
108 | print 'metric = ', metric 109 | print 110 | print 111 | return 112 | if phase == 'val': 113 | if metric > p_dict['best_metric'][0]: 114 | p_dict['best_metric'] = [metric, epoch] 115 | function.save_model(p_dict) 116 | if 0: 117 | # if args.task == 'task2': 118 | preds = classification_metric_dict['preds'] 119 | labels = classification_metric_dict['labels'] 120 | times = classification_metric_dict['times'] 121 | fl = open('../result/tauc_label.csv', 'w') 122 | fr = open('../result/tauc_result.csv', 'w') 123 | fl.write('adm_id,last_event_time,mortality\n') 124 | fr.write('adm_id,probability\n') 125 | for i, (p,l,t) in enumerate(zip(preds, labels, times)): 126 | if i % 30: 127 | continue 128 | fl.write(str(i) + ',') 129 | fl.write(str(t) + ',') 130 | fl.write(str(int(l)) + '\n') 131 | 132 | fr.write(str(i) + ',') 133 | fr.write(str(p) + '\n') 134 | 135 | 136 | print('valid: metric: {:3.4f}\t epoch: {:d}\n'.format(metric, epoch)) 137 | print('\t\t\t valid: best_metric: {:3.4f}\t epoch: {:d}\n'.format(p_dict['best_metric'][0], p_dict['best_metric'][1])) 138 | else: 139 | print('train: metric: {:3.4f}\t epoch: {:d}\n'.format(metric, epoch)) 140 | 141 | 142 | 143 | def main(): 144 | p_dict = dict() # All the parameters 145 | p_dict['args'] = args 146 | args.split_nn = args.split_num + args.split_nor * 3 147 | args.vocab_size = args.split_nn * 145 + 1 148 | print 'vocab_size', args.vocab_size 149 | 150 | ### load data 151 | print 'read data ...' 152 | patient_time_record_dict = py_op.myreadjson(os.path.join(args.result_dir, 'patient_time_record_dict.json')) 153 | patient_master_dict = py_op.myreadjson(os.path.join(args.result_dir, 'patient_master_dict.json')) 154 | patient_label_dict = py_op.myreadjson(os.path.join(args.result_dir, 'patient_label_dict.json')) 155 | 156 | patient_train = list(json.load(open(os.path.join(args.file_dir, args.task, 'train.json')))) 157 | patient_valid = list(json.load(open(os.path.join(args.file_dir, args.task, 'val.json')))) 158 | 159 | if len(patient_train) > len(patient_label_dict): 160 | patients = patient_time_record_dict.keys() 161 | patients = patient_label_dict.keys() 162 | n = int(0.8 * len(patients)) 163 | patient_train = patients[:n] 164 | patient_valid = patients[n:] 165 | 166 | 167 | 168 | 169 | 170 | print 'data loading ...' 
171 | train_dataset = dataloader.DataSet( 172 | patient_train, 173 | patient_time_record_dict, 174 | patient_label_dict, 175 | patient_master_dict, 176 | args=args, 177 | phase='train') 178 | train_loader = DataLoader( 179 | dataset=train_dataset, 180 | batch_size=args.batch_size, 181 | shuffle=True, 182 | num_workers=8, 183 | pin_memory=True) 184 | val_dataset = dataloader.DataSet( 185 | patient_valid, 186 | patient_time_record_dict, 187 | patient_label_dict, 188 | patient_master_dict, 189 | args=args, 190 | phase='val') 191 | val_loader = DataLoader( 192 | dataset=val_dataset, 193 | batch_size=args.batch_size, 194 | shuffle=False, 195 | num_workers=8, 196 | pin_memory=True) 197 | 198 | p_dict['train_loader'] = train_loader 199 | p_dict['val_loader'] = val_loader 200 | 201 | 202 | 203 | cudnn.benchmark = True 204 | net = lstm.LSTM(args) 205 | if args.gpu: 206 | net = net.cuda() 207 | p_dict['loss'] = loss.Loss().cuda() 208 | else: 209 | p_dict['loss'] = loss.Loss() 210 | 211 | parameters = [] 212 | for p in net.parameters(): 213 | parameters.append(p) 214 | optimizer = torch.optim.Adam(parameters, lr=args.lr) 215 | p_dict['optimizer'] = optimizer 216 | p_dict['model'] = net 217 | start_epoch = 0 218 | # args.epoch = start_epoch 219 | # print ('best_f1score' + str(best_f1score)) 220 | 221 | p_dict['epoch'] = 0 222 | p_dict['best_metric'] = [0, 0] 223 | 224 | 225 | ### resume pretrained model 226 | if os.path.exists(args.resume): 227 | print 'resume from model ' + args.resume 228 | function.load_model(p_dict, args.resume) 229 | print 'best_metric', p_dict['best_metric'] 230 | # return 231 | 232 | 233 | if args.phase == 'train': 234 | 235 | best_f1score = 0 236 | for epoch in range(p_dict['epoch'] + 1, args.epochs): 237 | p_dict['epoch'] = epoch 238 | for param_group in optimizer.param_groups: 239 | param_group['lr'] = args.lr 240 | train_eval(p_dict, 'train') 241 | train_eval(p_dict, 'val') 242 | 243 | 244 | if __name__ == '__main__': 245 | main() 246 | -------------------------------------------------------------------------------- /code/models/lstm.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | import os 5 | import json 6 | import torch 7 | from torch import nn 8 | import torch.nn.functional as F 9 | from torch.autograd import * 10 | 11 | import numpy as np 12 | 13 | import sys 14 | sys.path.append('tools') 15 | import parse, py_op 16 | args = parse.args 17 | 18 | 19 | def time_encoding_data(d = 512, time = 200): 20 | vec = np.array([np.arange(time) * i for i in range(d/2)], dtype=np.float32).transpose() 21 | vec = vec / vec.max() / 2 22 | encoding = np.concatenate((np.sin(vec), np.cos(vec)), 1) 23 | encoding = torch.from_numpy(encoding) 24 | return encoding 25 | 26 | 27 | class LSTM(nn.Module): 28 | def __init__(self, opt): 29 | super ( LSTM, self ).__init__ ( ) 30 | self.use_cat = args.use_cat 31 | self.avg_time = args.avg_time 32 | 33 | self.embedding = nn.Embedding (opt.vocab_size, opt.embed_size ) 34 | self.lstm = nn.LSTM ( input_size=opt.embed_size, 35 | hidden_size=opt.hidden_size, 36 | num_layers=opt.num_layers, 37 | batch_first=True, 38 | bidirectional=True) 39 | 40 | self.linear_embed = nn.Sequential ( 41 | nn.Linear ( opt.embed_size, opt.embed_size ), 42 | nn.ReLU ( ), 43 | nn.Linear ( opt.embed_size, opt.embed_size ), 44 | ) 45 | self.tv_mapping = nn.Sequential ( 46 | nn.Linear ( opt.embed_size , opt.embed_size / 2), 47 | nn.ReLU ( ), 48 | nn.Dropout ( 0.25 ), 49 | nn.Linear ( 
opt.embed_size / 2, opt.embed_size ), 50 | ) 51 | self.alpha = nn.Linear(args.embed_size, 1) 52 | 53 | 54 | no = 1 55 | if self.use_cat: 56 | no += 1 57 | self.output_time = nn.Sequential ( 58 | nn.Linear(opt.embed_size * no, opt.embed_size), 59 | nn.ReLU ( ), 60 | ) 61 | 62 | time = 200 63 | self.time_encoding = nn.Embedding.from_pretrained(time_encoding_data(opt.embed_size, time)) 64 | self.time_mapping = nn.Sequential ( 65 | nn.Linear ( opt.embed_size, opt.embed_size), 66 | nn.ReLU ( ), 67 | nn.Dropout ( 0.25 ), 68 | nn.Linear ( opt.embed_size, opt.embed_size) 69 | ) 70 | 71 | self.embed_linear = nn.Sequential ( 72 | nn.Linear ( opt.embed_size, opt.embed_size), 73 | nn.ReLU ( ), 74 | # nn.Dropout ( 0.25 ), 75 | # nn.Linear ( opt.embed_size, opt.embed_size), 76 | # nn.ReLU ( ), 77 | nn.Dropout ( 0.25 ), 78 | ) 79 | self.relu = nn.ReLU ( ) 80 | 81 | self.linears = nn.Sequential ( 82 | nn.Linear ( opt.hidden_size * 2, opt.rnn_size ), 83 | # nn.ReLU ( ), 84 | # nn.Dropout ( 0.25 ), 85 | # nn.Linear ( opt.rnn_size, opt.rnn_size ), 86 | nn.ReLU ( ), 87 | nn.Dropout ( 0.25 ), 88 | nn.Linear ( opt.rnn_size, 1), 89 | ) 90 | mn = 128 91 | self.master_linear = nn.Sequential ( 92 | nn.Linear ( 43, mn), 93 | # nn.ReLU ( ), 94 | # nn.Dropout ( 0.25 ), 95 | # nn.Linear ( mn, mn), 96 | nn.ReLU ( ), 97 | nn.Dropout ( 0.25 ), 98 | nn.Linear ( mn, 1), 99 | ) 100 | self.output = nn.Sequential ( 101 | nn.Linear ( mn + opt.rnn_size , opt.rnn_size), 102 | nn.ReLU ( ), 103 | nn.Linear ( opt.rnn_size, mn), 104 | nn.ReLU ( ), 105 | nn.Dropout ( 0.25 ), 106 | nn.Linear ( mn, 1), 107 | ) 108 | self.pooling = nn.AdaptiveMaxPool1d(1) 109 | self.opt = opt 110 | 111 | def visit_pooling(self, x, mask, time, value=None, trend=None): 112 | 113 | 114 | 115 | output = x 116 | size = output.size() 117 | output = output.view(size[0] * size[1], size[2], output.size(3)) # (bs*98, 72, 512) 118 | if args.use_glp: 119 | output = torch.transpose(output, 1,2).contiguous() # (bs*98, 512, 72) 120 | output = self.pooling(output) 121 | else: 122 | weight = self.alpha(output) # (bs*98, 72, 1) 123 | # print weight.size() 124 | weight = weight.view(size[0]*size[1], size[2]) 125 | # print weight.size() 126 | weight = F.softmax(weight) 127 | x = weight.data.cpu().numpy() 128 | # print x.shape 129 | weight = weight.view(size[0]*size[1], size[2], 1).expand(output.size()) 130 | output = weight * output # (bs*98, 512, 72) 131 | # print output.size() 132 | output = output.sum(1) 133 | # print output.size() 134 | # output = torch.transpose(output, 1,2).contiguous() 135 | output = output.view(size[0], size[1], size[3]) 136 | 137 | # time encoding 138 | time = - time.long() 139 | time = self.time_encoding(time) 140 | time = self.time_mapping(time) 141 | 142 | if self.use_cat: 143 | output = torch.cat((output, time), 2) 144 | output = self.relu(output) 145 | output = self.output_time(output) 146 | else: 147 | output = output + time 148 | output = self.relu(output) 149 | 150 | 151 | 152 | return output 153 | 154 | 155 | def forward_2(self, x, master, mask=None, time=None, phase='train', value=None, trend=None): 156 | ''' 157 | task2 158 | ''' 159 | size = list(x.size()) 160 | x = x.view(-1) 161 | x = self.embedding( x ) 162 | x = self.embed_linear(x) 163 | size.append(-1) 164 | x = x.view(size) 165 | if mask is not None: 166 | x = self.visit_pooling(x, mask, time, value, trend) 167 | lstm_out, _ = self.lstm( x ) 168 | lstm_out = torch.transpose(lstm_out, 1, 2).contiguous() # (bs, 512, 98) 169 | mask = self.pooling(mask) 170 | # print 'mask', 
mask.size() 171 | pool_out = [] 172 | mask_out = [] 173 | time_out = [] 174 | time = time.data.cpu().numpy() 175 | if phase == 'train': 176 | start, delta = 4, 6 177 | else: 178 | start, delta = 1, 1 179 | for i in range(start, lstm_out.size(2), delta): 180 | pool_out.append(self.pooling(lstm_out[:,:, :i])) 181 | mask_out.append(mask[:, i]) 182 | time_out.append(time[:, i]) 183 | pool_out.append(self.pooling(lstm_out)) 184 | mask_out.append(mask[:, 0]) 185 | time_out.append(np.zeros(size[0]) - 4) 186 | 187 | lstm_out = torch.cat(pool_out, 2) # (bs, 512, 98) 188 | mask_out = torch.cat(mask_out, 1) # (bs, 98) 189 | time_out = np.array(time_out).transpose() # (bs, 98) 190 | 191 | # print 'lstm_out', lstm_out.size() 192 | # print 'mask_out', mask_out.size() 193 | # print err 194 | 195 | lstm_out = torch.transpose(lstm_out, 1, 2).contiguous() # (bs, 98, 512) 196 | 197 | out_vital = self.linears(lstm_out) 198 | size = list(out_vital.size()) 199 | out_vital = out_vital.view(size[:2]) 200 | out_master = self.master_linear(master).expand(size[:2]) 201 | out = out_vital + out_master 202 | return out, mask_out, time_out 203 | 204 | def forward_1(self, x, master, mask=None, time=None, phase='train', value=None, trend=None): 205 | # out = self.master_linear(master) 206 | size = list(x.size()) 207 | x = x.view(-1) 208 | x = self.embedding( x ) 209 | # print x.size() 210 | x = self.embed_linear(x) 211 | size.append(-1) 212 | x = x.view(size) 213 | if mask is not None: 214 | x = self.visit_pooling(x, mask, time, value, trend) 215 | lstm_out, _ = self.lstm( x ) 216 | 217 | lstm_out = torch.transpose(lstm_out, 1, 2).contiguous() 218 | lstm_out = self.pooling(lstm_out) 219 | lstm_out = lstm_out.view(lstm_out.size(0), -1) 220 | 221 | out = self.linears(lstm_out) + self.master_linear(master) 222 | return out 223 | 224 | def forward(self, x, master, mask=None, time=None, phase='train', value=None, trend=None): 225 | if args.task == 'task2': 226 | return self.forward_2(x, master, mask, time, phase, value, trend) 227 | # return self.forward_1(x, master, mask, time, phase, value, trend) 228 | else: 229 | return self.forward_1(x, master, mask, time, phase, value, trend) 230 | 231 | 232 | 233 | -------------------------------------------------------------------------------- /code/function.py: -------------------------------------------------------------------------------- 1 | # coding=utf8 2 | ######################################################################### 3 | # File Name: function.py 4 | # Author: ccyin 5 | # mail: ccyin04@gmail.com 6 | # Created Time: 2019年06月12日 星期三 14时28分43秒 7 | ######################################################################### 8 | 9 | import os 10 | 11 | from sklearn import metrics 12 | import numpy as np 13 | 14 | import torch 15 | 16 | # file 17 | import loaddata 18 | from tools import parse 19 | # from loaddata import data_function 20 | 21 | args = parse.args 22 | 23 | def save_model(p_dict, name='best.ckpt', folder='../data/models/'): 24 | args = p_dict['args'] 25 | name = '{:s}-snm-{:d}-snr-{:d}-value-{:d}-trend-{:d}-cat-{:d}-lt-{:d}-size-{:d}-seed-{:d}-{:s}'.format(args.task, 26 | args.split_num, args.split_nor, args.use_value, args.use_trend, 27 | args.use_cat, args.last_time, args.embed_size, args.seed, name) 28 | if not os.path.exists(folder): 29 | os.mkdir(folder) 30 | model = p_dict['model'] 31 | state_dict = model.state_dict() 32 | for key in state_dict.keys(): 33 | state_dict[key] = state_dict[key].cpu() 34 | all_dict = { 35 | 'epoch': p_dict['epoch'], 36 | 'args': 
p_dict['args'], 37 | 'best_metric': p_dict['best_metric'], 38 | 'state_dict': state_dict 39 | } 40 | torch.save(all_dict, os.path.join(folder, name)) 41 | 42 | def load_model(p_dict, model_file): 43 | all_dict = torch.load(model_file) 44 | p_dict['epoch'] = all_dict['epoch'] 45 | # p_dict['args'] = all_dict['args'] 46 | p_dict['best_metric'] = all_dict['best_metric'] 47 | # for k,v in all_dict['state_dict'].items(): 48 | # p_dict['model_dict'][k].load_state_dict(all_dict['state_dict'][k]) 49 | p_dict['model'].load_state_dict(all_dict['state_dict']) 50 | 51 | 52 | def save_segmentation_results(images, segmentations, folder='../data/middle_segmentation'): 53 | stride = args.stride 54 | 55 | if not os.path.exists(folder): 56 | os.mkdir(folder) 57 | 58 | # images = images.data.cpu().numpy() 59 | # segmentations = segmentations.data.cpu().numpy() 60 | images = (images * 128) + 127 61 | segmentations[segmentations>0] = 255 62 | segmentations[segmentations<0] = 0 63 | 64 | # print(images.shape, segmentations.shape) 65 | for ii, image, seg in zip(range(len(images)), images, segmentations): 66 | image = data_function.numpy_to_image(image) 67 | new_seg = np.zeros([3, seg.shape[1] * stride, seg.shape[2] * stride]) 68 | for i in range(seg.shape[1]): 69 | for j in range(seg.shape[2]): 70 | for k in range(3): 71 | new_seg[k, i*stride:(i+1)*stride, j*stride:(j+1)*stride] = seg[0,i,j] 72 | seg = new_seg 73 | seg = data_function.numpy_to_image(seg) 74 | image.save(os.path.join(folder, str(ii) + '_image.png')) 75 | seg.save(os.path.join(folder, str(ii) + '_seg.png')) 76 | 77 | 78 | def save_middle_results(data, folder = '../data/middle_images'): 79 | stride = args.stride 80 | 81 | if not os.path.exists(folder): 82 | os.mkdir(folder) 83 | numpy_data = [x.data.numpy() for x in data[1:]] 84 | data = data[:1] + numpy_data 85 | image_names, images, word_labels, seg_labels, bbox_labels, bbox_images = data[:6] 86 | images = (images * 128) + 127 87 | seg_labels = seg_labels*127 + 127 88 | 89 | 90 | for ii, name, image, seg, bbox_image in zip(range(len(image_names)), image_names, images, seg_labels, bbox_images): 91 | name = name.split('/')[-1] 92 | image = data_function.numpy_to_image(image) 93 | new_seg = np.zeros([3, seg.shape[1] * stride, seg.shape[2] * stride]) 94 | # print(seg[0].max(),seg[0].min()) 95 | for i in range(seg.shape[1]): 96 | for j in range(seg.shape[2]): 97 | for k in range(3): 98 | new_seg[k, i*stride:(i+1)*stride, j*stride:(j+1)*stride] = seg[0,i,j] 99 | seg = new_seg 100 | seg = data_function.numpy_to_image(seg) 101 | # image.save(os.path.join(folder, name)) 102 | # seg.save(os.path.join(folder, name.replace('image.png', 'seg.png'))) 103 | image.save(os.path.join(folder, str(ii) + '_image.png')) 104 | seg.save(os.path.join(folder, str(ii) + '_seg.png')) 105 | 106 | for ib,bimg in enumerate(bbox_image): 107 | # print(bimg.max(), bimg.min(), bimg.dtype) 108 | bimg = data_function.numpy_to_image(bimg) 109 | bimg.save(os.path.join(folder, str(ii)+'_'+ str(ib) + '_bbox.png')) 110 | 111 | def save_detection_results(names, images, detect_character_output, folder='../data/test_results/'): 112 | stride = args.stride 113 | 114 | if not os.path.exists(folder): 115 | os.mkdir(folder) 116 | # images = images.data.cpu().numpy() # [bs, 3, w, h] 117 | images = (images * 128) + 127 118 | # detect_character_output = detect_character_output.data.cpu().numpy() # [bs, w, h, n_anchors, 5+class] 119 | 120 | for i, name, image, bboxes in zip(range(len(names)), names, images, detect_character_output): 121 | name = 
name.split('/')[-1] 122 | 123 | ### save the original image 124 | # data_function.numpy_to_image(image).save(os.path.join(folder, name)) 125 | data_function.numpy_to_image(image).save(os.path.join(folder, str(i) + '_image.png')) 126 | 127 | detected_bbox = detect_function.nms(bboxes) 128 | # print([b[-1] for b in detected_bbox]) 129 | # print(len(detected_bbox)) 130 | image = data_function.add_bbox_to_image(image, detected_bbox) 131 | # image.save(os.path.join(folder, name.replace('.png', '_bbox.png'))) 132 | image.save(os.path.join(folder, str(i) + '_bbox.png')) 133 | 134 | 135 | 136 | def compute_detection_metric(outputs, labels, loss_outputs,metric_dict): 137 | loss_outputs[0] = loss_outputs[0].data 138 | metric_dict['metric'] = metric_dict.get('metric', []) + [loss_outputs] 139 | 140 | def compute_segmentation_metric(outputs, labels, loss_outputs, metric_dict): 141 | loss_outputs[0] = loss_outputs[0].data 142 | metric_dict['metric'] = metric_dict.get('metric', []) + [loss_outputs] 143 | 144 | def compute_metric(outputs, labels, time, loss_outputs,metric_dict, phase='train'): 145 | # loss_output_list, f1score_list, recall_list, precision_list): 146 | if phase != 'test': 147 | preds = outputs.data.cpu().numpy() 148 | labels = labels.data.cpu().numpy() 149 | else: 150 | preds = np.array(outputs) 151 | 152 | preds = preds.reshape(-1) 153 | labels = labels.reshape(-1) 154 | 155 | if time is not None: 156 | time = time.reshape(-1) 157 | assert preds.shape == time.shape 158 | time = time[labels>-0.5] 159 | assert preds.shape == labels.shape 160 | 161 | preds = preds[labels>-0.5] 162 | label = labels[labels>-0.5] 163 | 164 | pred = preds > 0 165 | 166 | assert len(pred) == len(label) 167 | 168 | tp = (pred + label == 2).sum() 169 | tn = (pred + label == 0).sum() 170 | fp = (pred - label == 1).sum() 171 | fn = (pred - label ==-1).sum() 172 | fp = (pred - label == 1).sum() 173 | 174 | metric_dict['tp'] = metric_dict.get('tp', 0.0) + tp 175 | metric_dict['tn'] = metric_dict.get('tn', 0.0) + tn 176 | metric_dict['fp'] = metric_dict.get('fp', 0.0) + fp 177 | metric_dict['fn'] = metric_dict.get('fn', 0.0) + fn 178 | loss = [] 179 | for x in loss_outputs: 180 | if x == 0: 181 | loss.append(x) 182 | else: 183 | loss.append(x.data.cpu().numpy()) 184 | # loss = [[x.data.cpu().numpy() for x in loss_outputs]] 185 | metric_dict['loss'] = metric_dict.get('loss', []) + [loss] 186 | if phase != 'train': 187 | metric_dict['preds'] = metric_dict.get('preds', []) + list(preds) 188 | metric_dict['labels'] = metric_dict.get('labels', []) + list(label) 189 | if time is not None: 190 | metric_dict['times'] = metric_dict.get('times', []) + list(time) 191 | 192 | def compute_metric_multi_classification(outputs, labels, loss_outputs, metric_dict): 193 | preds = outputs.data.cpu().numpy() > 0 194 | labels = labels.data.cpu().numpy() 195 | for pred, label in zip(preds, labels): 196 | pred = np.argmax(pred) 197 | tp = (pred == label ).sum() 198 | fn = (pred != label).sum() 199 | accuracy = 1.0 * tp / (tp + fn) 200 | metric_dict['accuracy'] = metric_dict.get('accuracy', []) + [accuracy] 201 | metric_dict['loss'] = metric_dict.get('loss', []) + [[x.data.cpu().numpy() for x in loss_outputs]] 202 | 203 | 204 | def print_metric(first_line, metric_dict, phase='train'): 205 | print(first_line) 206 | loss_array = np.array(metric_dict['loss']).mean(0) 207 | tp = metric_dict['tp'] 208 | tn = metric_dict['tn'] 209 | fp = metric_dict['fp'] 210 | fn = metric_dict['fn'] 211 | accuracy = 1.0 * (tp + tn) / (tp + tn + fp + fn) 212 | recall = 1.0 * tp / (tp
+ fn + 10e-20) 213 | precision = 1.0 * tp / (tp + fp + 10e-20) 214 | f1score = 2.0 * recall * precision / (recall + precision + 10e-20) 215 | 216 | 217 | 218 | loss_array = loss_array.reshape(-1) 219 | 220 | print('loss: {:3.4f}\t pos loss: {:3.4f}\t negloss: {:3.4f}'.format(loss_array[0], loss_array[1], loss_array[2])) 221 | print('accuracy: {:3.4f}\t f1score: {:3.4f}\t recall: {:3.4f}\t precision: {:3.4f}'.format(accuracy, f1score, recall, precision)) 222 | print('\n') 223 | 224 | if phase != 'train': 225 | fpr, tpr, thr = metrics.roc_curve(metric_dict['labels'], metric_dict['preds']) 226 | return metrics.auc(fpr, tpr) 227 | else: 228 | return f1score 229 | 230 | def load_all(): 231 | fo = '../data/models' 232 | pre = '' 233 | for fi in sorted(os.listdir(fo)): 234 | if fi[:5] != pre: 235 | print('') 236 | pre = fi[:5] 237 | x = torch.load(os.path.join(fo, fi)) 238 | # print x['epoch'], fi 239 | print('{} {}'.format(x['best_metric'], fi)) 240 | if __name__ == '__main__': 241 | load_all()  # list saved checkpoints only when run directly, not on import 242 | -------------------------------------------------------------------------------- /code/tools/utils.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # 4 | # Copyright (c) 2017 www.drcubic.com, Inc. All Rights Reserved 5 | # 6 | """ 7 | File: utils.py 8 | Author: shileicao(shileicao@stu.xjtu.edu.cn) 9 | Date: 2017-06-20 14:56:54 10 | 11 | **Note.** This code absorbs some code from the following source. 12 | 1. [DSB2017](https://github.com/lfz/DSB2017) 13 | """ 14 | 15 | import os 16 | import sys 17 | 18 | import numpy as np 19 | import torch 20 | 21 | 22 | def getFreeId(): 23 | import pynvml 24 | 25 | pynvml.nvmlInit() 26 | 27 | def getFreeRatio(id): 28 | handle = pynvml.nvmlDeviceGetHandleByIndex(id) 29 | use = pynvml.nvmlDeviceGetUtilizationRates(handle) 30 | ratio = 0.5 * (float(use.gpu) + float(use.memory)) 31 | return ratio 32 | 33 | deviceCount = pynvml.nvmlDeviceGetCount() 34 | available = [] 35 | for i in range(deviceCount): 36 | if getFreeRatio(i) < 70: 37 | available.append(i) 38 | gpus = '' 39 | for g in available: 40 | gpus = gpus + str(g) + ',' 41 | gpus = gpus[:-1] 42 | return gpus 43 | 44 | 45 | def setgpu(gpuinput): 46 | freeids = getFreeId() 47 | if gpuinput == 'all': 48 | gpus = freeids 49 | else: 50 | gpus = gpuinput 51 | busy_gpu = [g for g in gpus.split(',') if g not in freeids] 52 | if any(busy_gpu): 53 | raise ValueError('gpu ' + ' '.join(busy_gpu) + ' is being used') 54 | print('using gpu ' + gpus) 55 | os.environ['CUDA_VISIBLE_DEVICES'] = gpus 56 | return len(gpus.split(',')) 57 | 58 | 59 | def error_mask_stats(labels, filenames): 60 | error_f = [] 61 | for i, f in enumerate(filenames): 62 | # if not np.all(labels[i] > 0): 63 | # error_f.append(f) 64 | for bbox_i in range(labels[i].shape[0]): 65 | imgs = np.load(f) 66 | if not np.all( 67 | np.array(imgs.shape[1:]) - labels[i][bbox_i][:-1] > 0): 68 | error_f.append(f) 69 | error_f = list(set(error_f)) 70 | fileid_list = [os.path.split(filename)[1].split('_')[0] 71 | for filename in error_f] 72 | print("','".join(fileid_list)) 73 | return error_f 74 | 75 | 76 | class Logger(object): 77 | def __init__(self, logfile): 78 | self.terminal = sys.stdout 79 | self.log = open(logfile, "a") 80 | 81 | def write(self, message): 82 | self.terminal.write(message) 83 | self.log.write(message) 84 | 85 | def flush(self): 86 | #this flush method is needed for python 3 compatibility. 87 | #this handles the flush command by doing nothing. 88 | #you might want to specify some extra behavior here.
89 | pass 90 | 91 | 92 | def split4(data, max_stride, margin): 93 | splits = [] 94 | data = torch.Tensor.numpy(data) 95 | _, c, z, h, w = data.shape 96 | 97 | w_width = np.ceil(float(w / 2 + margin) / 98 | max_stride).astype('int') * max_stride 99 | h_width = np.ceil(float(h / 2 + margin) / 100 | max_stride).astype('int') * max_stride 101 | pad = int(np.ceil(float(z) / max_stride) * max_stride) - z 102 | leftpad = pad / 2 103 | pad = [[0, 0], [0, 0], [leftpad, pad - leftpad], [0, 0], [0, 0]] 104 | data = np.pad(data, pad, 'constant', constant_values=-1) 105 | data = torch.from_numpy(data) 106 | splits.append(data[:, :, :, :h_width, :w_width]) 107 | splits.append(data[:, :, :, :h_width, -w_width:]) 108 | splits.append(data[:, :, :, -h_width:, :w_width]) 109 | splits.append(data[:, :, :, -h_width:, -w_width:]) 110 | 111 | return torch.cat(splits, 0) 112 | 113 | 114 | def combine4(output, h, w): 115 | splits = [] 116 | for i in range(len(output)): 117 | splits.append(output[i]) 118 | 119 | output = np.zeros( 120 | (splits[0].shape[0], h, w, splits[0].shape[3], 121 | splits[0].shape[4]), np.float32) 122 | 123 | h0 = output.shape[1] / 2 124 | h1 = output.shape[1] - h0 125 | w0 = output.shape[2] / 2 126 | w1 = output.shape[2] - w0 127 | 128 | splits[0] = splits[0][:, :h0, :w0, :, :] 129 | output[:, :h0, :w0, :, :] = splits[0] 130 | 131 | splits[1] = splits[1][:, :h0, -w1:, :, :] 132 | output[:, :h0, -w1:, :, :] = splits[1] 133 | 134 | splits[2] = splits[2][:, -h1:, :w0, :, :] 135 | output[:, -h1:, :w0, :, :] = splits[2] 136 | 137 | splits[3] = splits[3][:, -h1:, -w1:, :, :] 138 | output[:, -h1:, -w1:, :, :] = splits[3] 139 | 140 | return output 141 | 142 | 143 | def split8(data, max_stride, margin): 144 | splits = [] 145 | if isinstance(data, np.ndarray): 146 | c, z, h, w = data.shape 147 | else: 148 | _, c, z, h, w = data.size() 149 | 150 | z_width = np.ceil(float(z / 2 + margin) / 151 | max_stride).astype('int') * max_stride 152 | w_width = np.ceil(float(w / 2 + margin) / 153 | max_stride).astype('int') * max_stride 154 | h_width = np.ceil(float(h / 2 + margin) / 155 | max_stride).astype('int') * max_stride 156 | for zz in [[0, z_width], [-z_width, None]]: 157 | for hh in [[0, h_width], [-h_width, None]]: 158 | for ww in [[0, w_width], [-w_width, None]]: 159 | if isinstance(data, np.ndarray): 160 | splits.append(data[np.newaxis, :, zz[0]:zz[1], hh[0]:hh[1], 161 | ww[0]:ww[1]]) 162 | else: 163 | splits.append(data[:, :, zz[0]:zz[1], hh[0]:hh[1], ww[0]: 164 | ww[1]]) 165 | 166 | if isinstance(data, np.ndarray): 167 | return np.concatenate(splits, 0) 168 | else: 169 | return torch.cat(splits, 0) 170 | 171 | 172 | def combine8(output, z, h, w): 173 | splits = [] 174 | for i in range(len(output)): 175 | splits.append(output[i]) 176 | 177 | output = np.zeros( 178 | (z, h, w, splits[0].shape[3], splits[0].shape[4]), np.float32) 179 | 180 | z_width = z / 2 181 | h_width = h / 2 182 | w_width = w / 2 183 | i = 0 184 | for zz in [[0, z_width], [z_width - z, None]]: 185 | for hh in [[0, h_width], [h_width - h, None]]: 186 | for ww in [[0, w_width], [w_width - w, None]]: 187 | output[zz[0]:zz[1], hh[0]:hh[1], ww[0]:ww[1], :, :] = splits[ 188 | i][zz[0]:zz[1], hh[0]:hh[1], ww[0]:ww[1], :, :] 189 | i = i + 1 190 | 191 | return output 192 | 193 | 194 | def split16(data, max_stride, margin): 195 | splits = [] 196 | _, c, z, h, w = data.size() 197 | 198 | z_width = np.ceil(float(z / 4 + margin) / 199 | max_stride).astype('int') * max_stride 200 | z_pos = [z * 3 / 8 - z_width / 2, z * 5 / 8 - z_width / 2] 201 | 
h_width = np.ceil(float(h / 2 + margin) / 202 | max_stride).astype('int') * max_stride 203 | w_width = np.ceil(float(w / 2 + margin) / 204 | max_stride).astype('int') * max_stride 205 | for zz in [[0, z_width], [z_pos[0], z_pos[0] + z_width], 206 | [z_pos[1], z_pos[1] + z_width], [-z_width, None]]: 207 | for hh in [[0, h_width], [-h_width, None]]: 208 | for ww in [[0, w_width], [-w_width, None]]: 209 | splits.append(data[:, :, zz[0]:zz[1], hh[0]:hh[1], ww[0]:ww[ 210 | 1]]) 211 | 212 | return torch.cat(splits, 0) 213 | 214 | 215 | def combine16(output, z, h, w): 216 | splits = [] 217 | for i in range(len(output)): 218 | splits.append(output[i]) 219 | 220 | output = np.zeros( 221 | (z, h, w, splits[0].shape[3], splits[0].shape[4]), np.float32) 222 | 223 | z_width = z / 4 224 | h_width = h / 2 225 | w_width = w / 2 226 | splitzstart = splits[0].shape[0] / 2 - z_width / 2 227 | z_pos = [z * 3 / 8 - z_width / 2, z * 5 / 8 - z_width / 2] 228 | i = 0 229 | for zz, zz2 in zip( 230 | [[0, z_width], [z_width, z_width * 2], [z_width * 2, z_width * 3], 231 | [z_width * 3 - z, None]], 232 | [[0, z_width], [splitzstart, z_width + splitzstart], 233 | [splitzstart, z_width + splitzstart], [z_width * 3 - z, None]]): 234 | for hh in [[0, h_width], [h_width - h, None]]: 235 | for ww in [[0, w_width], [w_width - w, None]]: 236 | output[zz[0]:zz[1], hh[0]:hh[1], ww[0]:ww[1], :, :] = splits[ 237 | i][zz2[0]:zz2[1], hh[0]:hh[1], ww[0]:ww[1], :, :] 238 | i = i + 1 239 | 240 | return output 241 | 242 | 243 | def split32(data, max_stride, margin): 244 | splits = [] 245 | _, c, z, h, w = data.size() 246 | 247 | z_width = np.ceil(float(z / 2 + margin) / 248 | max_stride).astype('int') * max_stride 249 | w_width = np.ceil(float(w / 4 + margin) / 250 | max_stride).astype('int') * max_stride 251 | h_width = np.ceil(float(h / 4 + margin) / 252 | max_stride).astype('int') * max_stride 253 | 254 | w_pos = [w * 3 / 8 - w_width / 2, w * 5 / 8 - w_width / 2] 255 | h_pos = [h * 3 / 8 - h_width / 2, h * 5 / 8 - h_width / 2] 256 | 257 | for zz in [[0, z_width], [-z_width, None]]: 258 | for hh in [[0, h_width], [h_pos[0], h_pos[0] + h_width], 259 | [h_pos[1], h_pos[1] + h_width], [-h_width, None]]: 260 | for ww in [[0, w_width], [w_pos[0], w_pos[0] + w_width], 261 | [w_pos[1], w_pos[1] + w_width], [-w_width, None]]: 262 | splits.append(data[:, :, zz[0]:zz[1], hh[0]:hh[1], ww[0]:ww[ 263 | 1]]) 264 | 265 | return torch.cat(splits, 0) 266 | 267 | 268 | def combine32(splits, z, h, w): 269 | 270 | output = np.zeros( 271 | (z, h, w, splits[0].shape[3], splits[0].shape[4]), np.float32) 272 | 273 | z_width = int(np.ceil(float(z) / 2)) 274 | h_width = int(np.ceil(float(h) / 4)) 275 | w_width = int(np.ceil(float(w) / 4)) 276 | splithstart = splits[0].shape[1] / 2 - h_width / 2 277 | splitwstart = splits[0].shape[2] / 2 - w_width / 2 278 | 279 | i = 0 280 | for zz in [[0, z_width], [z_width - z, None]]: 281 | 282 | for hh, hh2 in zip( 283 | [[0, h_width], [h_width, h_width * 2], [h_width * 2, h_width * 3], 284 | [h_width * 3 - h, None]], 285 | [[0, h_width], [splithstart, h_width + splithstart], 286 | [splithstart, h_width + splithstart], [h_width * 3 - h, None]]): 287 | 288 | for ww, ww2 in zip( 289 | [[0, w_width], [w_width, w_width * 2], 290 | [w_width * 2, w_width * 3], [w_width * 3 - w, None]], 291 | [[0, w_width], [splitwstart, w_width + splitwstart], 292 | [splitwstart, w_width + splitwstart], 293 | [w_width * 3 - w, None]]): 294 | 295 | output[zz[0]:zz[1], hh[0]:hh[1], ww[0]:ww[1], :, :] = splits[ 296 | i][zz[0]:zz[1], 
hh2[0]:hh2[1], ww2[0]:ww2[1], :, :] 297 | i = i + 1 298 | 299 | return output 300 | 301 | 302 | def split64(data, max_stride, margin): 303 | splits = [] 304 | _, c, z, h, w = data.size() 305 | 306 | z_width = np.ceil(float(z / 4 + margin) / 307 | max_stride).astype('int') * max_stride 308 | w_width = np.ceil(float(w / 4 + margin) / 309 | max_stride).astype('int') * max_stride 310 | h_width = np.ceil(float(h / 4 + margin) / 311 | max_stride).astype('int') * max_stride 312 | 313 | z_pos = [z * 3 / 8 - z_width / 2, z * 5 / 8 - z_width / 2] 314 | w_pos = [w * 3 / 8 - w_width / 2, w * 5 / 8 - w_width / 2] 315 | h_pos = [h * 3 / 8 - h_width / 2, h * 5 / 8 - h_width / 2] 316 | 317 | for zz in [[0, z_width], [z_pos[0], z_pos[0] + z_width], 318 | [z_pos[1], z_pos[1] + z_width], [-z_width, None]]: 319 | for hh in [[0, h_width], [h_pos[0], h_pos[0] + h_width], 320 | [h_pos[1], h_pos[1] + h_width], [-h_width, None]]: 321 | for ww in [[0, w_width], [w_pos[0], w_pos[0] + w_width], 322 | [w_pos[1], w_pos[1] + w_width], [-w_width, None]]: 323 | splits.append(data[:, :, zz[0]:zz[1], hh[0]:hh[1], ww[0]:ww[ 324 | 1]]) 325 | 326 | return torch.cat(splits, 0) 327 | 328 | 329 | def combine64(output, z, h, w): 330 | splits = [] 331 | for i in range(len(output)): 332 | splits.append(output[i]) 333 | 334 | output = np.zeros( 335 | (z, h, w, splits[0].shape[3], splits[0].shape[4]), np.float32) 336 | 337 | z_width = int(np.ceil(float(z) / 4)) 338 | h_width = int(np.ceil(float(h) / 4)) 339 | w_width = int(np.ceil(float(w) / 4)) 340 | splitzstart = splits[0].shape[0] / 2 - z_width / 2 341 | splithstart = splits[0].shape[1] / 2 - h_width / 2 342 | splitwstart = splits[0].shape[2] / 2 - w_width / 2 343 | 344 | i = 0 345 | for zz, zz2 in zip( 346 | [[0, z_width], [z_width, z_width * 2], [z_width * 2, z_width * 3], 347 | [z_width * 3 - z, None]], 348 | [[0, z_width], [splitzstart, z_width + splitzstart], 349 | [splitzstart, z_width + splitzstart], [z_width * 3 - z, None]]): 350 | 351 | for hh, hh2 in zip( 352 | [[0, h_width], [h_width, h_width * 2], [h_width * 2, h_width * 3], 353 | [h_width * 3 - h, None]], 354 | [[0, h_width], [splithstart, h_width + splithstart], 355 | [splithstart, h_width + splithstart], [h_width * 3 - h, None]]): 356 | 357 | for ww, ww2 in zip( 358 | [[0, w_width], [w_width, w_width * 2], 359 | [w_width * 2, w_width * 3], [w_width * 3 - w, None]], 360 | [[0, w_width], [splitwstart, w_width + splitwstart], 361 | [splitwstart, w_width + splitwstart], 362 | [w_width * 3 - w, None]]): 363 | 364 | output[zz[0]:zz[1], hh[0]:hh[1], ww[0]:ww[1], :, :] = splits[ 365 | i][zz2[0]:zz2[1], hh2[0]:hh2[1], ww2[0]:ww2[1], :, :] 366 | i = i + 1 367 | 368 | return output 369 | -------------------------------------------------------------------------------- /file/task1/val.json: -------------------------------------------------------------------------------- 1 | {"A100194": 0, "A100259": 1, "A100530": 0, "A100617": 1, "A101018": 0, "A101307": 1, "A101357": 0, "A101443": 1, "A101545": 1, "A101651": 0, "A101671": 0, "A101745": 0, "A101759": 0, "A102045": 0, "A102176": 1, "A102285": 0, "A102311": 0, "A102383": 0, "A102542": 0, "A102577": 1, "A102685": 0, "A103134": 0, "A103190": 1, "A103237": 1, "A103290": 0, "A103512": 0, "A103586": 0, "A103602": 0, "A103603": 0, "A103728": 1, "A103803": 0, "A103924": 1, "A103973": 0, "A103983": 1, "A104111": 1, "A104331": 1, "A104559": 1, "A104699": 1, "A104743": 1, "A104772": 0, "A104869": 0, "A104883": 0, "A104959": 1, "A105166": 1, "A105180": 1, "A105199": 0, "A105305": 1, 
"A105353": 0, "A105394": 1, "A105470": 1, "A105500": 0, "A105504": 0, "A105508": 1, "A105536": 0, "A105627": 0, "A105651": 0, "A105750": 1, "A105790": 0, "A105862": 1, "A105918": 1, "A105967": 1, "A106014": 1, "A106134": 1, "A106245": 1, "A106256": 1, "A106444": 1, "A106553": 1, "A106601": 0, "A106621": 0, "A106640": 0, "A106694": 0, "A106832": 0, "A106839": 1, "A107433": 1, "A107643": 1, "A107688": 1, "A107750": 0, "A107900": 1, "A107902": 0, "A108117": 1, "A108526": 0, "A108593": 1, "A108685": 1, "A109041": 0, "A109200": 0, "A109272": 0, "A109492": 1, "A109574": 1, "A109744": 0, "A109826": 1, "A109866": 1, "A109884": 1, "A109958": 0, "A109989": 0, "A109997": 1, "A110021": 0, "A110093": 0, "A110150": 1, "A110592": 1, "A110672": 1, "A110927": 1, "A111041": 1, "A111280": 0, "A111287": 1, "A111373": 1, "A111443": 0, "A111509": 0, "A111598": 1, "A111773": 0, "A112110": 1, "A112408": 1, "A112599": 1, "A112634": 1, "A112720": 1, "A112913": 1, "A113111": 1, "A113169": 1, "A113212": 1, "A113243": 0, "A113256": 1, "A113311": 1, "A113396": 0, "A113466": 0, "A113507": 0, "A113790": 0, "A113887": 0, "A113955": 1, "A113985": 1, "A114065": 0, "A114151": 1, "A114252": 0, "A114422": 0, "A114484": 1, "A114517": 0, "A114560": 0, "A114577": 0, "A114775": 0, "A114872": 1, "A114904": 1, "A115119": 1, "A115483": 1, "A115531": 1, "A115546": 0, "A115570": 1, "A115574": 1, "A115664": 0, "A115672": 1, "A115923": 1, "A115954": 1, "A116002": 1, "A116047": 0, "A116104": 0, "A116207": 1, "A116439": 0, "A116494": 1, "A116507": 0, "A116533": 0, "A116702": 1, "A116717": 1, "A116930": 0, "A117045": 1, "A117052": 1, "A117073": 0, "A117131": 0, "A117134": 0, "A117135": 0, "A117204": 1, "A117235": 1, "A117432": 0, "A117643": 0, "A117676": 0, "A117749": 1, "A117795": 0, "A117827": 1, "A117955": 0, "A118044": 0, "A118217": 1, "A118240": 0, "A118465": 1, "A118483": 0, "A118650": 1, "A118710": 1, "A118728": 0, "A119290": 0, "A119340": 0, "A119350": 0, "A119359": 1, "A119362": 1, "A119414": 1, "A119509": 0, "A119751": 0, "A119795": 1, "A119803": 0, "A119825": 0, "A119833": 0, "A119841": 0, "A120094": 1, "A120116": 1, "A120123": 0, "A120136": 1, "A120242": 0, "A120537": 0, "A120559": 1, "A120641": 1, "A120752": 0, "A120858": 0, "A120938": 1, "A121007": 0, "A121064": 1, "A121070": 1, "A121116": 1, "A121284": 1, "A121295": 0, "A121317": 1, "A121349": 1, "A121405": 0, "A121458": 1, "A121596": 1, "A121834": 1, "A122012": 1, "A122069": 1, "A122222": 1, "A122244": 0, "A122277": 1, "A122308": 1, "A122344": 1, "A122538": 0, "A122614": 0, "A122624": 0, "A122837": 0, "A122894": 1, "A122930": 0, "A122959": 1, "A123031": 0, "A123085": 0, "A123346": 0, "A123356": 0, "A123475": 1, "A123525": 1, "A123638": 0, "A123672": 0, "A123784": 0, "A123851": 0, "A123951": 1, "A123968": 1, "A124310": 0, "A124365": 1, "A124471": 0, "A124550": 1, "A124819": 0, "A124838": 0, "A124925": 0, "A124929": 1, "A125087": 0, "A125136": 1, "A125204": 1, "A125234": 0, "A125248": 0, "A125295": 0, "A125467": 1, "A125519": 0, "A125543": 0, "A125551": 0, "A125568": 1, "A125635": 1, "A125680": 1, "A125687": 0, "A125760": 0, "A125889": 0, "A125890": 0, "A125922": 1, "A125949": 1, "A125959": 0, "A125972": 1, "A126026": 0, "A126128": 0, "A126164": 1, "A126339": 0, "A126549": 0, "A126596": 0, "A126610": 0, "A126643": 0, "A126654": 1, "A126663": 1, "A126749": 0, "A126752": 0, "A126829": 0, "A126911": 1, "A127029": 1, "A127154": 0, "A127165": 1, "A127194": 0, "A127311": 0, "A127326": 0, "A127491": 0, "A127531": 1, "A127572": 0, "A127589": 1, "A127630": 0, "A127736": 1, "A127780": 
0, "A127799": 0, "A127837": 0, "A127916": 1, "A128076": 1, "A128112": 1, "A128137": 1, "A128146": 0, "A128259": 1, "A128280": 1, "A128592": 1, "A128638": 0, "A128748": 0, "A128769": 0, "A128798": 1, "A128805": 1, "A128836": 1, "A128894": 1, "A129038": 1, "A129158": 0, "A129308": 1, "A129335": 1, "A129434": 1, "A129466": 1, "A129577": 1, "A129582": 0, "A129725": 1, "A129758": 0, "A129804": 1, "A129946": 0, "A129962": 0, "A130036": 1, "A130103": 0, "A130162": 1, "A130390": 1, "A130398": 0, "A130399": 1, "A130403": 1, "A130415": 0, "A130480": 0, "A130531": 0, "A130533": 1, "A130579": 0, "A130642": 1, "A130745": 0, "A130750": 1, "A130777": 0, "A130890": 0, "A130905": 1, "A130964": 0, "A131011": 0, "A131033": 1, "A131073": 1, "A131171": 1, "A131187": 0, "A131260": 1, "A131419": 1, "A131545": 0, "A131599": 0, "A131615": 0, "A131738": 0, "A131763": 1, "A131872": 1, "A131923": 1, "A131959": 0, "A132118": 1, "A132130": 1, "A132133": 0, "A132134": 0, "A132229": 1, "A132325": 1, "A132396": 1, "A132409": 1, "A132475": 0, "A132481": 0, "A132572": 1, "A132616": 0, "A132687": 0, "A132766": 1, "A132767": 0, "A132961": 0, "A132968": 1, "A132998": 1, "A133217": 0, "A133395": 0, "A133538": 0, "A133543": 0, "A133554": 1, "A133655": 1, "A133689": 1, "A133696": 1, "A133965": 0, "A134191": 0, "A134201": 0, "A134223": 0, "A134463": 1, "A134523": 1, "A134622": 1, "A134782": 0, "A135006": 1, "A135033": 0, "A135403": 0, "A135456": 0, "A135463": 1, "A135647": 0, "A135691": 0, "A135710": 1, "A135714": 0, "A135830": 0, "A135855": 1, "A135863": 0, "A135906": 0, "A135949": 1, "A135957": 0, "A135995": 1, "A136007": 0, "A136200": 0, "A136229": 1, "A136445": 0, "A136788": 1, "A136900": 1, "A137267": 0, "A137274": 1, "A137501": 0, "A137508": 1, "A137514": 1, "A137558": 1, "A137589": 0, "A137617": 1, "A137641": 0, "A137656": 0, "A137689": 1, "A137718": 0, "A138125": 1, "A138278": 1, "A138286": 1, "A138294": 1, "A138330": 1, "A138337": 1, "A138410": 0, "A138439": 1, "A138457": 0, "A138621": 0, "A138688": 1, "A138790": 1, "A138863": 0, "A138967": 1, "A139203": 0, "A139342": 0, "A139436": 1, "A139483": 1, "A139664": 0, "A139786": 0, "A139807": 1, "A139890": 0, "A139946": 0, "A140105": 1, "A140202": 0, "A140240": 0, "A140243": 1, "A140355": 0, "A140367": 0, "A140406": 1, "A140451": 0, "A140515": 1, "A140535": 1, "A140538": 1, "A140651": 1, "A140699": 0, "A140955": 0, "A140965": 1, "A141036": 0, "A141047": 1, "A141061": 0, "A141088": 0, "A141156": 0, "A141189": 0, "A141197": 0, "A141200": 0, "A141240": 1, "A141274": 0, "A141279": 0, "A141396": 1, "A141595": 0, "A141704": 0, "A141710": 0, "A141735": 0, "A141747": 1, "A141822": 0, "A141833": 0, "A141859": 1, "A141943": 0, "A141947": 0, "A141963": 0, "A142002": 1, "A142124": 1, "A142195": 1, "A142203": 1, "A142276": 0, "A142289": 1, "A142296": 0, "A142324": 0, "A142401": 1, "A142405": 1, "A142503": 0, "A142504": 0, "A142508": 1, "A142706": 1, "A142735": 0, "A142819": 1, "A142820": 1, "A142866": 0, "A143009": 1, "A143046": 0, "A143052": 0, "A143100": 0, "A143122": 0, "A143181": 1, "A143294": 0, "A143298": 0, "A143535": 1, "A143536": 0, "A143537": 0, "A143540": 0, "A143592": 0, "A143626": 1, "A143675": 1, "A143890": 0, "A143895": 0, "A144260": 1, "A144291": 1, "A144421": 0, "A144434": 0, "A144515": 1, "A144549": 1, "A144599": 1, "A144674": 1, "A144733": 0, "A144927": 1, "A144934": 1, "A144999": 0, "A145088": 1, "A145092": 1, "A145113": 1, "A145263": 1, "A145333": 0, "A145359": 0, "A145463": 0, "A145507": 1, "A145586": 1, "A145624": 0, "A145630": 1, "A145754": 1, "A145786": 1, 
"A145948": 0, "A146049": 0, "A146144": 1, "A146240": 1, "A146396": 1, "A146506": 0, "A146625": 1, "A146722": 0, "A146784": 1, "A146854": 1, "A146940": 0, "A147146": 1, "A147274": 1, "A147315": 1, "A147318": 0, "A147323": 1, "A147332": 1, "A147407": 0, "A147620": 0, "A147710": 0, "A147723": 1, "A147803": 0, "A147887": 1, "A147921": 0, "A148093": 1, "A148123": 0, "A148141": 1, "A148145": 1, "A148149": 0, "A148469": 0, "A148471": 1, "A148538": 0, "A148559": 0, "A148579": 1, "A148629": 0, "A148702": 0, "A148775": 0, "A148850": 1, "A149054": 1, "A149347": 1, "A149348": 0, "A149590": 1, "A149619": 1, "A149667": 1, "A149720": 0, "A149738": 1, "A149795": 1, "A149832": 1, "A149911": 1, "A149968": 1, "A150068": 1, "A150186": 1, "A150382": 0, "A150391": 1, "A150632": 1, "A150679": 1, "A150772": 0, "A151336": 1, "A151362": 0, "A151368": 0, "A151372": 1, "A151391": 0, "A151410": 1, "A151591": 0, "A151636": 0, "A151729": 0, "A151781": 0, "A151804": 1, "A151930": 0, "A151978": 0, "A152079": 0, "A152081": 0, "A152273": 0, "A152288": 0, "A152291": 1, "A152298": 0, "A152302": 0, "A152304": 1, "A152319": 1, "A152444": 1, "A152469": 1, "A152627": 0, "A152628": 1, "A152646": 0, "A152697": 0, "A152807": 1, "A153116": 0, "A153311": 0, "A153405": 1, "A153499": 1, "A153554": 1, "A153648": 1, "A153677": 1, "A153748": 0, "A153803": 1, "A153817": 0, "A153819": 1, "A153971": 1, "A154003": 0, "A154097": 0, "A154102": 0, "A154125": 1, "A154127": 1, "A154248": 0, "A154544": 0, "A154576": 1, "A154589": 1, "A154596": 1, "A154791": 1, "A154792": 0, "A154809": 1, "A155027": 0, "A155055": 1, "A155078": 1, "A155083": 0, "A155115": 1, "A155174": 0, "A155254": 1, "A155257": 1, "A155355": 1, "A155419": 0, "A155420": 1, "A155479": 1, "A155512": 0, "A155739": 0, "A155768": 0, "A155797": 0, "A155818": 0, "A155926": 0, "A155939": 0, "A156023": 1, "A156075": 0, "A156086": 1, "A156248": 1, "A156290": 1, "A156301": 1, "A156305": 1, "A156422": 0, "A156568": 0, "A156713": 1, "A156715": 1, "A156723": 1, "A156775": 0, "A156785": 0, "A156809": 1, "A156840": 1, "A156879": 1, "A156944": 1, "A157005": 1, "A157072": 1, "A157146": 0, "A157454": 0, "A157539": 1, "A157548": 0, "A157600": 1, "A157723": 1, "A157748": 0, "A157892": 0, "A157905": 0, "A157997": 1, "A158053": 1, "A158131": 1, "A158220": 0, "A158302": 1, "A158335": 1, "A158486": 1, "A158533": 1, "A158663": 1, "A158670": 0, "A158678": 0, "A158679": 1, "A158757": 0, "A158815": 0, "A158816": 0, "A158847": 1, "A158888": 0, "A158891": 0, "A158923": 1, "A159025": 1, "A159117": 1, "A159165": 1, "A159192": 1, "A159196": 1, "A159244": 0, "A159384": 0, "A159440": 1, "A159546": 1, "A159567": 0, "A159652": 0, "A159733": 0, "A159806": 0, "A159814": 0, "A159819": 0, "A159947": 0, "A160028": 1, "A160036": 0, "A160062": 1, "A160070": 0, "A160097": 0, "A160109": 0, "A160144": 1, "A160204": 1, "A160284": 1, "A160345": 0, "A160350": 0, "A160359": 0, "A160393": 0, "A160405": 0, "A160460": 1, "A160461": 1, "A160518": 0, "A160584": 1, "A160587": 1, "A160605": 0, "A160622": 1, "A160689": 0, "A160691": 0, "A160724": 0, "A160766": 1, "A160837": 1, "A160862": 0, "A160987": 1, "A161039": 1, "A161059": 0, "A161226": 1, "A161239": 0, "A161263": 0, "A161300": 0, "A161330": 0, "A161338": 0, "A161351": 0, "A161464": 0, "A161465": 0, "A161469": 0, "A161579": 0, "A161754": 1, "A161767": 1, "A161781": 1, "A161895": 1, "A161907": 1, "A161909": 1, "A161910": 1, "A161933": 0, "A162017": 1, "A162075": 0, "A162188": 0, "A162234": 1, "A162348": 0, "A162495": 0, "A162523": 1, "A162633": 0, "A162645": 0, "A162730": 0, "A162738": 
1, "A162813": 1, "A162870": 0, "A162949": 1, "A162997": 1, "A163069": 0, "A163152": 1, "A163178": 1, "A163212": 0, "A163242": 0, "A163333": 0, "A163412": 0, "A163537": 1, "A163576": 1, "A163592": 1, "A163595": 1, "A163634": 0, "A163957": 1, "A164037": 1, "A164080": 1, "A164081": 0, "A164142": 1, "A164151": 0, "A164159": 0, "A164284": 0, "A164429": 1, "A164466": 1, "A164482": 1, "A164524": 1, "A164561": 1, "A164711": 0, "A164786": 0, "A164807": 0, "A164847": 1, "A164901": 0, "A165101": 1, "A165317": 0, "A165355": 0, "A165462": 0, "A165596": 1, "A165982": 1, "A165997": 1, "A166072": 0, "A166241": 1, "A166243": 1, "A166278": 1, "A166295": 1, "A166297": 1, "A166505": 0, "A166518": 0, "A166528": 0, "A166627": 0, "A166657": 1, "A166674": 1, "A166675": 1, "A166732": 1, "A167006": 0, "A167103": 0, "A167198": 1, "A167254": 1, "A167510": 0, "A167526": 0, "A167680": 0, "A167714": 0, "A167793": 0, "A167824": 1, "A167863": 0, "A168041": 0, "A168179": 0, "A168261": 0, "A168303": 0, "A168329": 1, "A168545": 1, "A168549": 0, "A168561": 1, "A168565": 1, "A168579": 1, "A168681": 1, "A168721": 0, "A168764": 1, "A168773": 0, "A168858": 1, "A168866": 1, "A168985": 0, "A169249": 0, "A169251": 1, "A169303": 0, "A169423": 0, "A169534": 1, "A169535": 1, "A169575": 0, "A169612": 0, "A169613": 0, "A169657": 1, "A169693": 0, "A169704": 1, "A169731": 0, "A169743": 1, "A169800": 0, "A169815": 1, "A169915": 0, "A169924": 0, "A169937": 1, "A169957": 0, "A169966": 1, "A170017": 1, "A170058": 1, "A170296": 1, "A170304": 0, "A170319": 1, "A170350": 1, "A170384": 1, "A170398": 0, "A170406": 1, "A170423": 0, "A170435": 0, "A170448": 0, "A170481": 1, "A170498": 0, "A170512": 0, "A170530": 0, "A170651": 0, "A170696": 0, "A170786": 1, "A170906": 1, "A170935": 1, "A170986": 1, "A170991": 0, "A171030": 1, "A171044": 1, "A171049": 1, "A171116": 1, "A171377": 0, "A171458": 0, "A171470": 1, "A171473": 0, "A171496": 1, "A171593": 1, "A171645": 1, "A171694": 1, "A171758": 0, "A171833": 0, "A172043": 1, "A172166": 0, "A172182": 0, "A172301": 0, "A172401": 0, "A172478": 0, "A172499": 0, "A172555": 1, "A172634": 0, "A172692": 0, "A172759": 0, "A172945": 0, "A172977": 0, "A173005": 0, "A173011": 0, "A173053": 1, "A173129": 1, "A173224": 0, "A173249": 0, "A173281": 0, "A173396": 1, "A173504": 0, "A173559": 0, "A173568": 0, "A173655": 0, "A173706": 0, "A173721": 0, "A173750": 1, "A173793": 0, "A173811": 1, "A173944": 1, "A174180": 1, "A174243": 1, "A174249": 1, "A174360": 0, "A174405": 1, "A174430": 0, "A174449": 0, "A174551": 1, "A174576": 1, "A174617": 1, "A174630": 0, "A174654": 1, "A174759": 0, "A174797": 1, "A174816": 1, "A174848": 0, "A174934": 0, "A174954": 0, "A174974": 1, "A175029": 1, "A175128": 1, "A175131": 0, "A175146": 0, "A175286": 0, "A175319": 0, "A175385": 0, "A175389": 0, "A175460": 1, "A175516": 1, "A175586": 0, "A175588": 0, "A175602": 1, "A175649": 1, "A175713": 0, "A175719": 1, "A175767": 1, "A175788": 0, "A175930": 1, "A175977": 0, "A176003": 1, "A176110": 0, "A176123": 1, "A176127": 0, "A176185": 1, "A176238": 1, "A176243": 0, "A176251": 1, "A176308": 1, "A176377": 1, "A176516": 1, "A176519": 1, "A176522": 1, "A176546": 1, "A176634": 0, "A176737": 1, "A176742": 0, "A176749": 1, "A176915": 1, "A177062": 1, "A177088": 0, "A177129": 1, "A177184": 1, "A177356": 1, "A177365": 1, "A177460": 1, "A177480": 1, "A177553": 1, "A177649": 1, "A177697": 0, "A177708": 1, "A177794": 1, "A177863": 1, "A177885": 1, "A177887": 1, "A177902": 0, "A177962": 0, "A178105": 0, "A178145": 0, "A178208": 0, "A178275": 1, "A178361": 0, 
"A178390": 1, "A178419": 1, "A178466": 1, "A178497": 0, "A178524": 1, "A178532": 1, "A178718": 0, "A178742": 0, "A178772": 1, "A178866": 0, "A178873": 1, "A178876": 0, "A178906": 1, "A178979": 0, "A179119": 1, "A179235": 1, "A179239": 0, "A179341": 0, "A179352": 0, "A179381": 1, "A179384": 0, "A179440": 1, "A179454": 1, "A179527": 1, "A179574": 1, "A179658": 0, "A179695": 1, "A179737": 0, "A179759": 1, "A179778": 0, "A179783": 1, "A179965": 1, "A179973": 0, "A180054": 1, "A180056": 0, "A180240": 0, "A180261": 1, "A180328": 1, "A180330": 0, "A180408": 0, "A180490": 0, "A180583": 1, "A180789": 0, "A180948": 0, "A180965": 1, "A181302": 0, "A181471": 1, "A181586": 1, "A181617": 0, "A181619": 1, "A181679": 0, "A181814": 0, "A181998": 1, "A182065": 0, "A182090": 0, "A182120": 1, "A182158": 1, "A182213": 0, "A182533": 1, "A182576": 0, "A182625": 1, "A182686": 1, "A182778": 1, "A182818": 1, "A182951": 1, "A183046": 1, "A183057": 1, "A183092": 1, "A183174": 0, "A183198": 0, "A183260": 1, "A183297": 1, "A183325": 0, "A183354": 1, "A183587": 0, "A183667": 0, "A183712": 1, "A183796": 0, "A183834": 0, "A183860": 0, "A183870": 0, "A183896": 0, "A184041": 0, "A184116": 0, "A184155": 0, "A184202": 0, "A184290": 0, "A184345": 0, "A184399": 1, "A184433": 1, "A184476": 1, "A184606": 0, "A184607": 1, "A184671": 0, "A184690": 0, "A184700": 0, "A184735": 1, "A184752": 1, "A184850": 0, "A184908": 1, "A184916": 1, "A185013": 1, "A185051": 0, "A185142": 0, "A185196": 1, "A185215": 1, "A185224": 0, "A185243": 1, "A185279": 0, "A185300": 0, "A185336": 1, "A185337": 0, "A185380": 0, "A185431": 0, "A185462": 1, "A185470": 0, "A185487": 1, "A185531": 1, "A185557": 1, "A185591": 1, "A185610": 0, "A185625": 1, "A185765": 1, "A185784": 0, "A185802": 0, "A185891": 0, "A185897": 1, "A185949": 0, "A185959": 0, "A185973": 0, "A185985": 1, "A185986": 0, "A186065": 1, "A186086": 1, "A186193": 1, "A186194": 1, "A186240": 0, "A186241": 1, "A186243": 0, "A186258": 0, "A186290": 0, "A186296": 1, "A186305": 1, "A186321": 1, "A186414": 0, "A186417": 1, "A186505": 1, "A186529": 1, "A186596": 0, "A186797": 1, "A186852": 0, "A186879": 0, "A187226": 0, "A187254": 0, "A187297": 1, "A187346": 1, "A187405": 1, "A187487": 0, "A187613": 0, "A187619": 1, "A187641": 1, "A187742": 1, "A187931": 1, "A187934": 1, "A187972": 1, "A188011": 1, "A188059": 0, "A188102": 0, "A188173": 1, "A188339": 1, "A188381": 0, "A188428": 0, "A188492": 1, "A188539": 0, "A188601": 0, "A188702": 0, "A188772": 0, "A188773": 0, "A188775": 1, "A188828": 1, "A188829": 0, "A188956": 0, "A188988": 1, "A189081": 1, "A189271": 1, "A189320": 1, "A189356": 0, "A189598": 1, "A189645": 0, "A189657": 1, "A189677": 1, "A189696": 1, "A189697": 0, "A189700": 1, "A189767": 0, "A189833": 0, "A189895": 0, "A189950": 0, "A189996": 1, "A190009": 0, "A190053": 0, "A190094": 0, "A190162": 0, "A190216": 0, "A190380": 1, "A190387": 0, "A190411": 0, "A190450": 1, "A190460": 1, "A190511": 1, "A190524": 1, "A190624": 0, "A190703": 0, "A190730": 1, "A190735": 1, "A190805": 1, "A190854": 1, "A190866": 0, "A190874": 0, "A191017": 0, "A191027": 1, "A191053": 1, "A191060": 1, "A191079": 0, "A191097": 1, "A191151": 1, "A191165": 1, "A191363": 0, "A191517": 0, "A191522": 0, "A191628": 1, "A191632": 1, "A191673": 0, "A191702": 1, "A191726": 0, "A191730": 0, "A191925": 0, "A191927": 1, "A192016": 1, "A192077": 1, "A192150": 0, "A192274": 0, "A192288": 0, "A192296": 1, "A192364": 0, "A192381": 0, "A192382": 1, "A192455": 1, "A192484": 0, "A192546": 0, "A192590": 0, "A192649": 1, "A192659": 1, "A192698": 
0, "A192744": 1, "A192960": 0, "A192971": 0, "A193028": 0, "A193045": 0, "A193050": 1, "A193325": 0, "A193353": 0, "A193438": 0, "A193625": 0, "A193647": 0, "A193668": 0, "A193698": 1, "A193976": 0, "A194207": 1, "A194226": 1, "A194334": 0, "A194383": 0, "A194538": 0, "A194562": 1, "A194672": 0, "A194754": 0, "A194825": 0, "A194853": 0, "A194993": 0, "A195048": 0, "A195070": 0, "A195083": 0, "A195096": 0, "A195118": 1, "A195431": 0, "A195464": 0, "A195496": 0, "A195501": 0, "A195573": 1, "A195580": 0, "A195660": 0, "A195669": 0, "A195774": 1, "A195780": 1, "A195829": 1, "A195978": 1, "A196062": 1, "A196175": 0, "A196256": 0, "A196307": 0, "A196369": 1, "A196406": 1, "A196628": 1, "A196797": 0, "A196847": 1, "A196933": 0, "A196938": 1, "A196970": 1, "A196974": 0, "A197113": 0, "A197212": 0, "A197248": 0, "A197304": 1, "A197531": 0, "A197693": 1, "A197786": 1, "A197822": 0, "A197826": 1, "A197855": 0, "A197911": 1, "A197937": 1, "A197945": 1, "A197958": 1, "A198012": 1, "A198053": 0, "A198113": 0, "A198114": 1, "A198128": 1, "A198183": 1, "A198205": 0, "A198220": 0, "A198283": 1, "A198552": 0, "A198616": 0, "A198722": 1, "A198819": 0, "A198821": 1, "A198840": 0, "A198855": 0, "A198857": 1, "A198909": 1, "A198938": 1, "A198948": 0, "A198957": 0, "A198966": 1, "A199024": 1, "A199097": 0, "A199155": 0, "A199174": 0, "A199175": 0, "A199179": 0, "A199211": 0, "A199215": 1, "A199222": 1, "A199288": 0, "A199307": 0, "A199349": 0, "A199375": 1, "A199439": 0, "A199477": 1, "A199588": 0, "A199694": 0, "A199721": 1, "A199862": 0, "A199883": 1, "A199899": 0, "A199946": 0, "A200030": 0, "A200034": 0, "A200180": 1, "A200225": 0, "A200402": 0, "A200432": 0, "A200435": 0, "A200464": 0, "A200532": 1, "A200630": 1, "A200774": 1, "A200923": 1, "A201171": 1, "A201332": 0, "A201344": 1, "A201406": 0, "A201412": 0, "A201516": 0, "A201521": 0, "A201558": 1, "A201580": 0, "A201651": 0, "A201665": 1, "A201676": 0, "A201694": 1, "A201713": 1, "A201814": 1, "A201956": 1, "A202032": 1, "A202064": 1, "A202074": 1, "A202110": 0, "A202139": 1, "A202156": 1, "A202192": 1, "A202198": 1, "A202250": 0, "A202261": 1, "A202323": 0, "A202362": 1, "A202411": 1, "A202506": 0, "A202649": 0, "A202736": 0, "A202848": 0, "A202893": 0, "A202914": 1, "A202990": 1, "A203023": 1, "A203064": 1, "A203122": 0, "A203195": 0, "A203212": 0, "A203283": 1, "A203372": 1, "A203402": 0, "A203430": 1, "A203447": 0, "A203470": 0, "A203705": 1, "A203723": 1, "A203750": 0, "A203763": 1, "A203771": 1, "A203777": 1, "A203797": 1, "A203934": 0, "A203977": 0, "A204062": 1, "A204141": 0, "A204252": 1, "A204311": 0, "A204319": 0, "A204413": 0, "A204438": 0, "A204560": 1, "A204602": 1, "A204603": 1, "A204788": 0, "A204825": 0, "A204863": 0, "A204899": 1, "A204907": 1, "A204928": 0, "A204929": 1, "A204930": 0, "A204941": 1, "A204948": 0, "A204992": 0, "A205015": 1, "A205030": 0, "A205036": 0, "A205102": 0, "A205328": 1, "A205349": 0, "A205358": 0, "A205392": 0, "A205443": 1, "A205449": 1, "A205469": 1, "A205470": 0, "A205518": 1, "A205519": 0, "A205530": 1, "A205755": 0, "A205784": 1, "A205882": 1, "A205921": 1, "A205925": 0, "A205984": 0, "A206039": 0, "A206123": 1, "A206141": 1, "A206169": 0, "A206177": 1, "A206237": 0, "A206326": 1, "A206400": 0, "A206515": 1, "A206552": 1, "A206611": 1, "A206642": 1, "A206697": 0, "A206705": 0, "A206752": 0, "A206810": 1, "A206858": 0, "A206883": 0, "A206946": 1, "A206953": 0, "A206992": 1, "A207084": 1, "A207110": 0, "A207179": 0, "A207216": 0, "A207316": 0, "A207358": 0, "A207362": 1, "A207372": 0, "A207387": 1, 
"A207470": 0, "A207511": 1, "A207512": 1, "A207659": 1, "A207699": 0, "A207723": 1, "A207814": 1, "A207837": 1, "A207849": 1, "A207851": 0, "A207936": 1, "A207949": 0, "A208048": 1, "A208072": 1, "A208254": 1, "A208684": 1, "A208769": 1, "A208904": 0, "A208916": 1, "A208917": 0, "A208924": 1, "A208967": 0, "A208970": 1, "A209009": 0, "A209026": 0, "A209091": 0, "A209166": 1, "A209280": 0, "A209537": 0, "A209550": 1, "A209606": 0, "A209715": 0, "A209805": 0, "A209955": 1, "A209983": 1, "A209987": 1, "A210007": 0, "A210016": 0, "A210192": 1, "A210193": 1, "A210283": 1, "A210292": 1, "A210315": 1, "A210438": 0, "A210498": 1, "A210533": 1, "A210579": 1, "A210610": 0, "A210711": 1, "A210721": 1, "A210810": 1, "A210946": 0, "A210969": 1, "A211035": 1, "A211074": 1, "A211175": 0, "A211200": 0, "A211306": 0, "A211371": 1, "A211453": 1, "A211460": 1, "A211541": 1, "A211561": 0, "A211703": 0, "A211777": 1, "A211778": 1, "A211800": 1, "A211916": 1, "A211925": 0, "A212001": 0, "A212022": 0, "A212044": 1, "A212137": 1, "A212146": 1, "A212221": 1, "A212369": 1, "A212444": 0, "A212468": 1, "A212495": 0, "A212612": 1, "A212645": 1, "A212653": 0, "A212676": 0, "A212700": 0, "A212840": 1, "A212921": 1, "A212967": 0, "A213007": 0, "A213012": 0, "A213154": 1, "A213310": 1, "A213341": 1, "A213410": 0, "A213457": 1, "A213491": 1, "A213535": 0, "A213689": 1, "A213829": 0, "A213951": 1, "A213953": 1, "A214006": 0, "A214046": 0, "A214148": 0, "A214209": 0, "A214293": 1, "A214356": 0, "A214364": 0, "A214398": 1, "A214507": 0, "A214516": 1, "A214559": 0, "A214612": 1, "A214680": 0, "A214682": 0, "A214758": 0, "A214796": 1, "A215093": 0, "A215160": 0, "A215170": 0, "A215207": 0, "A215218": 1, "A215250": 0, "A215328": 1, "A215385": 1, "A215482": 0, "A215487": 1, "A215670": 0, "A215726": 0, "A215787": 1, "A215798": 1, "A215806": 1, "A216021": 1, "A216085": 1, "A216170": 0, "A216245": 0, "A216281": 0, "A216342": 1, "A216344": 0, "A216347": 0, "A216389": 0, "A216451": 1, "A216528": 0, "A216580": 0, "A216658": 0, "A216815": 1, "A216911": 0, "A216948": 0, "A216978": 0, "A217038": 0, "A217135": 1, "A217177": 1, "A217215": 0, "A217344": 0, "A217480": 0, "A217490": 0, "A217496": 0, "A217655": 0, "A217678": 1, "A217685": 0, "A217696": 0, "A217744": 1, "A217800": 0, "A217803": 0, "A217842": 1, "A217849": 1, "A217884": 0, "A217923": 0, "A218016": 1, "A218042": 1, "A218074": 1, "A218178": 1, "A218510": 1, "A218520": 1, "A218605": 1, "A218656": 0, "A218662": 0, "A218762": 0, "A218797": 1, "A218807": 1, "A218839": 1, "A218919": 1, "A219076": 1, "A219084": 1, "A219179": 1, "A219231": 0, "A219270": 0, "A219549": 1, "A219749": 0, "A219798": 0, "A219844": 0, "A219908": 0, "A219973": 0, "A219978": 1, "A220011": 0, "A220073": 1, "A220107": 0, "A220128": 1, "A220133": 0, "A220137": 0, "A220190": 1, "A220302": 0, "A220324": 0, "A220439": 1, "A220442": 1, "A220551": 0, "A220642": 0, "A220656": 0, "A220740": 1, "A220755": 0, "A220772": 1, "A220789": 1, "A220863": 0, "A220873": 0, "A220878": 1, "A220881": 0, "A220886": 1, "A220904": 0, "A221152": 0, "A221236": 1, "A221295": 0, "A221384": 0, "A221386": 1, "A221423": 0, "A221463": 0, "A221772": 1, "A221822": 0, "A221846": 1, "A221862": 1, "A221945": 0, "A221946": 0, "A222034": 0, "A222113": 0, "A222134": 0, "A222189": 1, "A222218": 1, "A222259": 0, "A222390": 1, "A222442": 1, "A222683": 1, "A222758": 1, "A222785": 0, "A222802": 0, "A222843": 1, "A222929": 1, "A222941": 1, "A223024": 1, "A223047": 0, "A223067": 0, "A223071": 0, "A223144": 0, "A223150": 0, "A223411": 0, "A223788": 1, "A223817": 
0, "A223913": 0, "A223919": 0, "A223921": 0, "A224002": 1, "A224016": 0, "A224020": 1, "A224040": 1, "A224044": 0, "A224053": 1, "A224099": 0, "A224158": 1, "A224172": 0, "A224178": 0, "A224306": 1, "A224365": 1, "A224387": 0, "A224419": 1, "A224570": 0, "A224628": 0, "A224649": 0, "A224662": 1, "A224668": 0, "A224679": 0, "A224771": 1, "A224796": 1, "A224869": 0, "A224876": 1, "A224946": 1, "A224955": 1, "A224968": 0, "A225072": 0, "A225105": 0, "A225122": 0, "A225123": 1, "A225143": 0, "A225242": 0, "A225269": 0, "A225294": 1, "A225437": 1, "A225451": 1, "A225485": 1, "A225543": 1, "A225544": 0, "A225589": 1, "A225640": 1, "A225697": 0, "A225749": 1, "A225824": 1, "A225832": 1, "A225858": 0, "A225876": 1, "A225947": 1, "A226026": 0, "A226042": 1, "A226130": 1, "A226278": 1, "A226330": 0, "A226415": 1, "A226427": 1, "A226593": 0, "A226621": 1, "A226703": 0, "A226724": 0, "A226735": 0, "A227078": 1, "A227184": 0, "A227392": 1, "A227407": 1, "A227422": 1, "A227563": 1, "A227571": 1, "A227727": 1, "A227746": 1, "A227762": 0, "A227829": 1, "A227890": 1, "A227906": 0, "A228007": 1, "A228022": 1, "A228049": 0, "A228144": 1, "A228145": 0, "A228218": 1, "A228280": 0, "A228351": 0, "A228357": 1, "A228368": 1, "A228403": 1, "A228466": 1, "A228523": 0, "A228551": 0, "A228618": 1, "A228647": 0, "A228658": 0, "A228752": 0, "A228754": 0, "A228755": 0, "A228758": 1, "A228766": 0, "A228811": 1, "A228907": 1, "A228933": 0, "A228960": 1, "A229059": 0, "A229114": 0, "A229178": 1, "A229209": 1, "A229223": 1, "A229232": 0, "A229268": 1, "A229269": 1, "A229321": 1, "A229435": 1, "A229555": 0, "A229598": 0, "A229612": 1, "A229615": 0, "A229622": 0, "A229639": 0, "A229736": 1, "A229782": 1, "A229816": 0, "A229823": 0, "A229870": 0, "A229889": 1, "A229902": 0, "A230031": 1, "A230054": 0, "A230063": 1, "A230065": 1, "A230088": 1, "A230095": 0, "A230258": 1, "A230278": 0, "A230390": 1, "A230415": 0, "A230445": 0, "A230448": 0, "A230453": 1, "A230508": 1, "A230538": 0, "A230577": 0, "A230580": 0, "A230691": 1, "A230711": 1, "A230794": 1, "A230843": 1, "A230893": 1, "A231086": 1, "A231125": 0, "A231128": 0, "A231188": 0, "A231417": 1, "A231423": 0, "A231441": 0, "A231458": 1, "A231461": 0, "A231637": 1, "A231651": 0, "A231718": 1, "A231761": 0, "A231791": 0, "A231797": 0, "A231861": 0, "A231870": 0, "A231985": 1, "A231993": 1, "A232023": 0, "A232026": 0, "A232104": 1, "A232291": 0, "A232305": 1, "A232368": 0, "A232427": 1, "A232456": 1, "A232471": 0, "A232541": 0, "A232542": 1, "A232544": 0, "A232763": 1, "A232771": 0, "A232831": 0, "A232841": 0, "A232844": 0, "A233040": 0, "A233229": 1, "A233235": 1, "A233269": 0, "A233277": 0, "A233375": 0, "A233616": 0, "A233619": 0, "A233635": 1, "A233697": 0, "A233757": 1, "A233807": 1, "A233822": 1, "A233833": 0, "A233856": 0, "A233985": 0, "A234043": 0, "A234116": 1, "A234149": 1, "A234172": 0, "A234177": 1, "A234181": 1, "A234184": 1, "A234209": 0, "A234229": 0, "A234271": 0, "A234288": 1, "A234304": 1, "A234383": 1, "A234420": 1, "A234449": 1, "A234466": 1, "A234549": 1, "A234610": 1, "A234660": 1, "A234668": 0, "A234782": 1, "A234952": 1, "A235037": 1, "A235051": 0, "A235060": 1, "A235213": 0, "A235252": 0, "A235296": 0, "A235539": 0, "A235593": 0, "A235603": 0, "A235606": 1, "A235630": 0, "A235642": 1, "A235684": 1, "A235743": 0, "A235766": 0, "A235780": 1, "A235873": 1, "A236025": 0, "A236076": 0, "A236129": 0, "A236177": 0, "A236269": 1, "A236412": 1, "A236483": 0, "A236530": 1, "A236537": 1, "A236555": 1, "A236597": 0, "A236805": 1, "A236822": 0, "A236870": 1, 
"A236888": 1, "A236914": 0, "A236924": 0, "A236953": 0, "A236979": 1, "A237039": 0, "A237086": 0, "A237262": 0, "A237313": 1, "A237323": 1, "A237400": 0, "A237462": 1, "A237479": 0, "A237496": 0, "A237619": 0, "A237636": 0, "A237678": 0, "A238011": 1, "A238030": 1, "A238076": 1, "A238079": 1, "A238097": 1, "A238122": 0, "A238132": 1, "A238181": 1, "A238212": 1, "A238247": 1, "A238327": 1, "A238386": 0, "A238437": 0, "A238439": 1, "A238442": 1, "A238460": 1, "A238489": 1, "A238555": 0, "A238680": 0, "A238688": 1, "A238812": 0, "A238926": 1, "A238932": 0, "A238940": 1, "A239026": 0, "A239066": 0, "A239119": 1, "A239137": 0, "A239213": 1, "A239244": 1, "A239402": 1, "A239446": 0, "A239543": 1, "A239574": 0, "A239684": 0, "A239776": 0, "A239799": 1, "A239836": 0, "A239842": 0, "A239860": 0, "A239873": 1, "A239904": 1, "A239908": 1, "A239938": 0, "A240157": 0, "A240185": 0, "A240197": 1, "A240224": 1, "A240384": 0, "A240418": 1, "A240523": 0, "A240529": 1, "A240589": 0, "A240639": 1, "A240646": 1, "A240689": 1, "A240885": 0, "A241036": 0, "A241061": 1, "A241265": 1, "A241279": 1, "A241286": 0, "A241338": 0, "A241441": 1, "A241496": 1, "A241617": 0, "A241702": 1, "A241744": 1, "A241748": 1, "A241787": 0, "A241847": 0, "A241894": 1, "A241902": 1, "A241928": 1, "A242032": 1, "A242043": 0, "A242121": 1, "A242123": 0, "A242133": 0, "A242141": 0, "A242162": 0, "A242259": 1, "A242423": 1, "A242441": 0, "A242447": 0, "A242466": 1, "A242476": 0, "A242516": 0, "A242517": 0, "A242538": 1, "A242560": 1, "A242563": 0, "A242601": 1, "A242758": 0, "A242802": 0, "A242823": 0, "A242829": 0, "A242869": 1, "A242891": 0, "A242922": 1, "A243079": 0, "A243162": 1, "A243270": 0, "A243275": 0, "A243348": 0, "A243380": 0, "A243393": 0, "A243503": 0, "A243586": 0, "A243599": 1, "A243607": 1, "A243615": 0, "A243619": 0, "A243650": 0, "A243764": 1, "A243766": 1, "A243805": 0, "A243877": 1, "A243899": 0, "A243984": 1, "A244047": 1, "A244069": 1, "A244184": 1, "A244259": 1, "A244338": 1, "A244343": 1, "A244375": 1, "A244379": 1, "A244399": 1, "A244457": 1, "A244540": 0, "A244705": 1, "A244769": 0, "A244817": 1, "A244845": 1, "A244869": 1, "A244895": 1, "A244920": 0, "A244950": 0, "A244976": 1, "A245065": 0, "A245130": 1, "A245159": 0, "A245202": 1, "A245264": 1, "A245275": 1, "A245334": 1, "A245340": 1, "A245361": 0, "A245389": 0, "A245415": 0, "A245472": 0, "A245485": 0, "A245536": 0, "A245539": 0, "A245574": 0, "A245649": 0, "A245696": 0, "A245753": 1, "A245881": 0, "A245886": 0, "A245894": 1, "A245949": 0, "A245962": 1, "A246036": 1, "A246137": 0, "A246160": 1, "A246187": 1, "A246296": 1, "A246354": 0, "A246453": 1, "A246492": 0, "A246528": 0, "A246596": 1, "A246602": 1, "A246640": 0, "A246673": 1, "A246715": 1, "A246775": 1, "A246827": 0, "A246888": 1, "A246926": 1, "A246927": 0, "A246935": 0, "A246946": 0, "A246964": 1, "A246966": 0, "A246975": 1, "A247132": 1, "A247174": 1, "A247287": 1, "A247402": 0, "A247507": 1, "A247543": 0, "A247548": 0, "A247771": 0, "A247834": 0, "A247969": 1, "A248030": 0, "A248053": 0, "A248346": 0, "A248473": 0, "A248574": 1, "A248587": 0, "A248723": 1, "A248760": 1, "A248761": 1, "A248763": 1, "A248798": 0, "A248802": 1, "A248887": 1, "A248939": 1, "A248958": 1, "A248960": 0, "A249066": 0, "A249092": 0, "A249148": 0, "A249233": 1, "A249305": 0, "A249451": 0, "A249460": 1, "A249493": 0, "A249515": 0, "A249564": 0, "A249596": 1, "A249718": 0, "A249758": 0, "A249837": 0, "A249892": 0, "A249976": 1, "A250007": 1, "A250011": 0, "A250013": 0, "A250064": 1, "A250142": 0, "A250147": 1, "A250256": 
0, "A250303": 0, "A250304": 0, "A250314": 0, "A250347": 1, "A250348": 1, "A250388": 1, "A250389": 0, "A250473": 1, "A250540": 1, "A250549": 1, "A250726": 1, "A250738": 1, "A250787": 0, "A250805": 1, "A250913": 1, "A250917": 1, "A250919": 0, "A250920": 1, "A250997": 1, "A251121": 0, "A251205": 1, "A251252": 1, "A251314": 1, "A251329": 0, "A251335": 0, "A251548": 1, "A251610": 1, "A251650": 0, "A251681": 1, "A251683": 1, "A251728": 0, "A251825": 1, "A251839": 0, "A252250": 0, "A252286": 0, "A252471": 0, "A252472": 0, "A252579": 0, "A252642": 1, "A252755": 0, "A252803": 0, "A252851": 1, "A252937": 0, "A252948": 1, "A252967": 0, "A253043": 0, "A253069": 0, "A253124": 1, "A253297": 1, "A253364": 0, "A253366": 0, "A253378": 0, "A253382": 0, "A253471": 0, "A253486": 0, "A253488": 1, "A253497": 1, "A253528": 0, "A253604": 0, "A253709": 0, "A253755": 0, "A253764": 1, "A253788": 1, "A253835": 0, "A253862": 1, "A253869": 0, "A253993": 1, "A254003": 0, "A254064": 0, "A254069": 1, "A254116": 0, "A254122": 1, "A254133": 0, "A254157": 1, "A254189": 1, "A254283": 0, "A254303": 0, "A254332": 0, "A254390": 1, "A254414": 1, "A254492": 1, "A254519": 1, "A254523": 0, "A254538": 0, "A254763": 1, "A254800": 0, "A255069": 1, "A255328": 1, "A255341": 0, "A255380": 0, "A255438": 1, "A255439": 1, "A255554": 0, "A255665": 1, "A255799": 0, "A255864": 1, "A255896": 0, "A255899": 1, "A256122": 1, "A256262": 0, "A256355": 0, "A256359": 1, "A256688": 0, "A256783": 1, "A256808": 0, "A256861": 0, "A257070": 0, "A257114": 0, "A257144": 1, "A257186": 1, "A257202": 0, "A257339": 0, "A257514": 0, "A257515": 0, "A257516": 1, "A257586": 0, "A257621": 1, "A257657": 0, "A257721": 0, "A257741": 1, "A257897": 1, "A257904": 0, "A258063": 0, "A258082": 1, "A258146": 0, "A258149": 0, "A258285": 0, "A258291": 0, "A258308": 1, "A258313": 0, "A258505": 0, "A258531": 1, "A258673": 1, "A258683": 1, "A258722": 0, "A258749": 0, "A258828": 1, "A258870": 1, "A258904": 0, "A258982": 0, "A259059": 0, "A259068": 0, "A259098": 0, "A259136": 0, "A259283": 0, "A259293": 0, "A259308": 0, "A259401": 1, "A259469": 0, "A259535": 0, "A259648": 1, "A259672": 1, "A259800": 1, "A259894": 1, "A259939": 0, "A259946": 0, "A260047": 1, "A260132": 1, "A260155": 1, "A260187": 1, "A260216": 0, "A260239": 0, "A260240": 0, "A260284": 1, "A260299": 1, "A260316": 0, "A260421": 1, "A260474": 0, "A260531": 0, "A260537": 0, "A260732": 0, "A260766": 1, "A260837": 1, "A260969": 0, "A260977": 1, "A261081": 1, "A261136": 1, "A261245": 0, "A261389": 0, "A261394": 0, "A261490": 0, "A261563": 1, "A261668": 0, "A261802": 0, "A261836": 0, "A261900": 0, "A262002": 0, "A262073": 0, "A262297": 0, "A262476": 1, "A262524": 1, "A262654": 0, "A262659": 1, "A262701": 0, "A262707": 0, "A262730": 0, "A262746": 1, "A262750": 0, "A262772": 0, "A262848": 0, "A262880": 0, "A262905": 1, "A262912": 0, "A262913": 0, "A262977": 0, "A262995": 0, "A263044": 1, "A263055": 0, "A263079": 0, "A263192": 1, "A263203": 1, "A263228": 0, "A263247": 0, "A263323": 0, "A263332": 0, "A263613": 1, "A263703": 1, "A263776": 1, "A263822": 1, "A263849": 0, "A263929": 1, "A263987": 0, "A263988": 1, "A264050": 1, "A264108": 0, "A264158": 1, "A264266": 0, "A264301": 1, "A264368": 1, "A264452": 1, "A264752": 0, "A264778": 0, "A264906": 0, "A264910": 0, "A264961": 1, "A264999": 1, "A265151": 1, "A265160": 1, "A265178": 1, "A265198": 1, "A265266": 0, "A265298": 1, "A265313": 0, "A265369": 0, "A265380": 0, "A265388": 1, "A265420": 1, "A265436": 1, "A265441": 0, "A265452": 1, "A265454": 0, "A265459": 1, "A265559": 1, 
"A265602": 0, "A265609": 0, "A265647": 0, "A265755": 0, "A265782": 1, "A265795": 1, "A265806": 1, "A265910": 0, "A265933": 1, "A265934": 1, "A265939": 1, "A265980": 1, "A266001": 0, "A266020": 0, "A266168": 1, "A266303": 1, "A266366": 0, "A266433": 1, "A266448": 1, "A266575": 0, "A266591": 1, "A266617": 0, "A266637": 1, "A266796": 0, "A266804": 0, "A266843": 0, "A266844": 1, "A266845": 1, "A266863": 1, "A266881": 1, "A266894": 1, "A266929": 0, "A266963": 0, "A267026": 0, "A267078": 0, "A267126": 1, "A267166": 0, "A267198": 0, "A267216": 1, "A267220": 0, "A267317": 0, "A267370": 1, "A267397": 0, "A267458": 1, "A267467": 1, "A267478": 0, "A267511": 1, "A267740": 1, "A267797": 0, "A267847": 0, "A267848": 1, "A267853": 0, "A267872": 0, "A267885": 0, "A267923": 1, "A267954": 0, "A268106": 0, "A268127": 1, "A268128": 0, "A268224": 0, "A268288": 0, "A268307": 0, "A268313": 1, "A268325": 0, "A268395": 0, "A268442": 0, "A268461": 1, "A268476": 1, "A268491": 0, "A268553": 1, "A268596": 1, "A268609": 1, "A268631": 1, "A268637": 1, "A268641": 1, "A268653": 1, "A268655": 1, "A268671": 1, "A268692": 1, "A268764": 0, "A268793": 0, "A268902": 0, "A268903": 1, "A268986": 0, "A269103": 0, "A269229": 1, "A269308": 1, "A269312": 0, "A269327": 1, "A269331": 1, "A269440": 1, "A269446": 0, "A269455": 0, "A269471": 1, "A269530": 0, "A269561": 1, "A269632": 0, "A269647": 0, "A269786": 0, "A269872": 0, "A269902": 0, "A269904": 1, "A269930": 0, "A269950": 0, "A270030": 1, "A270190": 1, "A270217": 1, "A270220": 0, "A270248": 1, "A270311": 1, "A270322": 1, "A270383": 0, "A270396": 1, "A270410": 1, "A270417": 0, "A270464": 1, "A270549": 0, "A270558": 0, "A270590": 0, "A270593": 1, "A270624": 0, "A270634": 1, "A270636": 1, "A270648": 1, "A270662": 0, "A270753": 1, "A270803": 0, "A270813": 1, "A270888": 0, "A270894": 0, "A270909": 0, "A270970": 1, "A270988": 0, "A271015": 0, "A271044": 1, "A271070": 0, "A271166": 1, "A271260": 0, "A271278": 0, "A271299": 0, "A271333": 1, "A271389": 0, "A271401": 0, "A271559": 1, "A271565": 1, "A271572": 1, "A271591": 1, "A271622": 0, "A271633": 0, "A271652": 1, "A271805": 0, "A271838": 0, "A271842": 1, "A271861": 1, "A271924": 0, "A272032": 0, "A272035": 0, "A272080": 1, "A272081": 0, "A272102": 1, "A272114": 1, "A272128": 1, "A272135": 1, "A272137": 0, "A272214": 1, "A272260": 0, "A272269": 1, "A272286": 1, "A272336": 0, "A272358": 1, "A272427": 1, "A272449": 1, "A272501": 0, "A272511": 1, "A272535": 1, "A272578": 1, "A272582": 0, "A272586": 1, "A272644": 0, "A272695": 1, "A272754": 1, "A272889": 0, "A272943": 1, "A273017": 1, "A273025": 1, "A273055": 0, "A273179": 0, "A273187": 0, "A273193": 1, "A273291": 1, "A273384": 1, "A273426": 0, "A273500": 0, "A273524": 1, "A273608": 0, "A273675": 0, "A273747": 1, "A273794": 1, "A273798": 0, "A273818": 1, "A273849": 1, "A273858": 0, "A273948": 1, "A273949": 1, "A273975": 1, "A273993": 0, "A274007": 1, "A274025": 1, "A274073": 1, "A274143": 0, "A274165": 1, "A274199": 1, "A274275": 1, "A274319": 1, "A274348": 1, "A274349": 0, "A274372": 0, "A274552": 0, "A274601": 1, "A274612": 0, "A274626": 0, "A274667": 1, "A274671": 1, "A274771": 1, "A274796": 1, "A274807": 1, "A274847": 0, "A274897": 1, "A274912": 0, "A274944": 0, "A274981": 1, "A275046": 0, "A275075": 1, "A275078": 1, "A275129": 1, "A275159": 0, "A275181": 0, "A275189": 0, "A275212": 0, "A275214": 1, "A275333": 0, "A275359": 0, "A275441": 0, "A275472": 1, "A275474": 0, "A275570": 0, "A275580": 0, "A275610": 1, "A275825": 0, "A275855": 1, "A275880": 0, "A275911": 0, "A275960": 1, "A276065": 
1, "A276104": 1, "A276112": 1, "A276120": 1, "A276147": 0, "A276214": 1, "A276216": 0, "A276218": 0, "A276227": 0, "A276262": 1, "A276307": 0, "A276309": 0, "A276320": 1, "A276372": 1, "A276417": 0, "A276471": 0, "A276513": 1, "A276550": 0, "A276567": 1, "A276616": 0, "A276624": 1, "A276733": 0, "A276827": 0, "A276882": 0, "A276903": 0, "A276965": 0, "A276973": 1, "A277041": 1, "A277121": 1, "A277124": 1, "A277171": 0, "A277319": 1, "A277358": 0, "A277367": 0, "A277371": 1, "A277400": 1, "A277406": 1, "A277423": 0, "A277455": 0, "A277463": 1, "A277488": 1, "A277502": 1, "A277602": 1, "A277657": 0, "A277677": 1, "A277754": 1, "A277769": 1, "A277807": 1, "A277836": 0, "A277866": 0, "A277877": 1, "A277927": 0, "A277940": 1, "A278012": 1, "A278051": 1, "A278076": 1, "A278079": 1, "A278137": 0, "A278203": 1, "A278304": 1, "A278314": 0, "A278358": 0, "A278457": 0, "A278525": 0, "A278571": 1, "A278657": 1, "A278697": 1, "A278702": 1, "A278714": 1, "A278733": 0, "A278757": 0, "A278778": 1, "A278821": 0, "A279009": 0, "A279062": 0, "A279083": 1, "A279086": 0, "A279120": 1, "A279145": 1, "A279165": 0, "A279251": 0, "A279252": 1, "A279260": 1, "A279311": 0, "A279347": 0, "A279494": 0, "A279582": 0, "A279646": 1, "A279657": 1, "A279661": 0, "A279763": 0, "A279924": 1, "A279962": 1, "A279969": 1, "A279981": 1, "A280005": 0, "A280026": 0, "A280035": 1, "A280058": 1, "A280254": 1, "A280273": 0, "A280338": 0, "A280378": 0, "A280382": 0, "A280397": 1, "A280415": 1, "A280425": 1, "A280465": 0, "A280475": 1, "A280537": 0, "A280543": 1, "A280606": 0, "A280613": 0, "A280722": 0, "A280774": 0, "A280809": 1, "A280844": 0, "A280858": 0, "A280993": 1, "A281077": 0, "A281129": 0, "A281162": 0, "A281183": 1, "A281260": 1, "A281431": 1, "A281439": 1, "A281497": 0, "A281498": 0, "A281511": 0, "A281528": 0, "A281563": 1, "A281578": 0, "A281599": 1, "A281600": 0, "A281667": 1, "A281719": 1, "A281778": 0, "A281925": 1, "A281972": 0, "A282017": 1, "A282077": 1, "A282102": 0, "A282105": 0, "A282133": 1, "A282170": 0, "A282231": 1, "A282233": 0, "A282333": 1, "A282366": 1, "A282386": 0, "A282392": 1, "A282462": 0, "A282483": 1, "A282530": 0, "A282551": 1, "A282563": 0, "A282565": 0, "A282614": 0, "A282644": 1, "A282648": 1, "A282661": 1, "A282770": 0, "A282772": 0, "A282799": 0, "A282818": 1, "A282885": 0, "A282933": 1, "A282950": 0, "A282968": 1, "A282970": 1, "A283050": 1, "A283066": 1, "A283083": 1, "A283090": 1, "A283153": 1, "A283164": 0, "A283169": 0, "A283201": 0, "A283220": 1, "A283356": 1, "A283437": 1, "A283446": 0, "A283447": 1, "A283455": 0, "A283527": 1, "A283534": 1, "A283564": 1, "A283595": 0, "A283612": 1, "A283617": 0, "A283662": 1, "A283663": 1, "A283669": 0, "A283675": 0, "A283692": 1, "A283715": 1, "A283736": 0, "A283752": 1, "A283783": 0, "A283800": 1, "A283854": 1, "A283881": 1, "A283884": 1, "A283909": 0, "A283925": 0, "A284042": 0, "A284061": 0, "A284076": 0, "A284109": 1, "A284201": 0, "A284221": 0, "A284239": 0, "A284250": 1, "A284251": 0, "A284300": 0, "A284304": 0, "A284347": 0, "A284368": 0, "A284468": 0, "A284503": 1, "A284509": 1, "A284520": 0, "A284525": 1, "A284564": 0, "A284628": 1, "A284631": 0, "A284654": 0, "A284676": 1, "A284693": 0, "A284695": 1, "A284706": 1, "A284744": 1, "A284754": 0, "A284793": 0, "A284812": 1, "A284865": 1, "A284874": 1, "A284878": 0, "A284972": 0, "A284997": 1, "A285040": 0, "A285049": 0, "A285054": 1, "A285075": 0, "A285134": 1, "A285161": 1, "A285172": 1, "A285185": 0, "A285188": 1, "A285216": 0, "A285222": 1, "A285234": 0, "A285241": 0, "A285334": 1, 
"A285340": 1, "A285343": 1, "A285344": 0, "A285350": 0, "A285362": 0, "A285379": 0, "A285402": 1, "A285426": 1, "A285510": 0, "A285525": 0, "A285564": 0, "A285570": 1, "A285600": 0, "A285627": 1, "A285633": 1, "A285671": 0, "A285698": 1, "A285743": 0, "A285788": 0, "A285811": 0, "A285823": 0, "A285837": 1, "A285851": 1, "A285852": 0, "A285868": 1, "A285945": 0, "A285984": 0, "A286001": 1, "A286035": 1, "A286087": 1, "A286099": 0, "A286142": 1, "A286163": 1, "A286201": 0, "A286208": 0, "A286248": 1, "A286297": 0, "A286302": 1, "A286344": 1, "A286384": 1, "A286422": 0, "A286494": 0, "A286499": 1, "A286522": 1, "A286525": 1, "A286526": 0, "A286550": 1, "A286554": 1} --------------------------------------------------------------------------------