├── scripts
│   ├── dataset
│   │   ├── __init__.py
│   │   ├── data_aug.py
│   │   ├── source_dataset.py
│   │   ├── source_dataset_affinity.py
│   │   ├── source_dataset_mito.py
│   │   └── target_dataset_mito.py
│   ├── loss
│   │   ├── __init__.py
│   │   └── loss.py
│   ├── utils
│   │   ├── __init__.py
│   │   ├── difference.py
│   │   ├── tools_self.py
│   │   ├── utils.py
│   │   ├── metrics.py
│   │   ├── test_augmentation.py
│   │   ├── postprocessing.py
│   │   ├── show.py
│   │   ├── affinity.py
│   │   └── pre_processing.py
│   ├── model
│   │   ├── discriminator_davsn.py
│   │   ├── discriminator_damtnet.py
│   │   ├── accel_deeplabv2.py
│   │   ├── CoDetectionCNN.py
│   │   └── advanced_model.py
│   ├── config
│   │   ├── mitoh2r.yaml
│   │   ├── mitor2h.yaml
│   │   ├── vnc2lucchi1.yaml
│   │   └── vnc2lucchi2.yaml
│   ├── inference.py
│   └── inference_mito.py
├── models
│   ├── mitoh2r
│   │   └── readme.md
│   ├── mitor2h
│   │   └── readme.md
│   ├── vnc2lucchi1
│   │   └── readme.md
│   └── vnc2lucchi2
│       └── readme.md
├── inference
│   ├── mitoh2r
│   │   └── scores.txt
│   ├── mitor2h
│   │   └── scores.txt
│   ├── vnc2lucchi1
│   │   └── scores.txt
│   └── vnc2lucchi2
│       └── scores.txt
├── images
│   ├── network.png
│   ├── framework.png
│   └── visual_results.png
├── presentation
│   ├── MICCAI2022_poster_paper1029.pdf
│   └── MICCAI2022_video_paper1029.pdf
├── logs
│   ├── mitoh2r
│   │   ├── events.out.tfevents.1644426015.0d8ce3b46664
│   │   └── valid.txt
│   ├── mitor2h
│   │   ├── events.out.tfevents.1644508614.666345c3124a
│   │   └── valid.txt
│   ├── vnc2lucchi1
│   │   ├── events.out.tfevents.1644128951.0d8ce3b46664
│   │   └── valid.txt
│   └── vnc2lucchi2
│       ├── events.out.tfevents.1644289603.666345c3124a
│       └── valid.txt
├── data
│   ├── VNC3
│   │   └── readme.md
│   ├── Lucchi
│   │   └── readme.md
│   └── Mito
│       └── readme.md
├── requirements.txt
└── README.md

--------------------------------------------------------------------------------
/scripts/dataset/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/scripts/loss/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/scripts/utils/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/models/mitoh2r/readme.md:
--------------------------------------------------------------------------------
1 | Put 'model.ckpt' here.
2 | 
3 | 
--------------------------------------------------------------------------------
/models/mitor2h/readme.md:
--------------------------------------------------------------------------------
1 | Put 'model.ckpt' here.
2 | 
3 | 
--------------------------------------------------------------------------------
/models/vnc2lucchi1/readme.md:
--------------------------------------------------------------------------------
1 | Put 'model.ckpt' here.
2 | 
3 | 
--------------------------------------------------------------------------------
/models/vnc2lucchi2/readme.md:
--------------------------------------------------------------------------------
1 | Put 'model.ckpt' here.
2 | 3 | -------------------------------------------------------------------------------- /inference/mitoh2r/scores.txt: -------------------------------------------------------------------------------- 1 | mAP=0.9682, F1=0.8851, MCC=0.8829, IoU=0.7941 2 | -------------------------------------------------------------------------------- /inference/mitor2h/scores.txt: -------------------------------------------------------------------------------- 1 | mAP=0.9255, F1=0.8556, MCC=0.8493, IoU=0.7477 2 | -------------------------------------------------------------------------------- /inference/vnc2lucchi1/scores.txt: -------------------------------------------------------------------------------- 1 | mAP=0.8948, F1=0.8129, MCC=0.8053, IoU=0.6865 2 | -------------------------------------------------------------------------------- /inference/vnc2lucchi2/scores.txt: -------------------------------------------------------------------------------- 1 | mAP=0.9244, F1=0.8518, MCC=0.8448, IoU=0.7431 2 | -------------------------------------------------------------------------------- /images/network.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weih527/DA-ISC/HEAD/images/network.png -------------------------------------------------------------------------------- /images/framework.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weih527/DA-ISC/HEAD/images/framework.png -------------------------------------------------------------------------------- /images/visual_results.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weih527/DA-ISC/HEAD/images/visual_results.png -------------------------------------------------------------------------------- /presentation/MICCAI2022_poster_paper1029.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weih527/DA-ISC/HEAD/presentation/MICCAI2022_poster_paper1029.pdf -------------------------------------------------------------------------------- /presentation/MICCAI2022_video_paper1029.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weih527/DA-ISC/HEAD/presentation/MICCAI2022_video_paper1029.pdf -------------------------------------------------------------------------------- /logs/mitoh2r/events.out.tfevents.1644426015.0d8ce3b46664: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weih527/DA-ISC/HEAD/logs/mitoh2r/events.out.tfevents.1644426015.0d8ce3b46664 -------------------------------------------------------------------------------- /logs/mitor2h/events.out.tfevents.1644508614.666345c3124a: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weih527/DA-ISC/HEAD/logs/mitor2h/events.out.tfevents.1644508614.666345c3124a -------------------------------------------------------------------------------- /logs/vnc2lucchi1/events.out.tfevents.1644128951.0d8ce3b46664: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weih527/DA-ISC/HEAD/logs/vnc2lucchi1/events.out.tfevents.1644128951.0d8ce3b46664 -------------------------------------------------------------------------------- /logs/vnc2lucchi2/events.out.tfevents.1644289603.666345c3124a: 
--------------------------------------------------------------------------------
 https://raw.githubusercontent.com/weih527/DA-ISC/HEAD/logs/vnc2lucchi2/events.out.tfevents.1644289603.666345c3124a
--------------------------------------------------------------------------------
/data/VNC3/readme.md:
--------------------------------------------------------------------------------
1 | Put downloaded data here.
2 | 
3 | It contains two folders:
4 | - training (it contains 20 .png images)
5 | - training_groundtruth (it contains 20 .png images)
6 | 
--------------------------------------------------------------------------------
/data/Lucchi/readme.md:
--------------------------------------------------------------------------------
1 | Put downloaded data here.
2 | 
3 | It contains four folders:
4 | - testing (it contains 165 .png images)
5 | - testing_groundtruth (it contains 165 .png images)
6 | - training (it contains 165 .png images)
7 | - training_groundtruth (it contains 165 .png images)
8 | 
--------------------------------------------------------------------------------
/data/Mito/readme.md:
--------------------------------------------------------------------------------
1 | Put downloaded data here.
2 | 
3 | It contains two folders:
4 | - human (it contains four files: testing.hdf, testing_groundtruth.hdf, training.hdf, training_groundtruth.hdf)
5 | - rat (it contains four files: testing.hdf, testing_groundtruth.hdf, training.hdf, training_groundtruth.hdf)
6 | 
--------------------------------------------------------------------------------
/scripts/model/discriminator_davsn.py:
--------------------------------------------------------------------------------
1 | from torch import nn
2 | 
3 | 
4 | def get_fc_discriminator(num_classes, ndf=64):
5 |     return nn.Sequential(
6 |         nn.Conv2d(num_classes, ndf, kernel_size=4, stride=2, padding=1),
7 |         nn.LeakyReLU(negative_slope=0.2, inplace=True),
8 |         nn.Conv2d(ndf, ndf * 2, kernel_size=4, stride=2, padding=1),
9 |         nn.LeakyReLU(negative_slope=0.2, inplace=True),
10 |         nn.Conv2d(ndf * 2, ndf * 4, kernel_size=4, stride=2, padding=1),
11 |         nn.LeakyReLU(negative_slope=0.2, inplace=True),
12 |         nn.Conv2d(ndf * 4, ndf * 8, kernel_size=4, stride=2, padding=1),
13 |         nn.LeakyReLU(negative_slope=0.2, inplace=True),
14 |         nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=2, padding=1),
15 |     )
16 | 
--------------------------------------------------------------------------------
/scripts/utils/difference.py:
--------------------------------------------------------------------------------
1 | '''
2 | Description: 
3 | Author: weihuang
4 | Date: 2021-11-18 12:39:28
5 | LastEditors: weihuang
6 | LastEditTime: 2021-11-18 12:51:21
7 | '''
8 | import os
9 | import os.path as osp
10 | import numpy as np
11 | from PIL import Image
12 | 
13 | in_path = '../data/Lucchi/training_groundtruth'
14 | out_path = '../data/Lucchi/training_groundtruth_diff_10'
15 | if not osp.exists(out_path):
16 |     os.makedirs(out_path)
17 | 
18 | NUM = 165
19 | stride = 10
20 | for i in range(NUM-stride):
21 |     img1 = np.asarray(Image.open(osp.join(in_path, str(i).zfill(3)+'.png')))
22 |     img2 = np.asarray(Image.open(osp.join(in_path, str(i+stride).zfill(3)+'.png')))
23 |     img1 = (img1 / 255).astype(bool)  # np.bool was removed in NumPy >= 1.24
24 |     img2 = (img2 / 255).astype(bool)
25 |     # diff = img1 ^ img2  # XOR
26 |     diff = np.bitwise_xor(img1, img2)
27 |     diff = (diff).astype(np.uint8) * 255
28 |     Image.fromarray(diff).save(osp.join(out_path, str(i).zfill(3)+'.png'))
29 | print('Done')
30 | 
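A quick way to sanity-check the precomputed difference maps against the on-the-fly XOR used by the datasets (a minimal sketch; it assumes difference.py above has already been run, so that '../data/Lucchi/training_groundtruth_diff_10' exists):

import numpy as np
from PIL import Image

# Slice i is paired with slice i+stride (stride=10 in the script above).
a = np.asarray(Image.open('../data/Lucchi/training_groundtruth/000.png')) > 127
b = np.asarray(Image.open('../data/Lucchi/training_groundtruth/010.png')) > 127
ref = np.asarray(Image.open('../data/Lucchi/training_groundtruth_diff_10/000.png')) > 127
assert np.array_equal(np.bitwise_xor(a, b), ref)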
-------------------------------------------------------------------------------- /scripts/dataset/data_aug.py: -------------------------------------------------------------------------------- 1 | import albumentations as albu 2 | import numpy as np 3 | import cv2 4 | from albumentations import * 5 | 6 | 7 | def strong_aug(p=.5, cropsize=[512, 512]): 8 | return Compose([ 9 | Flip(), 10 | Transpose(), 11 | Rotate(), 12 | OneOf([Resize(p=0.2, height=cropsize[0], width=cropsize[1]), 13 | RandomSizedCrop(((256, 512)), p=0.2, height=cropsize[0], width=cropsize[1], interpolation=2), 14 | ], p=0.2), 15 | RandomBrightnessContrast(), 16 | MotionBlur(p=0.2), 17 | ElasticTransform(p=0.3), 18 | ], p=p) 19 | 20 | 21 | def create_transformer(transformations, images): 22 | target = {} 23 | for i, image in enumerate(images[1:]): 24 | target['image' + str(i)] = 'image' 25 | return albu.Compose(transformations, p=0.5, additional_targets=target)(image=images[0], 26 | mask=images[1] 27 | ) 28 | 29 | 30 | def aug_img_lab(img, lab, cropsize, p=0.5): 31 | images = [img, lab] 32 | transformed = create_transformer(strong_aug(p=p, cropsize=cropsize), images) 33 | return transformed['image'], transformed['mask'] 34 | -------------------------------------------------------------------------------- /scripts/config/mitoh2r.yaml: -------------------------------------------------------------------------------- 1 | NAME: 'mito_h2r_lre4_wcrp' 2 | 3 | MODEL: 4 | input_nc: 1 5 | output_nc: 2 6 | num_classes: 2 7 | input_channels: 64 8 | level: 1 9 | 10 | TRAIN: 11 | resume: False 12 | model_name: '' 13 | if_valid: True 14 | cache_path: '../caches/' 15 | save_path: '../models/' 16 | loss_func: 'WeightedMSELoss' # 'WeightedBCELoss', 'BCELoss' 17 | if_adv_weight: False 18 | cross_loss_source: False 19 | cross_loss_target: True 20 | weight_cross: 0.1 21 | consistency_weight_rampup: True 22 | 23 | opt_type: 'adam' # sgd 24 | lr_mode: 'fixed' 25 | total_iters: 200000 26 | rampup_iters: 50000 27 | learning_rate: 0.0001 28 | learning_rate_ms: 0.0001 29 | learning_rate_mt: 0.0001 30 | display_freq: 100 31 | show_freq: 500 32 | valid_freq: 500 33 | save_freq: 500 34 | power: 0.9 35 | weight_adv_temporal: 0.001 36 | weight_adv_spatial: 0.001 37 | lamda_wd: 0.001 38 | 39 | batch_size: 1 40 | num_workers: 4 41 | if_cuda: True 42 | 43 | random_seed: 555 # -1 is none 44 | 45 | DATA: 46 | data_dir_img: '../data/Mito/human/training.hdf' 47 | data_dir_label: '../data/Mito/human/training_groundtruth.hdf' 48 | data_list: ~ 49 | input_size: 512 50 | source_stride: 1 51 | data_dir_target: '../data/Mito/rat/training.hdf' 52 | data_dir_target_label: '../data/Mito/rat/training_groundtruth.hdf' 53 | data_list_target: ~ 54 | input_size_target: 512 55 | target_stride: 1 56 | data_dir_val: '../data/Mito/rat/testing.hdf' 57 | data_dir_val_label: '../data/Mito/rat/testing_groundtruth.hdf' 58 | data_list_val: ~ 59 | input_size_test: 1024 60 | 61 | TEST: 62 | pad: 0 63 | model_name: '' -------------------------------------------------------------------------------- /scripts/config/mitor2h.yaml: -------------------------------------------------------------------------------- 1 | NAME: 'mito_r2h_lre4_wadv001' 2 | 3 | MODEL: 4 | input_nc: 1 5 | output_nc: 2 6 | num_classes: 2 7 | input_channels: 64 8 | level: 1 9 | 10 | TRAIN: 11 | resume: False 12 | model_name: '' 13 | if_valid: True 14 | cache_path: '../caches/' 15 | save_path: '../models/' 16 | loss_func: 'WeightedMSELoss' # 'WeightedBCELoss', 'BCELoss' 17 | if_adv_weight: False 18 | cross_loss_source: False 
19 |   cross_loss_target: True
20 |   weight_cross: 0.1
21 |   consistency_weight_rampup: False
22 | 
23 |   opt_type: 'adam' # sgd
24 |   lr_mode: 'fixed'
25 |   total_iters: 200000
26 |   rampup_iters: 50000
27 |   learning_rate: 0.0001
28 |   learning_rate_ms: 0.0001
29 |   learning_rate_mt: 0.0001
30 |   display_freq: 100
31 |   show_freq: 500
32 |   valid_freq: 500
33 |   save_freq: 500
34 |   power: 0.9
35 |   weight_adv_temporal: 0.01
36 |   weight_adv_spatial: 0.01
37 |   lamda_wd: 0.001
38 | 
39 |   batch_size: 1
40 |   num_workers: 4
41 |   if_cuda: True
42 | 
43 |   random_seed: 555 # -1 is none
44 | 
45 | DATA:
46 |   data_dir_img: '../data/Mito/rat/training.hdf'
47 |   data_dir_label: '../data/Mito/rat/training_groundtruth.hdf'
48 |   data_list: ~
49 |   input_size: 512
50 |   source_stride: 1
51 |   data_dir_target: '../data/Mito/human/training.hdf'
52 |   data_dir_target_label: '../data/Mito/human/training_groundtruth.hdf'
53 |   data_list_target: ~
54 |   input_size_target: 512
55 |   target_stride: 1
56 |   data_dir_val: '../data/Mito/human/testing.hdf'
57 |   data_dir_val_label: '../data/Mito/human/testing_groundtruth.hdf'
58 |   data_list_val: ~
59 |   input_size_test: 1024
60 | 
61 | TEST:
62 |   pad: 0
63 |   model_name: ''
--------------------------------------------------------------------------------
/scripts/config/vnc2lucchi1.yaml:
--------------------------------------------------------------------------------
1 | NAME: 'lucchi_subset1_lre4_wcrp'
2 | 
3 | MODEL:
4 |   input_nc: 1
5 |   output_nc: 2
6 |   num_classes: 2
7 |   input_channels: 64
8 |   level: 1
9 | 
10 | TRAIN:
11 |   resume: False
12 |   model_name: ''
13 |   if_valid: True
14 |   cache_path: '../caches/'
15 |   save_path: '../models/'
16 |   loss_func: 'WeightedMSELoss' # 'WeightedBCELoss', 'BCELoss'
17 |   if_adv_weight: False
18 |   cross_loss_source: False
19 |   cross_loss_target: True
20 |   weight_cross: 0.1
21 |   consistency_weight_rampup: True
22 | 
23 |   opt_type: 'adam' # sgd
24 |   lr_mode: 'fixed'
25 |   total_iters: 200000
26 |   rampup_iters: 50000
27 |   learning_rate: 0.0001
28 |   learning_rate_ms: 0.0001
29 |   learning_rate_mt: 0.0001
30 |   display_freq: 100
31 |   show_freq: 500
32 |   valid_freq: 500
33 |   save_freq: 500
34 |   power: 0.9
35 |   weight_adv_temporal: 0.001
36 |   weight_adv_spatial: 0.001
37 |   lamda_wd: 0.001
38 | 
39 |   batch_size: 1
40 |   num_workers: 4
41 |   if_cuda: True
42 | 
43 |   random_seed: 555 # -1 is none
44 | 
45 | DATA:
46 |   data_dir_img: '../data/VNC3/training/'
47 |   data_dir_label: '../data/VNC3/training_groundtruth/'
48 |   data_list: '../data/VNC3/train.txt'
49 |   input_size: 512
50 |   source_stride: 1
51 |   data_dir_target: '../data/Lucchi/training'
52 |   data_dir_target_label: '../data/Lucchi/training_groundtruth'
53 |   data_list_target: '../data/Lucchi/train.txt'
54 |   input_size_target: 512
55 |   target_stride: 10
56 |   data_dir_val: '../data/Lucchi/testing'
57 |   data_dir_val_label: '../data/Lucchi/testing_groundtruth'
58 |   data_list_val: '../data/Lucchi/testing.txt'
59 |   input_size_test: 512
60 | 
61 | TEST:
62 |   pad: 0
63 |   model_name: ''
--------------------------------------------------------------------------------
/scripts/config/vnc2lucchi2.yaml:
--------------------------------------------------------------------------------
1 | NAME: 'lucchi_subset2_lre4_wcrp'
2 | 
3 | MODEL:
4 |   input_nc: 1
5 |   output_nc: 2
6 |   num_classes: 2
7 |   input_channels: 64
8 |   level: 1
9 | 
10 | TRAIN:
11 |   resume: False
12 |   model_name: ''
13 |   if_valid: True
14 |   cache_path: '../caches/'
15 |   save_path: '../models/'
16 |   loss_func: 'WeightedMSELoss' # 'WeightedBCELoss', 'BCELoss'
17 |   if_adv_weight: False
18 |   cross_loss_source: False
19 |   cross_loss_target: True
20 |   weight_cross: 0.1
21 |   consistency_weight_rampup: True
22 | 
23 |   opt_type: 'adam' # sgd
24 |   lr_mode: 'fixed'
25 |   total_iters: 200000
26 |   rampup_iters: 50000
27 |   learning_rate: 0.0001
28 |   learning_rate_ms: 0.0001
29 |   learning_rate_mt: 0.0001
30 |   display_freq: 100
31 |   show_freq: 500
32 |   valid_freq: 500
33 |   save_freq: 500
34 |   power: 0.9
35 |   weight_adv_temporal: 0.001
36 |   weight_adv_spatial: 0.001
37 |   lamda_wd: 0.001
38 | 
39 |   batch_size: 1
40 |   num_workers: 4
41 |   if_cuda: True
42 | 
43 |   random_seed: 555 # -1 is none
44 | 
45 | DATA:
46 |   data_dir_img: '../data/VNC3/training/'
47 |   data_dir_label: '../data/VNC3/training_groundtruth/'
48 |   data_list: '../data/VNC3/train.txt'
49 |   input_size: 512
50 |   source_stride: 1
51 |   data_dir_target: '../data/Lucchi/testing'
52 |   data_dir_target_label: '../data/Lucchi/testing_groundtruth'
53 |   data_list_target: '../data/Lucchi/testing.txt'
54 |   input_size_target: 512
55 |   target_stride: 10
56 |   data_dir_val: '../data/Lucchi/training'
57 |   data_dir_val_label: '../data/Lucchi/training_groundtruth'
58 |   data_list_val: '../data/Lucchi/train.txt'
59 |   input_size_test: 512
60 | 
61 | TEST:
62 |   pad: 0
63 |   model_name: ''
--------------------------------------------------------------------------------
/scripts/utils/tools_self.py:
--------------------------------------------------------------------------------
1 | import SimpleITK as sitk
2 | import numpy as np
3 | import nibabel, math
4 | 
5 | 
6 | def cal_crop_num(img_size, in_size):
7 |     if img_size[0] % in_size[0] == 0:
8 |         crop_n1 = math.ceil(img_size[0] / in_size[0]) + 1
9 |     else:
10 |         crop_n1 = math.ceil(img_size[0] / in_size[0])
11 | 
12 |     if img_size[1] % in_size[1] == 0:
13 |         crop_n2 = math.ceil(img_size[1] / in_size[1]) + 1
14 |     else:
15 |         crop_n2 = math.ceil(img_size[1] / in_size[1])
16 | 
17 |     if img_size[2] % in_size[2] == 0:
18 |         crop_n3 = math.ceil(img_size[2] / in_size[2]) + 1
19 |     else:
20 |         crop_n3 = math.ceil(img_size[2] / in_size[2])
21 |     return crop_n1, crop_n2, crop_n3
22 | 
23 | 
24 | def save_array_as_nii_volume(data, filename, reference_name = None):
25 |     """
26 |     save a numpy array as nifty image
27 |     inputs:
28 |         data: a numpy array with shape [Depth, Height, Width]
29 |         filename: the output file name
30 |         reference_name: file name of the reference image of which affine and header are used
31 |     outputs: None
32 |     """
33 |     # data = np.flipud(data)
34 |     # data = np.fliplr(data)
35 |     # data = np.transpose(data, [2, 0, 1])
36 |     img = sitk.GetImageFromArray(data)
37 |     if(reference_name is not None):
38 |         img_ref = sitk.ReadImage(reference_name)
39 |         img.CopyInformation(img_ref)
40 |     sitk.WriteImage(img, filename)
41 | 
42 | def find_last(string,str):
43 |     last_position=-1
44 |     while True:
45 |         position=string.find(str,last_position+1)
46 |         if position==-1:
47 |             return last_position
48 |         last_position=position
49 | 
50 | def load_nifty_volume_as_array(filename, with_header = False):
51 |     """
52 |     load nifty image into numpy array, and transpose it based on the [z,y,x] axis order
53 |     The output array shape is like [Depth, Height, Width]
54 |     inputs:
55 |         filename: the input file name, should be *.nii or *.nii.gz
56 |         with_header: return affine and header information
57 |     outputs:
58 |         data: a numpy data array
59 |     """
60 |     img = nibabel.load(filename)
61 |     data = img.get_fdata()  # get_data() was removed in nibabel 5.0
62 |     data = np.transpose(data, [2, 1, 0])
63 |     if with_header:
64 |         return data, img.affine, img.header
65 |     else:
66 |         return data
67 | 
68 | 
69 | if __name__ == "__main__":
70 |     print(find_last('t_t1_t2_t3','_'))
71 |     ttt = 't/d/g/g/s/df/d/gggg'
72 |     n = find_last(ttt,'/')
73 |     print(ttt)
74 |     print(ttt[:n])
75 | 
76 | 
--------------------------------------------------------------------------------
/scripts/utils/utils.py:
--------------------------------------------------------------------------------
1 | '''
2 | Description: 
3 | Author: weihuang
4 | Date: 2021-11-18 15:47:44
5 | LastEditors: Please set LastEditors
6 | LastEditTime: 2021-11-29 17:54:38
7 | '''
8 | import torch
9 | import random
10 | import numpy as np
11 | 
12 | def setup_seed(seed):
13 |     torch.manual_seed(seed)
14 |     # torch.cuda.manual_seed(seed)
15 |     torch.cuda.manual_seed_all(seed)
16 |     np.random.seed(seed)
17 |     random.seed(seed)
18 | 
19 | def lr_poly(base_lr, iter, max_iter, power):
20 |     """ Poly_LR scheduler
21 |     """
22 |     return base_lr * ((1 - float(iter) / max_iter) ** power)
23 | 
24 | def _adjust_learning_rate(optimizer, i_iter, learning_rate, max_iters, power):
25 |     lr = lr_poly(learning_rate, i_iter, max_iters, power)
26 |     optimizer.param_groups[0]['lr'] = lr
27 |     if len(optimizer.param_groups) > 1:
28 |         optimizer.param_groups[1]['lr'] = lr * 10
29 | 
30 | def adjust_learning_rate(optimizer, i_iter, learning_rate, max_iters, power):
31 |     """ adjust learning rate for main segnet
32 |     """
33 |     _adjust_learning_rate(optimizer, i_iter, learning_rate, max_iters, power)
34 | 
35 | def adjust_learning_rate_discriminator(optimizer, i_iter, learning_rate, max_iters, power):
36 |     _adjust_learning_rate(optimizer, i_iter, learning_rate, max_iters, power)
37 | 
38 | def sigmoid_rampup(current, rampup_length):
39 |     """Exponential rampup from https://arxiv.org/abs/1610.02242"""
40 |     if rampup_length == 0:
41 |         return 1.0
42 |     else:
43 |         current = np.clip(current, 0.0, rampup_length)
44 |         phase = 1.0 - current / rampup_length
45 |         return float(np.exp(-5.0 * phase * phase))
46 | 
47 | def get_current_consistency_weight(epoch, consistency=0.1, consistency_rampup=10000.0):
48 |     # Consistency ramp-up from https://arxiv.org/abs/1610.02242
49 |     return consistency * sigmoid_rampup(epoch, consistency_rampup)
50 | 
51 | def prob_2_entropy(prob):
52 |     """ convert probabilistic prediction maps to weighted self-information maps
53 |     """
54 |     n, c, h, w = prob.size()
55 |     return -torch.mul(prob, torch.log2(prob + 1e-30)) / np.log2(c)
56 | 
57 | def inference_results(pred, previous, mode='lucchi'):
58 |     pred = torch.argmax(pred, dim=1).float()
59 |     pred = pred.data.cpu().numpy()
60 |     pred = np.squeeze(pred)
61 |     if mode == 'lucchi':
62 |         pred = pred[176:-176, 48:-48]
63 | 
64 |     temp_cpred = previous.copy()
65 |     if np.sum(previous) == 0:
66 |         temp_cpred += pred.astype(np.uint8)
67 |     else:
68 |         temp_cpred += pred.astype(np.uint8)
69 |         temp_cpred = temp_cpred.astype(np.float32) / 2.0
70 |         temp_cpred[temp_cpred >= 0.5] = 1
71 |         temp_cpred[temp_cpred < 0.5] = 0
72 |         temp_cpred = temp_cpred.astype(np.uint8)
73 |     return temp_cpred
74 | 
75 | def inference_results2(pred, previous, mode='lucchi'):
76 |     # pred = torch.argmax(pred, dim=1).float()
77 |     pred = torch.nn.functional.softmax(pred, dim=1)
78 |     pred = pred[:, 1]
79 |     pred = pred.data.cpu().numpy()
80 |     pred = np.squeeze(pred)
81 |     if mode == 'lucchi':
82 |         pred = pred[176:-176, 48:-48]
83 | 
84 |     temp_cpred = previous.copy()
85 |     if np.sum(previous) == 0:
86 |         temp_cpred += pred
87 |     else:
88 |         temp_cpred += pred
89 |         temp_cpred = temp_cpred / 2.0
90 |     temp_cpred[temp_cpred<0] = 0
91 |     temp_cpred[temp_cpred>1] = 1
92 |     return temp_cpred
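A worked example of the consistency ramp-up defined above, plugged with the values used in the configs (weight_cross: 0.1, rampup_iters: 50000); a minimal sketch run from the scripts/ directory, not a new API:

from utils.utils import get_current_consistency_weight

for it in [0, 25000, 50000, 200000]:
    print(it, round(get_current_consistency_weight(it, consistency=0.1, consistency_rampup=50000.0), 5))
# 0 -> 0.00067, 25000 -> 0.02865, 50000 -> 0.1, 200000 -> 0.1 (clipped at rampup_length)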
--------------------------------------------------------------------------------
/scripts/loss/loss.py:
--------------------------------------------------------------------------------
1 | '''
2 | Description: 
3 | Author: weihuang
4 | Date: 2021-11-18 15:47:44
5 | LastEditors: weihuang
6 | LastEditTime: 2021-11-22 22:38:26
7 | '''
8 | import torch
9 | import torch.nn.functional as F
10 | import torch.nn as nn
11 | from torch.autograd import Variable
12 | 
13 | 
14 | class CrossEntropy2d(nn.Module):
15 |     def __init__(self, reduction="mean", ignore_label=255):
16 |         super(CrossEntropy2d, self).__init__()
17 |         self.reduction = reduction
18 |         self.ignore_label = ignore_label
19 | 
20 |     def forward(self, predict, target, weight=None):
21 |         """
22 |         Args:
23 |             predict:(n, c, h, w)
24 |             target:(n, h, w)
25 |             weight (Tensor, optional): a manual rescaling weight given to each class.
26 |                 If given, has to be a Tensor of size "nclasses"
27 |         """
28 |         assert not target.requires_grad
29 |         assert predict.dim() == 4
30 |         assert target.dim() == 3
31 |         assert predict.size(0) == target.size(0), "{0} vs {1} ".format(predict.size(0), target.size(0))
32 |         assert predict.size(2) == target.size(1), "{0} vs {1} ".format(predict.size(2), target.size(1))
33 |         assert predict.size(3) == target.size(2), "{0} vs {1} ".format(predict.size(3), target.size(2))
34 |         n, c, h, w = predict.size()
35 |         target_mask = (target >= 0) * (target != self.ignore_label)
36 |         target = target[target_mask]
37 |         if not target.data.dim():
38 |             return Variable(torch.zeros(1))
39 |         predict = predict.transpose(1, 2).transpose(2, 3).contiguous()
40 |         predict = predict[target_mask.view(n, h, w, 1).repeat(1, 1, 1, c)].view(-1, c)
41 |         loss = F.cross_entropy(predict, target, weight=weight, reduction=self.reduction)
42 |         return loss
43 | 
44 | class FocalLoss(nn.Module):
45 |     def __init__(self, alpha=1, gamma=0, size_average=True):
46 |         super(FocalLoss, self).__init__()
47 |         self.alpha = alpha
48 |         self.gamma = gamma
49 |         self.size_average = size_average
50 | 
51 |     def forward(self, inputs, targets):
52 |         ce_loss = F.cross_entropy(inputs, targets, reduction='none')
53 |         pt = torch.exp(-ce_loss)
54 |         focal_loss = self.alpha * (1-pt)**self.gamma * ce_loss
55 |         if self.size_average:
56 |             return focal_loss.mean()
57 |         else:
58 |             return focal_loss.sum()
59 | 
60 | class MSELoss(nn.Module):
61 |     def forward(self,input,target):
62 |         return torch.mean((input-target)**2)
63 | 
64 | class BCELoss(nn.Module):
65 |     def forward(self, y_pred, y_label):
66 |         y_truth_tensor = torch.FloatTensor(y_pred.size())
67 |         y_truth_tensor.fill_(y_label)
68 |         y_truth_tensor = y_truth_tensor.to(y_pred.get_device())
69 |         return nn.BCEWithLogitsLoss()(y_pred, y_truth_tensor)
70 | 
71 | class WeightedBCELoss(nn.Module):
72 |     def forward(self, input_y, target, weight):
73 |         return F.binary_cross_entropy(input_y, target, weight)
74 | 
75 | class L1Loss_weighted(nn.Module):
76 |     def forward(self, input, target, weights):
77 |         loss = weights * torch.abs(input - target)
78 |         loss = torch.mean(loss)
79 |         return loss
80 | 
81 | def weighted_l1_loss(input, target, weights):
82 |     loss = weights * torch.abs(input - target)
83 |     loss = torch.mean(loss)
84 |     return loss
85 | 
86 | def bce_loss(y_pred, y_label):
87 |     y_truth_tensor = torch.FloatTensor(y_pred.size())
88 |     y_truth_tensor.fill_(y_label)
89 |     y_truth_tensor = y_truth_tensor.to(y_pred.get_device())
90 |     return nn.BCEWithLogitsLoss()(y_pred, y_truth_tensor)
--------------------------------------------------------------------------------
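The scalar-label bce_loss above is the usual adversarial objective: a discriminator score map is compared against a constant domain label. A minimal sketch of that pattern (the 0/1 source/target label convention and the choice of discriminator are illustrative assumptions, not taken from this repository's training code):

import torch
from loss.loss import bce_loss
from model.discriminator_davsn import get_fc_discriminator

source_label, target_label = 0, 1  # assumed domain-label convention
d_main = get_fc_discriminator(num_classes=2).cuda()  # bce_loss calls get_device(), so a CUDA tensor is required
pred_target = torch.softmax(torch.randn(1, 2, 512, 512, device='cuda'), dim=1)

# Segmentation step: push target predictions toward the source label to fool D.
loss_adv = bce_loss(d_main(pred_target), source_label)
# Discriminator step: score target predictions with their true domain label.
loss_d = bce_loss(d_main(pred_target.detach()), target_label)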
/scripts/model/discriminator_damtnet.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | import torch.nn.functional as F
3 | 
4 | 
5 | 
6 | 
7 | 
8 | class labelDiscriminator(nn.Module):
9 |     def __init__(self, num_classes, ndf=64):
10 |         super(labelDiscriminator, self).__init__()
11 |         # self.conv1 = nn.Conv2d(num_classes, ndf, kernel_size=3, padding=1)
12 |         self.conv1 = nn.Conv2d(num_classes, ndf, kernel_size=4, stride=2,padding=1)
13 |         self.conv2 = nn.Conv2d(ndf, ndf * 2, kernel_size=4, stride=2,padding=1)
14 |         self.conv3 = nn.Conv2d(ndf * 2, ndf * 4, kernel_size=4, stride=2,padding=1)
15 |         self.conv4 = nn.Conv2d(ndf * 4, ndf * 8, kernel_size=4, stride=2,padding=1)
16 |         self.classifier = nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=2,padding=1)
17 |         self.gpN1 = nn.GroupNorm(num_groups=32, num_channels=ndf, eps=1e-5, affine=False)  # num_channels must match the preceding conv output (was num_classes)
18 |         self.gpN2 = nn.GroupNorm(num_groups=32, num_channels=ndf * 2, eps=1e-5, affine=False)
19 |         self.gpN3 = nn.GroupNorm(num_groups=32, num_channels=ndf * 4, eps=1e-5, affine=False)
20 |         self.gpN4 = nn.GroupNorm(num_groups=32, num_channels=ndf * 8, eps=1e-5, affine=False)
21 |         self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
22 | 
23 |     def forward(self, x):
24 |         x = self.conv1(x)
25 |         x = self.gpN1(x)
26 |         x = self.leaky_relu(x)
27 | 
28 |         # x = F.max_pool2d(x, kernel_size=2)
29 | 
30 |         x = self.conv2(x)
31 |         x = self.gpN2(x)
32 |         x = self.leaky_relu(x)
33 | 
34 |         # x = F.max_pool2d(x, kernel_size=2)
35 | 
36 |         x = self.conv3(x)
37 |         x = self.gpN3(x)
38 |         x = self.leaky_relu(x)
39 | 
40 |         # x = F.max_pool2d(x, kernel_size=2)
41 | 
42 |         x = self.conv4(x)
43 |         x = self.gpN4(x)
44 |         x = self.leaky_relu(x)
45 | 
46 |         # x = F.max_pool2d(x, kernel_size=2)
47 | 
48 |         x = self.classifier(x)
49 | 
50 |         # x = F.max_pool2d(x, kernel_size=2)
51 | 
52 |         return x
53 | 
54 | 
55 | class featureDiscriminator(nn.Module):
56 |     def __init__(self, input_channels, input_size, num_classes, fc_classifier=3):
57 |         super(featureDiscriminator, self).__init__()
58 |         self.fc_classifier = fc_classifier  # number of fully-connected classifier layers
59 |         self.fc_channels = [288, 144, 2]
60 |         # self.fc_channels = [512, 256, 2]
61 |         self.conv_channels = [48, 48, 48]
62 |         # self.conv_channels = [input_channels//2,input_channels//4]
63 |         self.input_size = input_size
64 | 
65 |         self.conv_features = nn.Sequential()
66 |         self.fc_features = nn.Sequential()
67 | 
68 |         # convolutional layers
69 |         in_channels = input_channels
70 |         data_size = input_size
71 |         for i, out_channels in enumerate(self.conv_channels):
72 |             conv = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
73 |                                  nn.GroupNorm(num_groups=4, num_channels=out_channels, eps=0, affine=False),
74 |                                  nn.ReLU())
75 |             self.conv_features.add_module('conv%d' % (i + 1), conv)
76 |             in_channels = out_channels
77 |             data_size = data_size // 2
78 | 
79 |         # full connections
80 |         in_channels = self.conv_channels[-1] * data_size * data_size
81 |         for i, out_channels in enumerate(self.fc_channels):
82 |             if i == fc_classifier - 1:
83 |                 fc = nn.Sequential(nn.Linear(int(in_channels), out_channels))
84 |             else:
85 |                 fc = nn.Sequential(nn.Linear(int(in_channels), out_channels),
86 |                                    nn.GroupNorm(num_groups=4, num_channels=out_channels, eps=0, affine=False),
87 |                                    nn.ReLU())
88 |             self.fc_features.add_module('linear%d' % (i + 1), fc)
89 |             in_channels = out_channels
90 | 
91 |     def forward(self, x):
92 | 
93 |         for i in range(len(self.conv_channels)):
94 |             x = getattr(self.conv_features, 'conv%d' % (i + 1))(x)
95 |             x = F.max_pool2d(x, kernel_size=2)
96 | 
97 |         x = x.view(x.size(0), -1)
98 |         for i in range(self.fc_classifier):
99 |             x = getattr(self.fc_features, 'linear%d' % (i + 1))(x)
100 | 
101 |         return x
102 | 
--------------------------------------------------------------------------------
/scripts/utils/metrics.py:
--------------------------------------------------------------------------------
1 | """Common image segmentation metrics.
2 | """
3 | 
4 | import torch
5 | import numpy as np
6 | from PIL import Image
7 | import matplotlib.pyplot as plt
8 | 
9 | EPS = 1e-10
10 | 
11 | 
12 | def nanmean(x):
13 |     """Computes the arithmetic mean ignoring any NaNs."""
14 |     return torch.mean(x[x == x])
15 | 
16 | 
17 | def _fast_hist(true, pred, num_classes):
18 |     # pred = pred.float()
19 |     # true = true.float()
20 |     mask = (true >= 0) & (true < num_classes)
21 |     hist = torch.bincount(
22 |         num_classes * true[mask] + pred[mask],
23 |         minlength=num_classes ** 2,
24 |     ).reshape(num_classes, num_classes).float()
25 |     return hist
26 | 
27 | 
28 | def overall_pixel_accuracy(hist):
29 |     """Computes the total pixel accuracy.
30 | 
31 |     The overall pixel accuracy provides an intuitive
32 |     approximation for the qualitative perception of the
33 |     label when it is viewed in its overall shape but not
34 |     its details.
35 | 
36 |     Args:
37 |         hist: confusion matrix.
38 | 
39 |     Returns:
40 |         overall_acc: the overall pixel accuracy.
41 |     """
42 |     correct = torch.diag(hist).sum()
43 |     total = hist.sum()
44 |     overall_acc = correct / (total + EPS)
45 |     return overall_acc
46 | 
47 | 
48 | def per_class_pixel_accuracy(hist):
49 |     """Computes the average per-class pixel accuracy.
50 | 
51 |     The per-class pixel accuracy is a more fine-grained
52 |     version of the overall pixel accuracy. A model could
53 |     score a relatively high overall pixel accuracy by
54 |     correctly predicting the dominant labels or areas
55 |     in the image whilst incorrectly predicting the
56 |     possibly more important/rare labels. Such a model
57 |     will score a low per-class pixel accuracy.
58 | 
59 |     Args:
60 |         hist: confusion matrix.
61 | 
62 |     Returns:
63 |         avg_per_class_acc: the average per-class pixel accuracy.
64 |     """
65 |     correct_per_class = torch.diag(hist)
66 |     total_per_class = hist.sum(dim=1)
67 |     per_class_acc = correct_per_class / (total_per_class + EPS)
68 |     avg_per_class_acc = nanmean(per_class_acc)
69 |     return avg_per_class_acc
70 | 
71 | class AverageMeter(object):
72 |     def __init__(self):
73 |         self.reset()
74 | 
75 |     def reset(self):
76 |         self.val = 0
77 |         self.avg = 0
78 |         self.sum = 0
79 |         self.count = 0
80 | 
81 |     def update(self, val, n=1):
82 |         self.val = val
83 |         self.sum += val * n
84 |         self.count += n
85 |         self.avg = self.sum / self.count
86 | 
87 | 
88 | def dice_coeff(pred, target):
89 |     ims = [pred, target]
90 |     np_ims = []
91 |     for item in ims:
92 |         if 'str' in str(type(item)):
93 |             item = np.array(Image.open(item))
94 |         elif 'PIL' in str(type(item)):
95 |             item = np.array(item)
96 |         elif 'torch' in str(type(item)):
97 |             item = item.numpy()
98 |         np_ims.append(item)
99 | 
100 |     pred = np_ims[0]
101 |     target = np_ims[1]
102 | 
103 |     smooth = 0.000001
104 | 
105 |     m1 = pred.flatten()  # Flatten
106 |     m2 = target.flatten()  # Flatten
107 |     intersection = (m1 * m2).sum()
108 |     intersection = float(intersection)  # np.float was removed in NumPy >= 1.24
109 | 
110 |     union = (np.uint8(m1) | np.uint8(m2)).sum()
111 |     union = union.astype('float')
112 |     jac = (intersection + smooth) / (union + smooth)
113 | 
114 |     dice = (2.
* intersection + smooth) / (m1.sum() + m2.sum() + smooth) 115 | 116 | return dice, jac 117 | 118 | 119 | def dice_coeff_checkforBatch(pred, target, batch_size): 120 | batch_dice = 0 121 | batch_jac = 0 122 | for index in range(batch_size): 123 | dice, jac = dice_coeff(pred[index, ...], target[index, ...]) 124 | batch_dice += dice 125 | batch_jac += jac 126 | return batch_dice / batch_size, batch_jac / batch_size 127 | 128 | 129 | if __name__ == "__main__": 130 | # t1 = torch.rand((5, 4, 2)) 131 | # t1 = t1 > 0.5 132 | # t2 = torch.rand((5, 4, 2)) 133 | # t2 = t2 > 0.5 134 | # t1 = t1.int() 135 | # t2 = t2.int() 136 | # overall_acc, avg_per_class_acc, avg_jacc, avg_dice = eval_metrics(t2, t1, 2) 137 | # print("acc:{0} , perclassacc:{1}, jcc:{2}, dice:{3}" .format(overall_acc, avg_per_class_acc, avg_jacc, avg_dice)) 138 | t1 = torch.rand((5, 4, 2)) 139 | t1 = t1 > 0.5 140 | t2 = torch.rand((5, 4, 2)) 141 | t2 = t2 > 0.5 142 | t1 = t1.int() 143 | t2 = t2.int() 144 | dice = dice_coeff(t1, t2) 145 | print(dice) 146 | -------------------------------------------------------------------------------- /scripts/utils/test_augmentation.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Description: 3 | Author: weihuang 4 | Date: 2021-11-17 09:38:38 5 | LastEditors: weihuang 6 | LastEditTime: 2021-11-17 10:37:47 7 | ''' 8 | 9 | import os 10 | import cv2 11 | import torch 12 | import numpy as np 13 | from torch.utils import data 14 | from torch.autograd import Variable 15 | from utils.metrics import dice_coeff 16 | from utils.postprocessing import postpre 17 | from dataset.target_dataset import targetDataSet_test 18 | from utils.tools_self import save_array_as_nii_volume 19 | from utils.show import save_prediction_image 20 | 21 | def test_model(model, valloader, save_dir, i_iter): 22 | device = torch.device('cuda:0') 23 | total_dice = 0 24 | total_jac = 0 25 | count = 0 26 | pred_total = [] 27 | original_msk_total = [] 28 | for i_pic, (images_v, masks_v, original_msk, _, name)in enumerate(valloader): 29 | stacked_img = torch.Tensor([]).to(device) 30 | for index in range(images_v.size()[1]): 31 | with torch.no_grad(): 32 | image_v = Variable(images_v[:, index, :, :].unsqueeze(0).to(device)) 33 | try: 34 | _, output = model(image_v) 35 | output = torch.argmax(output, dim=1).float() 36 | stacked_img = torch.cat((stacked_img, output)) 37 | except RuntimeError as e: 38 | if 'out of memory' in str(e): 39 | print('| WARNING: ran out of memory') 40 | if hasattr(torch.cuda, 'empty_cache'): 41 | torch.cuda.empty_cache() 42 | else: 43 | raise e 44 | pred, original_msk = save_prediction_image(stacked_img, name, i_iter, save_dir, original_msk) 45 | dim = pred.shape 46 | 47 | dice, jac = dice_coeff(pred, original_msk) 48 | count = count + 1 49 | 50 | total_dice = total_dice + dice 51 | total_jac = total_jac + jac 52 | 53 | print("%d. val_jac is:%f . 
val_dice is:%f " % (i_pic, jac, dice)) 54 | 55 | pred_total = np.append(pred_total, pred) 56 | original_msk_total = np.append(original_msk_total, original_msk) 57 | 58 | D3_dice, D3_jac = dice_coeff(pred_total, original_msk_total) 59 | D2_dice = total_dice / count 60 | D2_jac = total_jac / count 61 | print('3D dice: %4f' % D3_dice, '3D jac: %4f' % D3_jac, 62 | '2D dice: %4f' % D2_dice, '2D jac: %4f' % D2_jac) 63 | 64 | pred_total = pred_total.reshape(count, dim[0], dim[1]) 65 | original_msk_total = original_msk_total.reshape(count, dim[0], dim[1]) 66 | return pred_total, original_msk_total 67 | 68 | def test_augmentation(testmodel, pred_ori, input_size_target, cfg, save_dir): 69 | pred_final = pred_ori 70 | for test_aug in range(4): 71 | print('the %d test_aug' % test_aug, 'for %s' % save_dir) 72 | testloader = data.DataLoader( 73 | targetDataSet_test(cfg.DATA.data_dir_val, 74 | cfg.DATA.data_dir_val_label, 75 | cfg.DATA.data_list_val, 76 | test_aug, 77 | crop_size=input_size_target), 78 | batch_size=1, shuffle=False) 79 | 80 | pred_total, original_msk_total = test_model(testmodel, testloader, save_dir, 1000) 81 | pred_total = postpre(pred_total, save_dir, test_aug) 82 | D3_dice, D3_jac = dice_coeff(pred_total, original_msk_total) 83 | 84 | total_dice = 0 85 | total_jac = 0 86 | pics = pred_total.shape[0] 87 | for i in range(pics): 88 | pred = pred_total[i, :, :] 89 | msk = original_msk_total[i, :, :] 90 | dice, jac = dice_coeff(pred, msk) 91 | total_dice = total_dice + dice 92 | total_jac = total_jac + jac 93 | 94 | print('3D dice: %4f' % D3_dice, '3D jac: %4f' % D3_jac, 95 | '2D dice: %4f' % (total_dice / (pics)), '2D jac: %4f' % (total_jac / (pics))) 96 | 97 | if test_aug == 0: 98 | msk_final = original_msk_total 99 | if test_aug == 1: 100 | for i in range(pred_total.shape[0]): 101 | pred_total[i, :, :] = cv2.flip(pred_total[i, :, :], 1) 102 | if test_aug == 2: 103 | for i in range(pred_total.shape[0]): 104 | pred_total[i, :, :] = cv2.flip(pred_total[i, :, :], 0) 105 | if test_aug == 3: 106 | for i in range(pred_total.shape[0]): 107 | pred_total[i, :, :] = cv2.flip(pred_total[i, :, :], -1) 108 | 109 | pred_final = pred_final + pred_total 110 | 111 | pred_final = pred_final / 4 112 | pred_final[pred_final >= 0.5] = 1 113 | pred_final[pred_final < 0.5] = 0 114 | 115 | desired_path = save_dir + 'final' + '/' 116 | if not os.path.exists(desired_path): 117 | os.makedirs(desired_path) 118 | export_name = 'test.nii.gz' 119 | save_array_as_nii_volume(pred_final, desired_path + export_name) 120 | 121 | return pred_final, msk_final 122 | -------------------------------------------------------------------------------- /scripts/dataset/source_dataset.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Description: 3 | Author: weihuang 4 | Date: 2021-11-18 15:47:44 5 | LastEditors: weihuang 6 | LastEditTime: 2021-11-18 19:18:39 7 | ''' 8 | import os 9 | import sys 10 | import torch 11 | import random 12 | import numpy as np 13 | from PIL import Image 14 | import os.path as osp 15 | from random import randint 16 | from torch.utils import data 17 | from utils.pre_processing import normalization2, approximate_image, cropping 18 | from dataset.data_aug import aug_img_lab 19 | 20 | 21 | class sourceDataSet(data.Dataset): 22 | def __init__(self, root_img, root_label, list_path, crop_size=(512, 512), stride=1): 23 | self.root_img = root_img 24 | self.root_label = root_label 25 | self.list_path = list_path 26 | self.crop_size = crop_size 27 | self.stride = stride 
28 |         self.img_ids = [i_id.strip() for i_id in open(list_path)]
29 |         self.length = len(self.img_ids)
30 | 
31 |     def __len__(self):
32 |         # return int(sys.maxsize)
33 |         return 400000
34 | 
35 |     def __getitem__(self, index):
36 |         k = random.randint(0, len(self.img_ids)-1-self.stride)
37 |         current_img = np.asarray(Image.open(osp.join(self.root_img, self.img_ids[k])), dtype=np.uint8)
38 |         current_label = np.asarray(Image.open(osp.join(self.root_label, self.img_ids[k])), dtype=np.uint8)
39 |         aux_img = np.asarray(Image.open(osp.join(self.root_img, self.img_ids[k+self.stride])), dtype=np.uint8)
40 |         aux_label = np.asarray(Image.open(osp.join(self.root_label, self.img_ids[k+self.stride])), dtype=np.uint8)
41 | 
42 |         # data augmentation
43 |         current_img = normalization2(current_img.astype(np.float32), max=1, min=0)
44 |         aux_img = normalization2(aux_img.astype(np.float32), max=1, min=0)
45 |         seed = np.random.randint(2147483647)
46 |         random.seed(seed)
47 |         current_img, current_label = aug_img_lab(current_img, current_label, self.crop_size)
48 |         random.seed(seed)
49 |         aux_img, aux_label = aug_img_lab(aux_img, aux_label, self.crop_size)
50 |         current_label = approximate_image(current_label.copy())
51 |         aux_label = approximate_image(aux_label.copy())
52 | 
53 |         # cropping image with the input size
54 |         size = current_img.shape
55 |         y_loc = randint(0, size[0] - self.crop_size[0])
56 |         x_loc = randint(0, size[1] - self.crop_size[1])
57 |         current_img = cropping(current_img, self.crop_size[0], self.crop_size[1], y_loc, x_loc)
58 |         current_label = cropping(current_label, self.crop_size[0], self.crop_size[1], y_loc, x_loc)
59 |         aux_img = cropping(aux_img, self.crop_size[0], self.crop_size[1], y_loc, x_loc)
60 |         aux_label = cropping(aux_label, self.crop_size[0], self.crop_size[1], y_loc, x_loc)
61 | 
62 |         current_img = np.expand_dims(current_img, axis=0)  # add additional dimension
63 |         current_img = torch.from_numpy(current_img.astype(np.float32)).float()
64 |         aux_img = np.expand_dims(aux_img, axis=0)  # add additional dimension
65 |         aux_img = torch.from_numpy(aux_img.astype(np.float32)).float()
66 | 
67 |         current_label = (current_label / 255).astype(bool)  # np.bool was removed in NumPy >= 1.24
68 |         aux_label = (aux_label / 255).astype(bool)
69 |         diff = np.bitwise_xor(current_label, aux_label)
70 |         current_label = torch.from_numpy(current_label.astype(np.float32)).long()
71 |         aux_label = torch.from_numpy(aux_label.astype(np.float32)).long()
72 |         diff = torch.from_numpy(diff.astype(np.float32)).long()
73 | 
74 |         return current_img, current_label, aux_img, aux_label, diff
75 | 
76 | 
77 | if __name__ == '__main__':
78 |     # data_dir_img = '../data/VNC3/training/'
79 |     # data_dir_label = '../data/VNC3/training_groundtruth/'
80 |     # data_list = '../data/VNC3/train.txt'
81 |     data_dir_img = '../data/Lucchi/training'
82 |     data_dir_label = '../data/Lucchi/training_groundtruth'
83 |     data_list = '../data/Lucchi/train.txt'
84 |     input_size = (512, 512)
85 |     stride = 10
86 |     dst = sourceDataSet(data_dir_img,
87 |                         data_dir_label,
88 |                         data_list,
89 |                         crop_size=input_size,
90 |                         stride=stride)
91 | 
92 |     out_path = './data_temp'
93 |     if not osp.exists(out_path):
94 |         os.makedirs(out_path)
95 |     for i, data in enumerate(dst):
96 |         if i < 50:
97 |             print(i)
98 |             current_img, current_label, aux_img, aux_label, diff = data
99 |             current_img = (current_img.numpy() * 255).astype(np.uint8)
100 |             current_label = (current_label.numpy() * 255).astype(np.uint8)
101 |             current_img = current_img.squeeze()
102 |             aux_img = (aux_img.numpy() * 255).astype(np.uint8)
103 |             aux_label =
(aux_label.numpy() * 255).astype(np.uint8) 104 | aux_img = aux_img.squeeze() 105 | diff = (diff.numpy() * 255).astype(np.uint8) 106 | concat1 = np.concatenate([current_img, aux_img, diff], axis=1) 107 | concat2 = np.concatenate([current_label, aux_label, diff], axis=1) 108 | concat = np.concatenate([concat1, concat2], axis=0) 109 | Image.fromarray(concat).save(osp.join(out_path, str(i).zfill(4)+'.png')) 110 | else: 111 | break 112 | print('Done') 113 | -------------------------------------------------------------------------------- /scripts/inference.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Description: 3 | Author: weihuang 4 | Date: 2021-11-16 21:17:31 5 | LastEditors: Please set LastEditors 6 | LastEditTime: 2023-01-13 21:14:37 7 | ''' 8 | 9 | import os 10 | import cv2 11 | import yaml 12 | import time 13 | import argparse 14 | import numpy as np 15 | from tqdm import tqdm 16 | from attrdict import AttrDict 17 | from collections import OrderedDict 18 | 19 | import torch 20 | import torch.nn as nn 21 | from torch.utils import data 22 | import torch.nn.functional as F 23 | 24 | from model.CoDetectionCNN import CoDetectionCNN 25 | from dataset.target_dataset import targetDataSet_val_twoimgs, Evaluation 26 | from utils.utils import inference_results, inference_results2 27 | from utils.show import show_test 28 | 29 | import warnings 30 | warnings.filterwarnings("ignore") 31 | 32 | if __name__ == '__main__': 33 | parser = argparse.ArgumentParser() 34 | parser.add_argument('-c', '--cfg', type=str, default='vnc2lucchi1', help='config file') 35 | parser.add_argument('-mn', '--model_name', type=str, default='vnc2lucchi1') 36 | parser.add_argument('-mm', '--mode_map', type=str, default='map') 37 | parser.add_argument('-sw', '--show', action='store_true', default=False) 38 | args = parser.parse_args() 39 | 40 | cfg_file = args.cfg + '.yaml' 41 | print('cfg_file: ' + cfg_file) 42 | with open('./config/' + cfg_file, 'r') as f: 43 | cfg = AttrDict(yaml.load(f, Loader=yaml.FullLoader)) 44 | 45 | trained_model = args.model_name 46 | out_path = os.path.join('../inference', trained_model) 47 | if not os.path.exists(out_path): 48 | os.makedirs(out_path) 49 | print('out_path: ' + out_path) 50 | seg_img_path = os.path.join(out_path, 'seg_img') 51 | if not os.path.exists(seg_img_path): 52 | os.makedirs(seg_img_path) 53 | 54 | device = torch.device('cuda:0') 55 | model = CoDetectionCNN(n_channels=cfg.MODEL.input_nc, 56 | n_classes=cfg.MODEL.output_nc).to(device) 57 | 58 | ckpt_path = os.path.join('../models', trained_model, 'model.ckpt') 59 | checkpoint = torch.load(ckpt_path) 60 | new_state_dict = OrderedDict() 61 | state_dict = checkpoint['model_weights'] 62 | for k, v in state_dict.items(): 63 | # name = k[7:] # remove module. 
64 | name = k 65 | new_state_dict[name] = v 66 | model.load_state_dict(new_state_dict) 67 | model = model.to(device) 68 | model.eval() 69 | 70 | val_data = targetDataSet_val_twoimgs(cfg.DATA.data_dir_val, 71 | cfg.DATA.data_dir_val_label, 72 | cfg.DATA.data_list_val, 73 | crop_size=(cfg.DATA.input_size_target, cfg.DATA.input_size_target), 74 | stride=cfg.DATA.target_stride) 75 | valid_provider = torch.utils.data.DataLoader(val_data, 76 | batch_size=1, 77 | shuffle=False) 78 | 79 | target_evaluation = Evaluation(root_label=cfg.DATA.data_dir_val_label, 80 | list_path=cfg.DATA.data_list_val) 81 | print('Begin inference...') 82 | f_valid_txt = open(os.path.join(out_path, 'scores.txt'), 'w') 83 | target_stride = cfg.DATA.target_stride 84 | preds_int = np.zeros((165, 768, 1024), dtype=np.uint8) 85 | preds = np.zeros((165, 768, 1024), dtype=np.float32) 86 | t1 = time.time() 87 | for i_pic, (cimg, _, aimg, _, _) in enumerate(valid_provider): 88 | cimg = cimg.to(device) 89 | aimg = aimg.to(device) 90 | img_cat = torch.cat([cimg, aimg], dim=1) 91 | with torch.no_grad(): 92 | cpred, apred = model(img_cat, diff=False) 93 | preds_int[i_pic] = inference_results(cpred, preds_int[i_pic]) 94 | preds_int[i_pic+target_stride] = inference_results(apred, preds_int[i_pic+target_stride]) 95 | preds[i_pic] = inference_results2(cpred, preds[i_pic]) 96 | preds[i_pic+target_stride] = inference_results2(apred, preds[i_pic+target_stride]) 97 | t2 = time.time() 98 | print('Prediction time (s):', (t2 - t1)) 99 | 100 | if args.show: 101 | print('Show...') 102 | show_test(preds_int, target_evaluation.get_gt(), cfg.DATA.data_dir_val, seg_img_path) 103 | 104 | if args.mode_map == 'map': 105 | # mAP, F1, MCC, and IoU 106 | print('Measure on mAP, F1, MCC, and IoU...') 107 | t3 = time.time() 108 | mAP, F1, MCC, IoU = target_evaluation(preds, mode='map') 109 | t4 = time.time() 110 | print('mAP=%.4f, F1=%.4f, MCC=%.4f, IoU=%.4f' % (mAP, F1, MCC, IoU)) 111 | print('Measurement time (s):', (t4 - t3)) 112 | f_valid_txt.write('mAP=%.4f, F1=%.4f, MCC=%.4f, IoU=%.4f' % (mAP, F1, MCC, IoU)) 113 | f_valid_txt.write('\n') 114 | else: 115 | # dice and jac 116 | print('Measure on Dice and JAC...') 117 | mean_dice, mean_jac = target_evaluation(preds_int, mode='dice') 118 | print('dice=%.6f, jac=%.6f' % (mean_dice, mean_jac)) 119 | f_valid_txt.write('dice=%.6f, jac=%.6f' % (mean_dice, mean_jac)) 120 | f_valid_txt.write('\n') 121 | f_valid_txt.close() 122 | 123 | print('Done') -------------------------------------------------------------------------------- /scripts/inference_mito.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Description: 3 | Author: weihuang 4 | Date: 2021-11-16 21:17:31 5 | LastEditors: Please set LastEditors 6 | LastEditTime: 2023-01-13 21:51:31 7 | ''' 8 | 9 | import os 10 | import cv2 11 | import yaml 12 | import h5py 13 | import time 14 | import argparse 15 | import numpy as np 16 | from tqdm import tqdm 17 | from attrdict import AttrDict 18 | from collections import OrderedDict 19 | from PIL import Image 20 | 21 | import torch 22 | import torch.nn as nn 23 | from torch.utils import data 24 | import torch.nn.functional as F 25 | 26 | from model.CoDetectionCNN import CoDetectionCNN 27 | from dataset.target_dataset_mito import targetDataSet_test_twoimgs, Evaluation 28 | from utils.show import show_test 29 | 30 | import warnings 31 | warnings.filterwarnings("ignore") 32 | 33 | if __name__ == '__main__': 34 | parser = argparse.ArgumentParser() 35 | parser.add_argument('-c', 
'--cfg', type=str, default='mitor2h', help='config file') 36 | parser.add_argument('-mn', '--model_name', type=str, default='mitor2h') 37 | parser.add_argument('-mm', '--mode_map', type=str, default='map_2d') 38 | parser.add_argument('-sw', '--show', action='store_true', default=False) 39 | args = parser.parse_args() 40 | 41 | cfg_file = args.cfg + '.yaml' 42 | print('cfg_file: ' + cfg_file) 43 | with open('./config/' + cfg_file, 'r') as f: 44 | cfg = AttrDict(yaml.load(f, Loader=yaml.FullLoader)) 45 | 46 | trained_model = args.model_name 47 | out_path = os.path.join('../inference', trained_model) 48 | if not os.path.exists(out_path): 49 | os.makedirs(out_path) 50 | print('out_path: ' + out_path) 51 | seg_img_path = os.path.join(out_path, 'seg_img') 52 | if not os.path.exists(seg_img_path): 53 | os.makedirs(seg_img_path) 54 | 55 | device = torch.device('cuda:0') 56 | model = CoDetectionCNN(n_channels=cfg.MODEL.input_nc, 57 | n_classes=cfg.MODEL.output_nc).to(device) 58 | 59 | ckpt_path = os.path.join('../models', trained_model, 'model.ckpt') 60 | checkpoint = torch.load(ckpt_path) 61 | new_state_dict = OrderedDict() 62 | state_dict = checkpoint['model_weights'] 63 | for k, v in state_dict.items(): 64 | # name = k[7:] # remove module. 65 | name = k 66 | new_state_dict[name] = v 67 | model.load_state_dict(new_state_dict) 68 | model = model.to(device) 69 | model.eval() 70 | 71 | val_data = targetDataSet_test_twoimgs(cfg.DATA.data_dir_val, 72 | cfg.DATA.data_dir_val_label, 73 | cfg.DATA.data_list_val, 74 | crop_size=(cfg.DATA.input_size_test, cfg.DATA.input_size_test), 75 | stride=cfg.DATA.target_stride) 76 | valid_provider = torch.utils.data.DataLoader(val_data, batch_size=1) 77 | 78 | target_evaluation = Evaluation(root_label=cfg.DATA.data_dir_val_label, 79 | list_path=cfg.DATA.data_list_val) 80 | print('Begin inference...') 81 | f_valid_txt = open(os.path.join(out_path, 'scores.txt'), 'w') 82 | print('the number of sub-volume:', len(val_data)) 83 | t1 = time.time() 84 | pbar = tqdm(total=len(val_data)) 85 | for k, data in enumerate(valid_provider, 0): 86 | cimg, aimg = data 87 | cimg = cimg.to(device) 88 | aimg = aimg.to(device) 89 | img_cat = torch.cat([cimg, aimg], dim=1) 90 | with torch.no_grad(): 91 | cpred, apred = model(img_cat, diff=False) 92 | cpred = torch.nn.functional.softmax(cpred, dim=1) 93 | cpred = cpred[:, 1] 94 | apred = torch.nn.functional.softmax(apred, dim=1) 95 | apred = apred[:, 1] 96 | cpred = np.squeeze(cpred.data.cpu().numpy()) 97 | apred = np.squeeze(apred.data.cpu().numpy()) 98 | val_data.add_vol(cpred, apred) 99 | pbar.update(1) 100 | pbar.close() 101 | preds = val_data.get_results() 102 | t2 = time.time() 103 | print('Prediction time (s):', (t2 - t1)) 104 | 105 | f_out = h5py.File(os.path.join(out_path, 'preds.hdf'), 'w') 106 | f_out.create_dataset('main', data=preds, dtype=np.float32, compression='gzip') 107 | f_out.close() 108 | 109 | if args.show: 110 | print('Show...') 111 | preds_int = preds.copy() 112 | preds_int[preds_int>=0.5] = 1 113 | preds_int[preds_int<0.5] = 0 114 | # show_test(preds_int, target_evaluation.get_gt(), cfg.DATA.data_dir_val, seg_img_path) 115 | for k in range(preds_int.shape[0]): 116 | temp = preds_int[k] 117 | temp = (temp * 255).astype(np.uint8) 118 | Image.fromarray(temp).save(os.path.join(seg_img_path, str(k).zfill(4)+'.png')) 119 | del preds_int 120 | 121 | # mAP, F1, MCC, and IoU 122 | print('Measure on mAP, F1, MCC, and IoU...') 123 | t3 = time.time() 124 | mAP, F1, MCC, IoU = target_evaluation(preds, mode=args.mode_map) 125 | 
t4 = time.time() 126 | print('mAP=%.4f, F1=%.4f, MCC=%.4f, IoU=%.4f' % (mAP, F1, MCC, IoU)) 127 | print('Measurement time (s):', (t4 - t3)) 128 | f_valid_txt.write('mAP=%.4f, F1=%.4f, MCC=%.4f, IoU=%.4f' % (mAP, F1, MCC, IoU)) 129 | f_valid_txt.write('\n') 130 | f_valid_txt.close() 131 | 132 | print('Done') -------------------------------------------------------------------------------- /scripts/dataset/source_dataset_affinity.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Descripttion: 3 | version: 0.0 4 | Author: Wei Huang 5 | Date: 2022-01-07 23:24:27 6 | ''' 7 | import os 8 | import sys 9 | import torch 10 | import random 11 | import numpy as np 12 | from PIL import Image 13 | import os.path as osp 14 | from random import randint 15 | from torch.utils import data 16 | from utils.pre_processing import normalization2, approximate_image, cropping 17 | from dataset.data_aug import aug_img_lab 18 | from utils.affinity import gen_affs 19 | 20 | 21 | class sourceDataSet(data.Dataset): 22 | def __init__(self, root_img, root_label, list_path, crop_size=(512, 512), stride=1): 23 | self.root_img = root_img 24 | self.root_label = root_label 25 | self.list_path = list_path 26 | self.crop_size = crop_size 27 | self.stride = stride 28 | self.img_ids = [i_id.strip() for i_id in open(list_path)] 29 | self.length = len(self.img_ids) 30 | 31 | def __len__(self): 32 | # return int(sys.maxsize) 33 | return 400000 34 | 35 | def __getitem__(self, index): 36 | k = random.randint(0, len(self.img_ids)-1-self.stride) 37 | current_img = np.asarray(Image.open(osp.join(self.root_img, self.img_ids[k])), dtype=np.uint8) 38 | current_label = np.asarray(Image.open(osp.join(self.root_label, self.img_ids[k])), dtype=np.uint8) 39 | aux_img = np.asarray(Image.open(osp.join(self.root_img, self.img_ids[k+self.stride])), dtype=np.uint8) 40 | aux_label = np.asarray(Image.open(osp.join(self.root_label, self.img_ids[k+self.stride])), dtype=np.uint8) 41 | 42 | # data augmentation 43 | current_img = normalization2(current_img.astype(np.float32), max=1, min=0) 44 | aux_img = normalization2(aux_img.astype(np.float32), max=1, min=0) 45 | seed = np.random.randint(2147483647) 46 | random.seed(seed) 47 | current_img, current_label = aug_img_lab(current_img, current_label, self.crop_size) 48 | random.seed(seed) 49 | aux_img, aux_label = aug_img_lab(aux_img, aux_label, self.crop_size) 50 | current_label = approximate_image(current_label.copy()) 51 | aux_label = approximate_image(aux_label.copy()) 52 | 53 | # cropping image with the input size 54 | size = current_img.shape 55 | y_loc = randint(0, size[0] - self.crop_size[0]) 56 | x_loc = randint(0, size[1] - self.crop_size[1]) 57 | current_img = cropping(current_img, self.crop_size[0], self.crop_size[1], y_loc, x_loc) 58 | current_label = cropping(current_label, self.crop_size[0], self.crop_size[1], y_loc, x_loc) 59 | aux_img = cropping(aux_img, self.crop_size[0], self.crop_size[1], y_loc, x_loc) 60 | aux_label = cropping(aux_label, self.crop_size[0], self.crop_size[1], y_loc, x_loc) 61 | 62 | current_img = np.expand_dims(current_img, axis=0) # add additional dimension 63 | current_img = torch.from_numpy(current_img.astype(np.float32)).float() 64 | aux_img = np.expand_dims(aux_img, axis=0) # add additional dimension 65 | aux_img = torch.from_numpy(aux_img.astype(np.float32)).float() 66 | 67 | # current_label = (current_label / 255).astype(np.bool) 68 | # aux_label = (aux_label / 255).astype(np.bool) 69 | # diff = 
np.bitwise_xor(current_label, aux_label)
70 | current_label = (current_label / 255).astype(np.float32)
71 | aux_label = (aux_label / 255).astype(np.float32)
72 | affs = gen_affs(current_label, aux_label, shifts=[0,1,3,5,9])
73 | current_label = torch.from_numpy(current_label.astype(np.float32)).long()
74 | aux_label = torch.from_numpy(aux_label.astype(np.float32)).long()
75 | affs = torch.from_numpy(affs.astype(np.float32)).long()
76 | 
77 | return current_img, current_label, aux_img, aux_label, affs
78 | 
79 | 
80 | if __name__ == '__main__':
81 | # data_dir_img = '../data/VNC3/training/'
82 | # data_dir_label = '../data/VNC3/training_groundtruth/'
83 | # data_list = '../data/VNC3/train.txt'
84 | data_dir_img = '../data/Lucchi/training'
85 | data_dir_label = '../data/Lucchi/training_groundtruth'
86 | data_list = '../data/Lucchi/train.txt'
87 | input_size = (512, 512)
88 | stride = 10
89 | dst = sourceDataSet(data_dir_img,
90 | data_dir_label,
91 | data_list,
92 | crop_size=input_size,
93 | stride=stride)
94 | 
95 | out_path = './data_temp'
96 | if not osp.exists(out_path):
97 | os.makedirs(out_path)
98 | for i, data in enumerate(dst):
99 | if i < 50:
100 | print(i)
101 | current_img, current_label, aux_img, aux_label, affs = data
102 | current_img = (current_img.numpy() * 255).astype(np.uint8)
103 | current_label = (current_label.numpy() * 255).astype(np.uint8)
104 | current_img = current_img.squeeze()
105 | aux_img = (aux_img.numpy() * 255).astype(np.uint8)
106 | aux_label = (aux_label.numpy() * 255).astype(np.uint8)
107 | aux_img = aux_img.squeeze()
108 | diff = (affs.numpy()[0] * 255).astype(np.uint8)  # visualize the 0-shift (XOR) channel; affs has 2*len(shifts)-1 channels and cannot be concatenated with 2D images directly
109 | concat1 = np.concatenate([current_img, aux_img, diff], axis=1)
110 | concat2 = np.concatenate([current_label, aux_label, diff], axis=1)
111 | concat = np.concatenate([concat1, concat2], axis=0)
112 | Image.fromarray(concat).save(osp.join(out_path, str(i).zfill(4)+'.png'))
113 | else:
114 | break
115 | print('Done')
116 | 
--------------------------------------------------------------------------------
/scripts/utils/postprocessing.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 | import os
4 | from utils.tools_self import save_array_as_nii_volume
5 | from skimage import measure
6 | from PIL import Image
7 | import matplotlib.pyplot as plt
8 | import numpy as np
9 | from skimage import morphology
10 | 
11 | def postpre_single(img, test_aug=0):
12 | if test_aug == 5:
13 | img_fill = fillHole_single(img)
14 | img_open = openimg_single(img_fill, k1_size=10, k2_size=10)
15 | img_out = remove_small_object_single(img_open, area=600)
16 | else:
17 | img_fill = fillHole_single(img)
18 | img_open = openimg_single(img_fill, k1_size=15, k2_size=15)
19 | img_out = remove_small_object_single(img_open, area=400)
20 | return img_out
21 | 
22 | def fillHole_single(img):
23 | img = (img*255).astype(np.uint8)
24 | 
25 | mask = 255 - img
26 | marker = np.zeros_like(img)
27 | marker[0,:] = 255
28 | marker[-1,:] = 255
29 | marker[:,0] = 255
30 | marker[:,-1] = 255
31 | 
32 | SE = cv2.getStructuringElement(shape = cv2.MORPH_CROSS, ksize = (3,3))
33 | while True:
34 | marker_pre = marker
35 | dilation = cv2.dilate(marker, kernel = SE)
36 | marker = np.min((dilation,mask), axis = 0)
37 | if (marker_pre == marker).all():
38 | break
39 | dst = 255 - marker
40 | dst = dst / 255.0
41 | return dst
42 | 
43 | def openimg_single(img, k1_size, k2_size):
44 | k1_erode = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (k1_size, k1_size))
45 | k2_dilate = 
cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (k2_size, k2_size)) 46 | 47 | img_eroded = cv2.erode(img, k1_erode) 48 | img_out = cv2.dilate(img_eroded, k2_dilate) 49 | return img_out 50 | 51 | def remove_small_object_single(img, area): 52 | img = (img).astype(np.int) 53 | labels = measure.label(img, connectivity=1) 54 | img_out = morphology.remove_small_objects(labels, min_size=area, connectivity=1, in_place=False) 55 | img_out[img_out>0]=1 56 | img_out = img_out.astype('uint8') 57 | return img_out 58 | 59 | def postpre(img,save_dir,test_aug): 60 | 61 | # test: testing subset parameter 62 | if test_aug == 5: 63 | img_fill = fillHole(img,save_dir,test_aug) 64 | 65 | img_open = openimg(img_fill,save_dir,test_aug,k1_size=10,k2_size=10) 66 | 67 | img_out = remove_small_object(img_open,save_dir,test_aug,area = 600) 68 | 69 | else: 70 | img_fill = fillHole(img,save_dir,test_aug) 71 | 72 | img_open = openimg(img_fill,save_dir,test_aug,k1_size=15,k2_size=15) 73 | 74 | img_out = remove_small_object(img_open,save_dir,test_aug,area = 400) 75 | # test: training subset parameter 76 | # if test_aug == 5: 77 | # img_fill = fillHole(img, save_dir, test_aug) 78 | # 79 | # img_open = openimg(img_fill, save_dir, test_aug, k1_size=20, k2_size=20) 80 | # 81 | # img_out = remove_small_object(img_open, save_dir, test_aug, area=600) 82 | # 83 | # else: 84 | # img_fill = fillHole(img, save_dir, test_aug) 85 | # 86 | # img_open = openimg(img_fill, save_dir, test_aug, k1_size=15, k2_size=15) 87 | # 88 | # img_out = remove_small_object(img_open, save_dir, test_aug, area=600) 89 | 90 | # organize images in every epoch 91 | desired_path = save_dir + '/_postpre'+'_'+str(test_aug)+'/' 92 | # Create the path if it does not exist 93 | if not os.path.exists(desired_path): 94 | os.makedirs(desired_path) 95 | 96 | export_name = 'test.nii.gz' 97 | save_array_as_nii_volume(img_out, desired_path + export_name) 98 | 99 | return img_out 100 | 101 | def fillHole(img_arr,save_dir,test_aug): 102 | 103 | num = 0 104 | 105 | img_arr = (img_arr*255).astype(np.uint8) 106 | img_arr_out = img_arr.copy() 107 | 108 | size = img_arr.shape 109 | 110 | for i in range(size[0]): 111 | img = img_arr[i,:,:] 112 | 113 | mask = 255 - img 114 | 115 | marker = np.zeros_like(img) 116 | marker[0,:] = 255 117 | marker[-1,:] = 255 118 | marker[:,0] = 255 119 | marker[:,-1] = 255 120 | 121 | marker_0 = marker.copy() 122 | 123 | SE = cv2.getStructuringElement(shape = cv2.MORPH_CROSS,ksize = (3,3)) 124 | while True: 125 | marker_pre = marker 126 | dilation = cv2.dilate(marker,kernel = SE) 127 | marker = np.min((dilation,mask),axis = 0) 128 | if (marker_pre == marker).all(): 129 | break 130 | dst = 255-marker 131 | 132 | img_arr_out[i,:,:] = dst/255 133 | 134 | return img_arr_out 135 | 136 | def openimg(img,save_dir,test_aug,k1_size,k2_size): 137 | 138 | k1_erode = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (k1_size, k1_size)) 139 | k2_dilate = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (k2_size, k2_size)) 140 | 141 | size = img.shape 142 | img_arr = img 143 | img_arr_out = img.copy() 144 | 145 | for i in range(size[0]): 146 | pics = img_arr[i, :, :] 147 | 148 | img_eroded = cv2.erode(pics,k1_erode) 149 | img_out = cv2.dilate(img_eroded,k2_dilate) 150 | 151 | img_arr_out[i, :, :] = img_out 152 | 153 | return img_arr_out 154 | 155 | def remove_small_object(img,save_dir,test_aug,area): 156 | 157 | img = (img).astype(np.int) 158 | labels = measure.label(img,connectivity=1) 159 | # print(np.unique(labels)) 160 | label_att = measure.regionprops(labels) 161 | # 
for i in range(len(label_att)): 162 | # print(label_att[i].area) 163 | img_out = morphology.remove_small_objects(labels, min_size=area, connectivity=1, in_place=False) 164 | img_out[img_out>0]=1 165 | img_out = img_out.astype('uint8') 166 | 167 | return img_out 168 | 169 | 170 | 171 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | absl-py==0.9.0 2 | alabaster==0.7.12 3 | albumentations==1.3.0 4 | anaconda-client==1.7.2 5 | anaconda-navigator==1.9.7 6 | anaconda-project==0.8.3 7 | asn1crypto==1.0.1 8 | astor==0.8.1 9 | astroid==2.3.1 10 | astropy==3.2.2 11 | atomicwrites==1.3.0 12 | attrdict==2.0.1 13 | attrs==19.2.0 14 | Babel==2.7.0 15 | backcall==0.1.0 16 | backports.functools-lru-cache==1.5 17 | backports.os==0.1.1 18 | backports.shutil-get-terminal-size==1.0.0 19 | backports.tempfile==1.0 20 | backports.weakref==1.0.post1 21 | beautifulsoup4==4.8.0 22 | bitarray==1.0.1 23 | bkcharts==0.2 24 | bleach==3.1.0 25 | bokeh==1.3.4 26 | boto==2.49.0 27 | Bottleneck==1.2.1 28 | cachetools==4.0.0 29 | certifi==2020.12.5 30 | cffi==1.12.3 31 | chardet==3.0.4 32 | Click==7.0 33 | cloudpickle==1.2.2 34 | cluster-tools==0.2.1 35 | clyent==1.2.2 36 | colorama==0.4.1 37 | conda==4.9.2 38 | conda-build==3.18.9 39 | conda-package-handling==1.6.0 40 | conda-verify==3.4.2 41 | contextlib2==0.6.0 42 | cryptography==2.7 43 | cycler==0.10.0 44 | Cython==0.29.16 45 | cytoolz==0.10.0 46 | dask==2.5.2 47 | decorator==4.4.0 48 | defusedxml==0.6.0 49 | distributed==2.5.2 50 | docutils==0.15.2 51 | elf==0.2.2 52 | entrypoints==0.3 53 | et-xmlfile==1.0.1 54 | fastcache==1.1.0 55 | filelock==3.0.12 56 | Flask==1.1.1 57 | fsspec==0.5.2 58 | future==0.17.1 59 | gast==0.2.2 60 | gevent==1.4.0 61 | glob2==0.7 62 | gmpy2==2.0.8 63 | google-auth==1.11.3 64 | google-auth-oauthlib==0.4.1 65 | google-pasta==0.2.0 66 | greenlet==0.4.15 67 | grpcio==1.27.2 68 | h5py==2.9.0 69 | HeapDict==1.0.1 70 | html5lib==1.0.1 71 | idna==2.8 72 | imagecodecs==2020.2.18 73 | imageio==2.6.0 74 | imagesize==1.1.0 75 | importlib-metadata==0.23 76 | ipykernel==5.1.2 77 | ipython==7.8.0 78 | ipython-genutils==0.2.0 79 | ipywidgets==7.5.1 80 | isort==4.3.21 81 | itsdangerous==1.1.0 82 | jdcal==1.4.1 83 | jedi==0.15.1 84 | jeepney==0.4.1 85 | Jinja2==2.10.3 86 | joblib==0.13.2 87 | json5==0.8.5 88 | jsonschema==3.0.2 89 | jupyter==1.0.0 90 | jupyter-client==5.3.3 91 | jupyter-console==6.0.0 92 | jupyter-core==4.5.0 93 | jupyterlab==1.1.4 94 | jupyterlab-server==1.0.6 95 | Keras==2.3.1 96 | Keras-Applications==1.0.8 97 | Keras-Preprocessing==1.1.0 98 | keyring==18.0.0 99 | kiwisolver==1.1.0 100 | lazy-object-proxy==1.4.2 101 | libarchive-c==2.8 102 | libtiff==0.4.2 103 | lief==0.9.0 104 | llvmlite==0.29.0 105 | locket==0.2.0 106 | lockfile==0.12.2 107 | luigi==3.0.2 108 | lxml==4.4.1 109 | mahotas==1.4.9 110 | Markdown==3.2.1 111 | MarkupSafe==1.1.1 112 | matplotlib==3.1.1 113 | mccabe==0.6.1 114 | mistune==0.8.4 115 | mkl-fft==1.0.14 116 | mkl-random==1.1.0 117 | mkl-service==2.3.0 118 | mock==3.0.5 119 | more-itertools==7.2.0 120 | mpmath==1.1.0 121 | msgpack==0.6.1 122 | multipledispatch==0.6.0 123 | navigator-updater==0.2.1 124 | nbconvert==5.6.0 125 | nbformat==4.4.0 126 | ndflow==0.1.0 127 | networkx==2.3 128 | nltk==3.4.5 129 | nose==1.3.7 130 | notebook==6.0.1 131 | numba==0.45.1 132 | numexpr==2.7.0 133 | numpy==1.17.2 134 | numpydoc==0.9.1 135 | oauthlib==3.1.0 136 | olefile==0.46 137 | 
opencv-python==4.7.0.68 138 | opencv-python-headless==4.7.0.68 139 | openpyxl==3.0.0 140 | opt-einsum==3.2.0 141 | packaging==19.2 142 | pandas==0.25.1 143 | pandocfilters==1.4.2 144 | parso==0.5.1 145 | partd==1.0.0 146 | path.py==12.0.1 147 | pathlib2==2.3.5 148 | patsy==0.5.1 149 | pep8==1.7.1 150 | pexpect==4.7.0 151 | pickleshare==0.7.5 152 | Pillow==6.2.0 153 | pkginfo==1.5.0.1 154 | pluggy==0.13.0 155 | ply==3.11 156 | prometheus-client==0.5.0 157 | prompt-toolkit==2.0.10 158 | protobuf==3.11.3 159 | psutil==5.6.3 160 | ptyprocess==0.6.0 161 | py==1.8.0 162 | pyasn1==0.4.8 163 | pyasn1-modules==0.2.8 164 | pycodestyle==2.5.0 165 | pycosat==0.6.3 166 | pycparser==2.19 167 | pycrypto==2.6.1 168 | pycurl==7.43.0.3 169 | pyflakes==2.1.1 170 | Pygments==2.4.2 171 | pylint==2.4.2 172 | pyodbc==4.0.27 173 | pyOpenSSL==19.0.0 174 | pyparsing==2.4.2 175 | pyrsistent==0.15.4 176 | PySocks==1.7.1 177 | pytest==5.4.3 178 | pytest-arraydiff==0.3 179 | pytest-astropy==0.5.0 180 | pytest-doctestplus==0.4.0 181 | pytest-openfiles==0.4.0 182 | pytest-remotedata==0.3.2 183 | python-daemon==2.2.4 184 | python-dateutil==2.8.0 185 | pytz==2019.3 186 | PyWavelets==1.1.1 187 | PyYAML==5.1.2 188 | pyzmq==18.1.0 189 | QtAwesome==0.6.0 190 | qtconsole==4.5.5 191 | QtPy==1.9.0 192 | qudida==0.0.4 193 | requests==2.22.0 194 | requests-oauthlib==1.3.0 195 | rope==0.14.0 196 | rsa==4.0 197 | ruamel-yaml==0.15.46 198 | scikit-image==0.18.1 199 | scikit-learn==0.21.3 200 | scipy==1.3.1 201 | seaborn==0.9.0 202 | SecretStorage==3.1.1 203 | Send2Trash==1.5.0 204 | simplegeneric==0.8.1 205 | SimpleITK==1.2.4 206 | singledispatch==3.4.0.3 207 | six==1.12.0 208 | skan==0.8 209 | snowballstemmer==2.0.0 210 | sortedcollections==1.1.2 211 | sortedcontainers==2.1.0 212 | soupsieve==1.9.3 213 | Sphinx==2.2.0 214 | sphinxcontrib-applehelp==1.0.1 215 | sphinxcontrib-devhelp==1.0.1 216 | sphinxcontrib-htmlhelp==1.0.2 217 | sphinxcontrib-jsmath==1.0.1 218 | sphinxcontrib-qthelp==1.0.2 219 | sphinxcontrib-serializinghtml==1.1.3 220 | sphinxcontrib-websupport==1.1.2 221 | spyder==3.3.6 222 | spyder-kernels==0.5.2 223 | SQLAlchemy==1.3.9 224 | statsmodels==0.10.1 225 | sympy==1.4 226 | tables==3.5.2 227 | tblib==1.4.0 228 | tensorboard==2.0.2 229 | tensorboardX==2.0 230 | tensorflow-estimator==2.0.1 231 | tensorflow-gpu==2.0.0 232 | termcolor==1.1.0 233 | terminado==0.8.2 234 | testpath==0.4.2 235 | tifffile==2020.2.16 236 | toml==0.10.2 237 | toolz==0.10.0 238 | torch==1.0.1 239 | torchvision==0.2.2 240 | tornado==6.0.3 241 | tqdm==4.19.9 242 | traitlets==4.3.3 243 | typing-extensions==4.4.0 244 | unicodecsv==0.14.1 245 | urllib3==1.24.2 246 | wcwidth==0.1.7 247 | webencodings==0.5.1 248 | Werkzeug==0.16.0 249 | widgetsnbextension==3.5.1 250 | wrapt==1.11.2 251 | wurlitzer==1.0.3 252 | xlrd==1.2.0 253 | XlsxWriter==1.2.1 254 | xlwt==1.3.0 255 | zict==1.0.0 256 | zipp==0.6.0 257 | -------------------------------------------------------------------------------- /scripts/utils/show.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Description: 3 | Author: weihuang 4 | Date: 2021-11-15 17:37:00 5 | LastEditors: Please set LastEditors 6 | LastEditTime: 2021-11-21 17:13:38 7 | ''' 8 | import os 9 | import cv2 10 | import math 11 | from numpy.lib.npyio import save 12 | import torch 13 | import numpy as np 14 | from PIL import Image 15 | 16 | from utils.pre_processing import division_array, image_concatenate 17 | 18 | def polarize(img): 19 | ''' Polarize the value to zero and one 20 | Args: 
21 | img (numpy): numpy array of image to be polarized 22 | return: 23 | img (numpy): numpy array only with zero and one 24 | ''' 25 | img[img >= 0.5] = 1 26 | img[img < 0.5] = 0 27 | return img 28 | 29 | def convert2png(img): 30 | img = img.data.cpu().numpy() 31 | img = np.squeeze(img) 32 | img[img<0] = 0 33 | img[img>1] = 1 34 | img = (img * 255).astype(np.uint8) 35 | return img 36 | 37 | def show_training(iters, img, label, pred, save_path, tag='d'): 38 | img = convert2png(img) 39 | label = convert2png(label) 40 | pred = torch.argmax(pred, dim=0).float() 41 | pred = convert2png(pred) 42 | concat = np.concatenate([img, label, pred], axis=1) 43 | Image.fromarray(concat).save(os.path.join(save_path, str(iters).zfill(6)+'_%s.png' % tag)) 44 | 45 | def show_training_allresults(iters, 46 | cimg, 47 | clabel, 48 | cpred, 49 | aimg, 50 | alabel, 51 | apred, 52 | dlabel, 53 | dpred, 54 | ccross, 55 | across, 56 | save_path, 57 | tag='s'): 58 | cimg = convert2png(cimg) 59 | aimg = convert2png(aimg) 60 | clabel = convert2png(clabel) 61 | alabel = convert2png(alabel) 62 | dlabel = convert2png(dlabel) 63 | ccross = convert2png(ccross) 64 | across = convert2png(across) 65 | cpred = torch.argmax(cpred, dim=0).float() 66 | cpred = convert2png(cpred) 67 | apred = torch.argmax(apred, dim=0).float() 68 | apred = convert2png(apred) 69 | dpred = torch.argmax(dpred, dim=0).float() 70 | dpred = convert2png(dpred) 71 | concat1 = np.concatenate([cimg, clabel, cpred, dpred, ccross], axis=1) 72 | concat2 = np.concatenate([aimg, alabel, apred, dlabel, across], axis=1) 73 | concat = np.concatenate([concat1, concat2], axis=0) 74 | Image.fromarray(concat).save(os.path.join(save_path, str(iters).zfill(6)+'_%s.png' % tag)) 75 | 76 | def show_test(preds, labels, raw_path, save_path): 77 | num = labels.shape[0] 78 | for k in range(num): 79 | img = np.asarray(Image.open(os.path.join(raw_path, str(k).zfill(3)+'.png'))) 80 | pred = preds[k] 81 | label = labels[k] 82 | img = draw_label(img, pred, label) 83 | cv2.imwrite(os.path.join(save_path, str(k).zfill(3)+'.png'), img) 84 | 85 | def draw_label(img, pred, label): 86 | if img.max() <= 1: 87 | img = (img * 255).astype(np.uint8) 88 | if pred.max() <= 1: 89 | pred = (pred * 255).astype(np.uint8) 90 | else: 91 | pred = pred.astype(np.uint8) 92 | if label.max() <= 1: 93 | label = (label * 255).astype(np.uint8) 94 | else: 95 | label = label.astype(np.uint8) 96 | if len(img.shape) == 2: 97 | img = img[:,:,np.newaxis] 98 | img = np.repeat(img, 3, 2) 99 | contours_lb, _ = cv2.findContours(label, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) 100 | img = cv2.drawContours(img, contours_lb, -1, (0,0,255), 2) 101 | contours_pred, _ = cv2.findContours(pred, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) 102 | img = cv2.drawContours(img, contours_pred, -1, (0,255,0), 2) 103 | return img 104 | 105 | def save_prediction_image(stacked_img, im_name, iters, save_folder_name, original_msk): 106 | """save images to save_path 107 | Args: 108 | stacked_img (numpy): stacked cropped images 109 | save_folder_name (str): saving folder name 110 | division_array(388, 2, 3, 768, 1024): 111 | 388: label patch size 112 | 2, divide num in heigh 113 | 3, divide num in width 114 | 768: image height 115 | 1024: image width 116 | """ 117 | 118 | crop_size = stacked_img[0].size() 119 | 120 | maxsize = original_msk.shape[1:] 121 | 122 | output_shape = original_msk.shape[1:] 123 | crop_n1 = math.ceil(output_shape[0] / crop_size[0]) 124 | crop_n2 = math.ceil(output_shape[1] / crop_size[1]) 125 | if crop_n1 == 1: 126 | 
crop_n1 = crop_n1
127 | else:
128 | crop_n1 = crop_n1 + 1
129 | if crop_n2 == 1:
130 | crop_n2 = crop_n2
131 | else:
132 | crop_n2 = crop_n2 + 1
133 | 
134 | div_arr = division_array(stacked_img.size(1), crop_n1, crop_n2, output_shape[0], output_shape[1])
135 | img_cont = image_concatenate(stacked_img.cpu().data.numpy(), crop_n1, crop_n2, output_shape[0], output_shape[1])
136 | 
137 | img_cont = polarize((img_cont) / div_arr)
138 | img_cont_np = img_cont.astype('uint8')
139 | 
140 | img_cont = Image.fromarray(img_cont_np * 255)
141 | # organize images in every epoch
142 | desired_path = os.path.join(save_folder_name, str(iters).zfill(6))
143 | # desired_path = save_folder_name + '_iter_' + str(iter) + '/'
144 | # Create the path if it does not exist
145 | if not os.path.exists(desired_path):
146 | os.makedirs(desired_path)
147 | # Save Image!
148 | export_name = str(im_name) + '.png'
149 | # img_cont.save(desired_path + export_name)
150 | img_cont.save(os.path.join(desired_path, export_name))
151 | return img_cont_np, original_msk
152 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Domain Adaptive Mitochondria Segmentation via Enforcing Inter-Section Consistency [[paper](https://link.springer.com/chapter/10.1007/978-3-031-16440-8_9)]
2 | 
3 | **Accepted by MICCAI-2022**
4 | 
5 | **Wei Huang**, Xiaoyu Liu, Zhen Cheng, Yueyi Zhang, and Zhiwei Xiong*
6 | 
7 | University of Science and Technology of China (USTC), Hefei, China
8 | 
9 | Institute of Artificial Intelligence, Hefei Comprehensive National Science Center, Hefei, China
10 | 
11 | *Corresponding Author
12 | 
13 | ## Abstract
14 | Deep learning-based methods for mitochondria segmentation require sufficient annotations on Electron Microscopy (EM) volumes, which are often expensive and time-consuming to collect. Recently, Unsupervised Domain Adaptation (UDA) has been proposed to avoid annotating on target EM volumes by exploiting annotated source EM volumes. However, existing UDA methods for mitochondria segmentation only address the intra-section gap between source and target volumes but ignore the inter-section gap between them, which restricts the generalization capability of the learned model on target volumes. In this paper, for the first time, we propose a domain adaptive mitochondria segmentation method via enforcing inter-section consistency. The key idea is to learn an inter-section residual on the segmentation results of adjacent sections using a CNN. The inter-section residuals predicted from source and target volumes are then aligned via adversarial learning. Meanwhile, guided by the learned inter-section residual, we can generate pseudo labels to supervise the segmentation of adjacent sections inside the target volume, which further enforces inter-section consistency. Extensive experiments demonstrate the superiority of our proposed method on four representative and diverse EM datasets. Code is available at https://github.com/weih527/DA-ISC.
15 | 
16 | ## Framework and Network Architecture
17 | ![framework](./images/framework.png)
18 | 
19 | ![network](./images/network.png)
20 | 
21 | ## Environment
22 | This code was tested with PyTorch 1.0.1 (later versions may work), CUDA 9.0, Python 3.7.4 and Ubuntu 16.04. It is worth mentioning that, besides some commonly used image processing packages, only a few additional packages need to be installed manually (see below).
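As a quick sanity check of the environment, you can print the installed PyTorch version and CUDA availability (a minimal sketch; '1.0.1 True' is simply what our tested setup reports):
```python
import torch
print(torch.__version__, torch.cuda.is_available())  # tested setup: 1.0.1 True
```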
23 | 
24 | If you have a [Docker](https://www.docker.com/) environment, we strongly recommend pulling our image as follows:
25 | ```shell
26 | docker pull registry.cn-hangzhou.aliyuncs.com/renwu527/auto-emseg:v5.4
27 | ```
28 | or
29 | ```shell
30 | docker pull renwu527/auto-emseg:v5.4
31 | ```
32 | In addition, a few Python packages need to be installed manually:
33 | ```shell
34 | pip install albumentations
35 | pip uninstall opencv-python # remove the old version
36 | pip install opencv-python
37 | ```
38 | 
39 | The full list of installed Python packages can be found in 'requirements.txt'.
40 | 
41 | ## Datasets
42 | ### Data Properties
43 | 
44 | |Datasets|VNC III|Lucchi|MitoEM-R|MitoEM-H|
45 | |-|-|-|-|-|
46 | |Organism|Drosophila|Mouse|Rat|Human|
47 | |Tissue|Ventral nerve cord|Hippocampus|Cortex|Cortex|
48 | |Device|ssTEM|FIB-SEM|mbSEM|mbSEM|
49 | |Resolution|50x5x5 nm|5x5x5 nm|30x8x8 nm|30x8x8 nm|
50 | |Training set|20x1024x1024|165x768x1024|400x4096x4096|400x4096x4096|
51 | |Test set|None|165x768x1024|100x4096x4096|100x4096x4096|
52 | |Website|[GitHub](https://github.com/unidesigner/groundtruth-drosophila-vnc)|[EPFL](https://www.epfl.ch/labs/cvlab/data/data-em/)|[MitoEM](https://mitoem.grand-challenge.org/)|[MitoEM](https://mitoem.grand-challenge.org/)|
53 | 
54 | **You can download our processed data directly from [GoogleDrive](https://drive.google.com/drive/folders/15oFhlWoBACOpyEUDx2-wbnUj3aIwYydv?usp=sharing) or [BaiduYun](https://pan.baidu.com/s/15BS0Sa_LPuyKxNJaAob4Eg) (Access code: weih). However, because the MitoEM dataset is too large (>10GB), we cannot put it in our cloud storage. It is recommended to download it from the official website.**
55 | 
56 | ### Data Tree
57 | ```python
58 | |--./data
59 | | |--Lucchi
60 | | | |--testing
61 | | | |--testing_groundtruth
62 | | | |--training
63 | | | |--training_groundtruth
64 | | |--Mito
65 | | | |--human
66 | | | | |--testing.hdf
67 | | | | |--testing_groundtruth.hdf
68 | | | | |--training.hdf
69 | | | | |--training_groundtruth.hdf
70 | | | |--rat
71 | | | | |--testing.hdf
72 | | | | |--testing_groundtruth.hdf
73 | | | | |--training.hdf
74 | | | | |--training_groundtruth.hdf
75 | | |--VNC3
76 | | | |--training
77 | | | |--training_groundtruth
78 | ```
79 | 
80 | ## Training
81 | We train our method on one NVIDIA TITAN Xp GPU.
82 | Our training log files can be found in './logs'.
83 | 
84 | ### VNC III --> Lucchi (Subset1)
85 | ```shell
86 | cd scripts
87 | python main.py -c vnc2lucchi1
88 | ```
89 | 
90 | ### VNC III --> Lucchi (Subset2)
91 | ```shell
92 | cd scripts
93 | python main.py -c vnc2lucchi2
94 | ```
95 | 
96 | ### MitoEM-R --> MitoEM-H
97 | ```shell
98 | cd scripts
99 | python main_mito.py -c mitor2h
100 | ```
101 | 
102 | ### MitoEM-H --> MitoEM-R
103 | ```shell
104 | cd scripts
105 | python main_mito.py -c mitoh2r
106 | ```
107 | 
108 | ## Inference
109 | We test our trained model on one NVIDIA TITAN Xp GPU.
110 | 
111 | **We store our trained models at [GoogleDrive](https://drive.google.com/drive/folders/1OiL9-qcfsdncVJhvg0ELmTPqhQhneUmT?usp=sharing) or [BaiduYun](https://pan.baidu.com/s/1Jg8XVzcTvhrGnOXXMM4Alg) (Access code: weih).**
112 | 
113 | ### VNC III --> Lucchi (Subset1)
114 | ```shell
115 | cd scripts
116 | python inference.py -c vnc2lucchi1 -mn vnc2lucchi1 -sw
117 | ```
118 | Printed output:
119 | ```python
120 | cfg_file: vnc2lucchi1.yaml
121 | out_path: ../inference/vnc2lucchi1
122 | Begin inference...
123 | Prediction time (s): 138.40105080604553
124 | Measure on mAP, F1, MCC, and IoU... 
125 | mAP=0.8948, F1=0.8129, MCC=0.8053, IoU=0.6865
126 | Measurement time (s): 917.6475455760956
127 | Done
128 | ```
129 | ### VNC III --> Lucchi (Subset2)
130 | ```shell
131 | cd scripts
132 | python inference.py -c vnc2lucchi2 -mn vnc2lucchi2 -sw
133 | ```
134 | Printed output:
135 | ```python
136 | cfg_file: vnc2lucchi2.yaml
137 | out_path: ../inference/vnc2lucchi2
138 | Begin inference...
139 | Prediction time (s): 144.69077563285828
140 | Measure on mAP, F1, MCC, and IoU...
141 | mAP=0.9244, F1=0.8518, MCC=0.8448, IoU=0.7431
142 | Measurement time (s): 912.5876989364624
143 | Done
144 | ```
145 | 
146 | ### MitoEM-R --> MitoEM-H
147 | **Quantitative measurement requires a large amount of memory (>100 GB).**
148 | 
149 | ```shell
150 | cd scripts
151 | python inference_mito.py -c mitor2h -mn mitor2h -sw
152 | ```
153 | Printed output:
154 | ```python
155 | cfg_file: mitor2h.yaml
156 | out_path: ../inference/mitor2h
157 | Load ../data/Mito/human/testing.hdf
158 | raw shape: (100, 4096, 4096)
159 | padded raw shape: (100, 4608, 4608)
160 | iters: 6336
161 | Load ../data/Mito/human/testing_groundtruth.hdf
162 | Begin inference...
163 | the number of sub-volume: 6336
164 | Prediction time (s): 1438.0159723758698
165 | Measure on mAP, F1, MCC, and IoU...
166 | mAP=0.9256, F1=0.8557, MCC=0.8495, IoU=0.7479
167 | Measurement time (s): 4127.024113416672
168 | Done
169 | ```
170 | 
171 | ### MitoEM-H --> MitoEM-R
172 | **Quantitative measurement requires a large amount of memory (>100 GB).**
173 | 
174 | ```shell
175 | cd scripts
176 | python inference_mito.py -c mitoh2r -mn mitoh2r -sw
177 | ```
178 | Printed output:
179 | ```python
180 | cfg_file: mitoh2r.yaml
181 | out_path: ../inference/mitoh2r
182 | Load ../data/Mito/rat/testing.hdf
183 | raw shape: (100, 4096, 4096)
184 | padded raw shape: (100, 4608, 4608)
185 | iters: 6336
186 | Load ../data/Mito/rat/testing_groundtruth.hdf
187 | Begin inference...
188 | the number of sub-volume: 6336
189 | Prediction time (s): 1441.9779460430145
190 | Measure on mAP, F1, MCC, and IoU...
191 | mAP=0.9682, F1=0.8851, MCC=0.8829, IoU=0.7941
192 | Measurement time (s): 4129.04722571373
193 | Done
194 | ```
195 | 
196 | ## Visual Results
197 | ![visual_results](./images/visual_results.png)
198 | 
199 | ## Contact
200 | If you have any problems with the released code, please do not hesitate to contact me by email (weih527@mail.ustc.edu.cn). 
201 | -------------------------------------------------------------------------------- /scripts/model/accel_deeplabv2.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | affine_par = True 5 | 6 | class Bottleneck(nn.Module): 7 | expansion = 4 8 | 9 | def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None): 10 | super(Bottleneck, self).__init__() 11 | # change 12 | self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False) 13 | self.bn1 = nn.BatchNorm2d(planes, affine=affine_par) 14 | for i in self.bn1.parameters(): 15 | i.requires_grad = False 16 | padding = dilation 17 | # change 18 | self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, 19 | padding=padding, bias=False, dilation=dilation) 20 | self.bn2 = nn.BatchNorm2d(planes, affine=affine_par) 21 | for i in self.bn2.parameters(): 22 | i.requires_grad = False 23 | self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) 24 | self.bn3 = nn.BatchNorm2d(planes * 4, affine=affine_par) 25 | for i in self.bn3.parameters(): 26 | i.requires_grad = False 27 | self.relu = nn.ReLU(inplace=True) 28 | self.downsample = downsample 29 | self.stride = stride 30 | 31 | def forward(self, x): 32 | residual = x 33 | out = self.conv1(x) 34 | out = self.bn1(out) 35 | out = self.relu(out) 36 | out = self.conv2(out) 37 | out = self.bn2(out) 38 | out = self.relu(out) 39 | out = self.conv3(out) 40 | out = self.bn3(out) 41 | if self.downsample is not None: 42 | residual = self.downsample(x) 43 | out += residual 44 | out = self.relu(out) 45 | return out 46 | 47 | class ClassifierModule(nn.Module): 48 | def __init__(self, inplanes, dilation_series, padding_series, num_classes): 49 | super(ClassifierModule, self).__init__() 50 | self.conv2d_list = nn.ModuleList() 51 | for dilation, padding in zip(dilation_series, padding_series): 52 | self.conv2d_list.append( 53 | nn.Conv2d(inplanes, num_classes, kernel_size=3, stride=1, padding=padding, 54 | dilation=dilation, bias=True)) 55 | 56 | for m in self.conv2d_list: 57 | m.weight.data.normal_(0, 0.01) 58 | 59 | def forward(self, x): 60 | out = self.conv2d_list[0](x) 61 | for i in range(len(self.conv2d_list) - 1): 62 | out += self.conv2d_list[i + 1](x) 63 | return out 64 | 65 | class ResNetMulti(nn.Module): 66 | def __init__(self, block, layers, num_classes, multi_level): 67 | self.multi_level = multi_level 68 | self.inplanes = 64 69 | super(ResNetMulti, self).__init__() 70 | self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, 71 | bias=False) 72 | self.bn1 = nn.BatchNorm2d(64, affine=affine_par) 73 | for i in self.bn1.parameters(): 74 | i.requires_grad = False 75 | self.relu = nn.ReLU(inplace=True) 76 | self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True) # change 77 | self.layer1 = self._make_layer(block, 64, layers[0]) 78 | self.layer2 = self._make_layer(block, 128, layers[1], stride=2) 79 | self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2) 80 | self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4) 81 | if self.multi_level: 82 | self.layer5 = ClassifierModule(1024, [6, 12, 18, 24], [6, 12, 18, 24], num_classes) 83 | self.layer6 = ClassifierModule(2048, [6, 12, 18, 24], [6, 12, 18, 24], num_classes) 84 | for m in self.modules(): 85 | if isinstance(m, nn.Conv2d): 86 | m.weight.data.normal_(0, 0.01) 87 | elif isinstance(m, nn.BatchNorm2d): 88 | m.weight.data.fill_(1) 89 | 
m.bias.data.zero_() 90 | self.sf_layer = self.get_score_fusion_layer(num_classes) 91 | 92 | def get_score_fusion_layer(self, num_classes): 93 | sf_layer = nn.Conv2d(num_classes * 2, num_classes, kernel_size=1, stride=1, padding=0, bias=False) 94 | nn.init.zeros_(sf_layer.weight) 95 | nn.init.eye_(sf_layer.weight[:, :num_classes, :, :].squeeze(-1).squeeze(-1)) 96 | return sf_layer 97 | 98 | def _make_layer(self, block, planes, blocks, stride=1, dilation=1): 99 | downsample = None 100 | if (stride != 1 101 | or self.inplanes != planes * block.expansion 102 | or dilation == 2 103 | or dilation == 4): 104 | downsample = nn.Sequential( 105 | nn.Conv2d(self.inplanes, planes * block.expansion, 106 | kernel_size=1, stride=stride, bias=False), 107 | nn.BatchNorm2d(planes * block.expansion, affine=affine_par)) 108 | for i in downsample._modules['1'].parameters(): 109 | i.requires_grad = False 110 | layers = [] 111 | layers.append( 112 | block(self.inplanes, planes, stride, dilation=dilation, downsample=downsample)) 113 | self.inplanes = planes * block.expansion 114 | for i in range(1, blocks): 115 | layers.append(block(self.inplanes, planes, dilation=dilation)) 116 | return nn.Sequential(*layers) 117 | 118 | def forward(self, cf, kf): 119 | cf = self.conv1(cf) 120 | cf = self.bn1(cf) 121 | cf = self.relu(cf) 122 | cf = self.maxpool(cf) 123 | cf = self.layer1(cf) 124 | cf = self.layer2(cf) 125 | cf = self.layer3(cf) 126 | if self.multi_level: 127 | cf_aux = self.layer5(cf) 128 | else: 129 | cf_aux = None 130 | cf = self.layer4(cf) 131 | cf = self.layer6(cf) 132 | with torch.no_grad(): 133 | kf = self.conv1(kf) 134 | kf = self.bn1(kf) 135 | kf = self.relu(kf) 136 | kf = self.maxpool(kf) 137 | kf = self.layer1(kf) 138 | kf = self.layer2(kf) 139 | kf = self.layer3(kf) 140 | if self.multi_level: 141 | kf_aux = self.layer5(kf) 142 | else: 143 | kf_aux = None 144 | kf = self.layer4(kf) 145 | kf = self.layer6(kf) 146 | return cf_aux, cf, kf_aux, kf 147 | 148 | def get_1x_lr_params_no_scale(self): 149 | """ 150 | This generator returns all the parameters of the net except for 151 | the last classification layer. 
Note that for each batchnorm layer, 152 | requires_grad is set to False in deeplab_resnet.py, therefore this function does not return 153 | any batchnorm parameter 154 | """ 155 | b = [] 156 | b.append(self.conv1) 157 | b.append(self.bn1) 158 | b.append(self.layer1) 159 | b.append(self.layer2) 160 | b.append(self.layer3) 161 | b.append(self.layer4) 162 | 163 | for i in range(len(b)): 164 | for j in b[i].modules(): 165 | jj = 0 166 | for k in j.parameters(): 167 | jj += 1 168 | if k.requires_grad: 169 | yield k 170 | 171 | def get_10x_lr_params(self): 172 | """ 173 | This generator returns all the parameters for the last layer of the net, 174 | which does the classification of pixel into classes 175 | """ 176 | b = [] 177 | if self.multi_level: 178 | b.append(self.layer5.parameters()) 179 | b.append(self.layer6.parameters()) 180 | 181 | for j in range(len(b)): 182 | for i in b[j]: 183 | yield i 184 | 185 | def get_1x_lr_params_sf_layer(self): 186 | b = [] 187 | b.append(self.sf_layer.parameters()) 188 | for j in range(len(b)): 189 | for i in b[j]: 190 | yield i 191 | 192 | def optim_parameters(self, lr): 193 | return [{'params': self.get_1x_lr_params_no_scale(), 'lr': lr}, 194 | {'params': self.get_1x_lr_params_sf_layer(), 'lr': lr}, 195 | {'params': self.get_10x_lr_params(), 'lr': 10 * lr}] 196 | 197 | 198 | def get_accel_deeplab_v2(num_classes=19, multi_level=True): 199 | model = ResNetMulti(Bottleneck, [3, 4, 23, 3], num_classes, multi_level) 200 | return model 201 | 202 | 203 | if __name__ == "__main__": 204 | import numpy as np 205 | from ptflops import get_model_complexity_info 206 | input = np.random.random((2,3,512,512)).astype(np.float32) 207 | x = torch.tensor(input).to('cuda:0') 208 | 209 | model = get_accel_deeplab_v2(num_classes=2, multi_level=True).cuda() 210 | cf_aux, cf, kf_aux, kf = model(x, x) 211 | print(cf_aux.shape) 212 | print(cf.shape) 213 | print(kf_aux.shape) 214 | print(kf.shape) 215 | 216 | # macs, params = get_model_complexity_info(model, (3, 512, 512), as_strings=True, 217 | # print_per_layer_stat=True, verbose=True) 218 | # print('{:<30} {:<8}'.format('Computational complexity: ', macs)) 219 | # print('{:<30} {:<8}'.format('Number of parameters: ', params)) -------------------------------------------------------------------------------- /scripts/utils/affinity.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Descripttion: 3 | version: 0.0 4 | Author: Wei Huang 5 | Date: 2022-01-07 21:20:55 6 | ''' 7 | 8 | import torch 9 | import numpy as np 10 | 11 | def binary_func(img): 12 | img[img <= 0.5] = 0 13 | img[img > 0.5] = 1 14 | return img 15 | 16 | def difference(img1, img2, shift): 17 | if shift == 0: 18 | diff = np.bitwise_xor(img1, img2) 19 | diff = diff.astype(np.float32) 20 | return diff 21 | else: 22 | img1_shift = np.zeros_like(img1) 23 | img1_shift[shift:, :] = img1[:-shift, :] 24 | diff1 = np.bitwise_xor(img1_shift, img2) 25 | diff1 = diff1.astype(np.float32) 26 | 27 | img1_shift = np.zeros_like(img1) 28 | img1_shift[:, shift:] = img1[:, :-shift] 29 | diff2 = np.bitwise_xor(img1_shift, img2) 30 | diff2 = diff2.astype(np.float32) 31 | return [diff1, diff2] 32 | 33 | def gen_affs(img1, img2, shifts=[0]): 34 | assert len(shifts) > 0, "shifts must be [0, ...]" 35 | 36 | img1 = binary_func(img1) 37 | img2 = binary_func(img2) 38 | 39 | if img1.max() > 1: 40 | img1 = img1.astype(np.float32) / 255 41 | img1 = img1.astype(bool) 42 | if img2.max() > 1: 43 | img2 = img2.astype(np.float32) / 255 44 | img2 = 
img2.astype(bool) 45 | 46 | length = len(shifts) 47 | h, w = img1.shape 48 | affs = np.zeros((length*2-1, h, w), dtype=np.float32) 49 | for i, shift in enumerate(shifts): 50 | diff = difference(img1.copy(), img2.copy(), shift) 51 | if shift == 0: 52 | affs[0] = diff 53 | else: 54 | affs[2*i-1] = diff[0] 55 | affs[2*i] = diff[1] 56 | return affs 57 | 58 | def recover_previous_single(img2, affs, shift): 59 | if shift == 0: 60 | img1 = np.abs(affs - img2) 61 | img1 = binary_func(img1) 62 | return img1 63 | else: 64 | affs1 = affs[0] 65 | affs2 = affs[1] 66 | img1_1 = np.abs(affs1 - img2) 67 | img1_1_shift = np.zeros_like(img1_1) 68 | img1_1_shift[:-shift, :] = img1_1[shift:, :] 69 | img1_1_shift = binary_func(img1_1_shift) 70 | img1_2 = np.abs(affs2 - img2) 71 | img1_2_shift = np.zeros_like(img1_2) 72 | img1_2_shift[:, :-shift] = img1_2[:, shift:] 73 | img1_2_shift = binary_func(img1_2_shift) 74 | return [img1_1_shift, img1_2_shift] 75 | 76 | def recover_subsequent_single(img1, affs, shift): 77 | if shift == 0: 78 | img2 = np.abs(affs - img1) 79 | img2 = binary_func(img2) 80 | return img2 81 | else: 82 | affs1 = affs[0] 83 | affs2 = affs[1] 84 | img2_1_shift = np.zeros_like(img1) 85 | img2_1_shift[shift:, :] = img1[:-shift, :] 86 | img2_1 = np.abs(affs1 - img2_1_shift) 87 | img2_1 = binary_func(img2_1) 88 | img2_2_shift = np.zeros_like(img1) 89 | img2_2_shift[:, shift:] = img1[:, :-shift] 90 | img2_2 = np.abs(affs2 - img2_2_shift) 91 | img2_2 = binary_func(img2_2) 92 | return [img2_1, img2_2] 93 | 94 | def recover_previous(img2, affs, shifts): 95 | img1_recovered = np.zeros_like(affs, dtype=np.float32) 96 | for i, shift in enumerate(shifts): 97 | if shift == 0: 98 | temp_img1 = recover_previous_single(img2, affs[0], shift) 99 | img1_recovered[0] = temp_img1 100 | else: 101 | temp_img1 = recover_previous_single(img2, [affs[2*i-1], affs[2*i]], shift) 102 | img1_recovered[2*i-1] = temp_img1[0] 103 | img1_recovered[2*i] = temp_img1[1] 104 | return img1_recovered 105 | 106 | def recover_subsequent(img1, affs, shifts): 107 | img2_recovered = np.zeros_like(affs, dtype=np.float32) 108 | for i, shift in enumerate(shifts): 109 | if shift == 0: 110 | temp_img2 = recover_subsequent_single(img1, affs[0], shift) 111 | img2_recovered[0] = temp_img2 112 | else: 113 | temp_img2 = recover_subsequent_single(img1, [affs[2*i-1], affs[2*i]], shift) 114 | img2_recovered[2*i-1] = temp_img2[0] 115 | img2_recovered[2*i] = temp_img2[1] 116 | return img2_recovered 117 | 118 | def recover(img1, img2, affs, shifts=[0], binary=True): 119 | if binary: 120 | img1 = binary_func(img1) 121 | img2 = binary_func(img2) 122 | affs = binary_func(affs) 123 | img1_recovered = np.zeros_like(affs, dtype=np.float32) 124 | img2_recovered = np.zeros_like(affs, dtype=np.float32) 125 | for i, shift in enumerate(shifts): 126 | if shift == 0: 127 | temp_img1 = recover_previous_single(img2, affs[0], shift) 128 | img1_recovered[0] = temp_img1 129 | temp_img2 = recover_subsequent_single(img1, affs[0], shift) 130 | img2_recovered[0] = temp_img2 131 | else: 132 | temp_img1 = recover_previous_single(img2, [affs[2*i-1], affs[2*i]], shift) 133 | img1_recovered[2*i-1] = temp_img1[0] 134 | img1_recovered[2*i] = temp_img1[1] 135 | temp_img2 = recover_subsequent_single(img1, [affs[2*i-1], affs[2*i]], shift) 136 | img2_recovered[2*i-1] = temp_img2[0] 137 | img2_recovered[2*i] = temp_img2[1] 138 | return img1_recovered, img2_recovered 139 | 140 | def recover_previous_single_torch(img2, affs, shift): 141 | if shift == 0: 142 | img1 = torch.abs(affs - img2) 
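# NOTE: for {0,1}-valued maps, |x - y| equals XOR(x, y), and |XOR(x, y) - y| = x,
# so this absolute difference exactly inverts gen_affs() on binary inputs (easy to
# verify on the four binary cases); for soft predictions in [0, 1], it acts as a
# smooth relaxation of the boolean-XOR recovery in recover_previous_single() above.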
143 | return img1 144 | else: 145 | affs1 = affs[0] 146 | affs2 = affs[1] 147 | img1_1 = torch.abs(affs1 - img2) 148 | img1_1_shift = torch.zeros_like(img1_1) 149 | img1_1_shift[:, :-shift, :] = img1_1[:, shift:, :] 150 | img1_2 = torch.abs(affs2 - img2) 151 | img1_2_shift = torch.zeros_like(img1_2) 152 | img1_2_shift[:, :, :-shift] = img1_2[:, :, shift:] 153 | return [img1_1_shift, img1_2_shift] 154 | 155 | def recover_subsequent_single_torch(img1, affs, shift): 156 | if shift == 0: 157 | img2 = torch.abs(affs - img1) 158 | return img2 159 | else: 160 | affs1 = affs[0] 161 | affs2 = affs[1] 162 | img2_1_shift = torch.zeros_like(img1) 163 | img2_1_shift[:, shift:, :] = img1[:, :-shift, :] 164 | img2_1 = torch.abs(affs1 - img2_1_shift) 165 | img2_2_shift = torch.zeros_like(img1) 166 | img2_2_shift[:, :, shift:] = img1[:, :, :-shift] 167 | img2_2 = torch.abs(affs2 - img2_2_shift) 168 | return [img2_1, img2_2] 169 | 170 | def recover_torch(img1, img2, affs, shifts=[0]): 171 | # img1 = torch.squeeze(img1, dim=0) 172 | # img2 = torch.squeeze(img2, dim=0) 173 | # affs = torch.squeeze(affs, dim=0) 174 | img1_recovered = torch.zeros_like(affs) 175 | img2_recovered = torch.zeros_like(affs) 176 | for i, shift in enumerate(shifts): 177 | if shift == 0: 178 | temp_img1 = recover_previous_single_torch(img2, affs[:, 0], shift) 179 | img1_recovered[:, 0] = temp_img1 180 | temp_img2 = recover_subsequent_single_torch(img1, affs[:, 0], shift) 181 | img2_recovered[:, 0] = temp_img2 182 | else: 183 | temp_img1 = recover_previous_single_torch(img2, [affs[:, 2*i-1], affs[:, 2*i]], shift) 184 | img1_recovered[:, 2*i-1] = temp_img1[0] 185 | img1_recovered[:, 2*i] = temp_img1[1] 186 | temp_img2 = recover_subsequent_single_torch(img1, [affs[:, 2*i-1], affs[:, 2*i]], shift) 187 | img2_recovered[:, 2*i-1] = temp_img2[0] 188 | img2_recovered[:, 2*i] = temp_img2[1] 189 | # img1_recovered = torch.unsqueeze(img1_recovered, dim=0) 190 | # img2_recovered = torch.unsqueeze(img2_recovered, dim=0) 191 | return img1_recovered, img2_recovered 192 | 193 | def img_mean(img): 194 | mean_img = torch.mean(img, dim=1, keepdim=False) 195 | return mean_img 196 | 197 | if __name__ == '__main__': 198 | import os 199 | from PIL import Image 200 | 201 | data_path = '../data/VNC3/training_groundtruth' 202 | img1 = np.asarray(Image.open(os.path.join(data_path, '000.png'))).astype(np.float32) / 255.0 203 | img2 = np.asarray(Image.open(os.path.join(data_path, '001.png'))).astype(np.float32) / 255.0 204 | 205 | shifts = [0,1,3,5,9] 206 | 207 | img1 = binary_func(img1) 208 | img2 = binary_func(img2) 209 | 210 | affs = gen_affs(img1, img2, shifts) 211 | print(affs.shape) 212 | 213 | # shift = 1 214 | # diff = affs[shift] 215 | # diff = (diff * 255).astype(np.uint8) 216 | # Image.fromarray(diff).save(os.path.join(data_path, 'diff_%d.png' % shift)) 217 | 218 | img1_recovered, img2_recovered = recover(img1, img2, affs, shifts, binary=True) 219 | print(img1_recovered.shape) 220 | print(img2_recovered.shape) 221 | 222 | num = img2_recovered.shape[0] 223 | img1 = img1[:-9, :-9] 224 | for i in range(num): 225 | # img2_recovered_tmp = img2_recovered[i] 226 | # delta = np.sum(np.abs(img2 - img2_recovered_tmp)) 227 | img1_recovered_tmp = img1_recovered[i] 228 | img1_recovered_tmp = img1_recovered_tmp[:-9, :-9] 229 | delta = np.sum(np.abs(img1 - img1_recovered_tmp)) 230 | print(delta) 231 | -------------------------------------------------------------------------------- /scripts/model/CoDetectionCNN.py: 
-------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | class DoubleConv(nn.Module): 6 | """(conv => BN => ReLU) * 2""" 7 | 8 | def __init__(self, in_ch, out_ch): 9 | super(DoubleConv, self).__init__() 10 | self.conv = nn.Sequential( 11 | nn.Conv2d(in_ch, out_ch, 3, padding=1), 12 | # nn.BatchNorm2d(out_ch), 13 | nn.InstanceNorm2d(out_ch), 14 | nn.ReLU(inplace=True), 15 | nn.Conv2d(out_ch, out_ch, 3, padding=1), 16 | # nn.BatchNorm2d(out_ch), 17 | nn.InstanceNorm2d(out_ch), 18 | nn.ReLU(inplace=True), 19 | ) 20 | 21 | def forward(self, x): 22 | x = self.conv(x) 23 | return x 24 | 25 | 26 | class Inconv(nn.Module): 27 | def __init__(self, in_ch, out_ch): 28 | super(Inconv, self).__init__() 29 | self.conv = DoubleConv(in_ch, out_ch) 30 | 31 | def forward(self, x): 32 | x = self.conv(x) 33 | return x 34 | 35 | 36 | class Down(nn.Module): 37 | def __init__(self, in_ch, out_ch): 38 | super(Down, self).__init__() 39 | self.mpconv = nn.Sequential(nn.MaxPool2d(2), DoubleConv(in_ch, out_ch)) 40 | 41 | def forward(self, x): 42 | x = self.mpconv(x) 43 | return x 44 | 45 | 46 | class Up(nn.Module): 47 | def __init__(self, in_ch, out_ch, bilinear=False): 48 | super(Up, self).__init__() 49 | 50 | # would be a nice idea if the upsampling could be learned too, 51 | # but my machine do not have enough memory to handle all those weights 52 | if bilinear: 53 | self.up = nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True) 54 | else: 55 | self.up = nn.ConvTranspose2d(in_ch // 2, in_ch // 2, 2, stride=2) 56 | 57 | self.conv = DoubleConv(in_ch, out_ch) 58 | 59 | def forward(self, x1, x2): 60 | x1 = self.up(x1) 61 | # diffX = x1.size()[2] - x2.size()[2] 62 | # diffY = x1.size()[3] - x2.size()[3] 63 | # x2 = F.pad(x2, (diffX // 2, int(diffX / 2), diffY // 2, int(diffY / 2))) 64 | x = torch.cat([x2, x1], dim=1) 65 | x = self.conv(x) 66 | return x 67 | 68 | 69 | class Up_cat3(nn.Module): 70 | def __init__(self, in_ch, out_ch, bilinear=False): 71 | super(Up_cat3, self).__init__() 72 | 73 | # would be a nice idea if the upsampling could be learned too, 74 | # but my machine do not have enough memory to handle all those weights 75 | if bilinear: 76 | self.up = nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True) 77 | else: 78 | self.up = nn.ConvTranspose2d(in_ch // 3, in_ch // 3, 2, stride=2) 79 | 80 | self.conv = DoubleConv(in_ch, out_ch) 81 | 82 | def forward(self, x1, x2, x3): 83 | x1 = self.up(x1) 84 | # diffX = x1.size()[2] - x2.size()[2] 85 | # diffY = x1.size()[3] - x2.size()[3] 86 | # x2 = F.pad(x2, (diffX // 2, int(diffX / 2), diffY // 2, int(diffY / 2))) 87 | x = torch.cat([x3, x2, x1], dim=1) 88 | x = self.conv(x) 89 | return x 90 | 91 | 92 | class Upself(Up): 93 | def forward(self, x): 94 | x = self.up(x) 95 | x = self.conv(x) 96 | return x 97 | 98 | 99 | class Outconv(nn.Module): 100 | def __init__(self, in_ch, out_ch, sig): 101 | super(Outconv, self).__init__() 102 | self.conv = nn.Conv2d(in_ch, out_ch, 1) 103 | self.act = nn.Sigmoid() 104 | self.sig = sig 105 | 106 | def forward(self, x): 107 | x = self.conv(x) 108 | if self.sig: 109 | x = self.act(x) 110 | return x 111 | 112 | 113 | class Outconv2(nn.Module): 114 | def __init__(self, in_ch, out_ch, sig): 115 | super(Outconv2, self).__init__() 116 | self.conv = nn.Sequential( 117 | nn.Conv2d(in_ch, in_ch, 3, padding=1), 118 | # nn.BatchNorm2d(in_ch), 119 | nn.InstanceNorm2d(in_ch), 120 | nn.ReLU(inplace=True), 121 | 
nn.Conv2d(in_ch, out_ch, 3, padding=1), 122 | ) 123 | self.act = nn.Sigmoid() 124 | self.sig = sig 125 | 126 | def forward(self, x): 127 | x = self.conv(x) 128 | if self.sig: 129 | x = self.act(x) 130 | return x 131 | 132 | 133 | class CoDetectionCNN(nn.Module): 134 | def __init__(self, n_channels, n_classes, sig=False): 135 | super().__init__() 136 | self.inc = Inconv(n_channels, 64) 137 | 138 | self.down1 = Down(64, 128) 139 | self.down2 = Down(256, 256) 140 | self.down3 = Down(256, 512) 141 | self.down4 = Down(512, 512) 142 | 143 | self.up1 = Up(1024, 256) 144 | self.up2 = Up(512, 128) 145 | self.up3_1 = Up(256, 64) 146 | self.up3_2 = Up(256, 64) 147 | self.up4_1 = Up(128, 32) 148 | self.up4_2 = Up(128, 32) 149 | 150 | self.up1_diff = Up(1024, 256) 151 | self.up2_diff = Up(512, 128) 152 | self.up3_diff = Up_cat3(128*3, 64) 153 | self.up4_diff = Up_cat3(64*3, 32) 154 | 155 | self.out_1 = Outconv2(32, n_classes, sig=sig) 156 | self.out_2 = Outconv2(32, n_classes, sig=sig) 157 | self.out_diff = Outconv2(32, n_classes, sig=sig) 158 | 159 | def forward(self, x, diff=True): 160 | x1 = x[:, 0:1, :, :] 161 | x2 = x[:, 1::, :, :] 162 | 163 | # encoder 164 | down0_1 = self.inc(x1) # B, 64, 512, 512 165 | down0_2 = self.inc(x2) # B, 64, 512, 512 166 | down1_1 = self.down1(down0_1) # B, 128, 256, 256 167 | down1_2 = self.down1(down0_2) # B, 128, 256, 256 168 | x_cat = torch.cat([down1_1, down1_2], dim=1) # B, 256, 256, 256 169 | down2 = self.down2(x_cat) # B, 256, 128, 128 170 | down3 = self.down3(down2) # B, 512, 64, 64 171 | down4 = self.down4(down3) # B, 512, 32, 32 172 | 173 | # decoder 174 | up1 = self.up1(down4, down3) # B, 256, 64, 64 175 | up2 = self.up2(up1, down2) # B, 128, 128, 128 176 | up3_1 = self.up3_1(up2, down1_1) # B, 64, 256, 256 177 | up3_2 = self.up3_2(up2, down1_2) # B, 64, 256, 256 178 | up4_1 = self.up4_1(up3_1, down0_1) # B, 32, 512, 512 179 | up4_2 = self.up4_2(up3_2, down0_2) # B, 32, 512, 512 180 | 181 | # output 182 | out_1 = self.out_1(up4_1) # B, 2, 512, 512 183 | out_2 = self.out_2(up4_2) # B, 2, 512, 512 184 | 185 | if diff: 186 | # decoder2 187 | up1_diff = self.up1_diff(down4, down3) 188 | up2_diff = self.up2_diff(up1_diff, down2) 189 | up3_diff = self.up3_diff(up2_diff, down1_1, down1_2) 190 | up4_diff = self.up4_diff(up3_diff, down0_1, down0_2) 191 | out_diff = self.out_diff(up4_diff) 192 | return out_1, out_2, out_diff 193 | else: 194 | return out_1, out_2 195 | 196 | class CoDetectionCNN_affs(nn.Module): 197 | def __init__(self, n_channels, n_classes, n_affs, sig=False): 198 | super().__init__() 199 | self.inc = Inconv(n_channels, 64) 200 | 201 | self.down1 = Down(64, 128) 202 | self.down2 = Down(256, 256) 203 | self.down3 = Down(256, 512) 204 | self.down4 = Down(512, 512) 205 | 206 | self.up1 = Up(1024, 256) 207 | self.up2 = Up(512, 128) 208 | self.up3_1 = Up(256, 64) 209 | self.up3_2 = Up(256, 64) 210 | self.up4_1 = Up(128, 32) 211 | self.up4_2 = Up(128, 32) 212 | 213 | self.up1_diff = Up(1024, 256) 214 | self.up2_diff = Up(512, 128) 215 | self.up3_diff = Up_cat3(128*3, 64) 216 | self.up4_diff = Up_cat3(64*3, 32) 217 | 218 | self.out_1 = Outconv2(32, n_classes, sig=sig) 219 | self.out_2 = Outconv2(32, n_classes, sig=sig) 220 | self.out_diff = Outconv2(32, n_affs, sig=True) 221 | 222 | def forward(self, x, diff=True): 223 | x1 = x[:, 0:1, :, :] 224 | x2 = x[:, 1::, :, :] 225 | 226 | # encoder 227 | down0_1 = self.inc(x1) # B, 64, 512, 512 228 | down0_2 = self.inc(x2) # B, 64, 512, 512 229 | down1_1 = self.down1(down0_1) # B, 128, 256, 256 230 | down1_2 = 
self.down1(down0_2) # B, 128, 256, 256 231 | x_cat = torch.cat([down1_1, down1_2], dim=1) # B, 256, 256, 256 232 | down2 = self.down2(x_cat) # B, 256, 128, 128 233 | down3 = self.down3(down2) # B, 512, 64, 64 234 | down4 = self.down4(down3) # B, 512, 32, 32 235 | 236 | # decoder 237 | up1 = self.up1(down4, down3) # B, 256, 64, 64 238 | up2 = self.up2(up1, down2) # B, 128, 128, 128 239 | up3_1 = self.up3_1(up2, down1_1) # B, 64, 256, 256 240 | up3_2 = self.up3_2(up2, down1_2) # B, 64, 256, 256 241 | up4_1 = self.up4_1(up3_1, down0_1) # B, 32, 512, 512 242 | up4_2 = self.up4_2(up3_2, down0_2) # B, 32, 512, 512 243 | 244 | # output 245 | out_1 = self.out_1(up4_1) # B, 2, 512, 512 246 | out_2 = self.out_2(up4_2) # B, 2, 512, 512 247 | 248 | if diff: 249 | # decoder2 250 | up1_diff = self.up1_diff(down4, down3) 251 | up2_diff = self.up2_diff(up1_diff, down2) 252 | up3_diff = self.up3_diff(up2_diff, down1_1, down1_2) 253 | up4_diff = self.up4_diff(up3_diff, down0_1, down0_2) 254 | out_diff = self.out_diff(up4_diff) 255 | return out_1, out_2, out_diff 256 | else: 257 | return out_1, out_2 258 | 259 | 260 | if __name__ == "__main__": 261 | from ptflops import get_model_complexity_info 262 | x = torch.rand((1, 2, 512, 512)) 263 | model = CoDetectionCNN(n_channels=1, n_classes=2, sig=False) 264 | out = model(x) 265 | for i in range(len(out)): 266 | print(out[i].shape) 267 | 268 | macs, params = get_model_complexity_info(model, (2, 512, 512), as_strings=True, 269 | print_per_layer_stat=True, verbose=True) 270 | print('{:<30} {:<8}'.format('Computational complexity: ', macs)) 271 | print('{:<30} {:<8}'.format('Number of parameters: ', params)) -------------------------------------------------------------------------------- /scripts/dataset/source_dataset_mito.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Description: 3 | Author: weihuang 4 | Date: 2021-11-18 15:47:44 5 | LastEditors: Please set LastEditors 6 | LastEditTime: 2021-11-29 20:07:05 7 | ''' 8 | import os 9 | import sys 10 | import h5py 11 | import torch 12 | import random 13 | import numpy as np 14 | from PIL import Image 15 | import os.path as osp 16 | from random import randint 17 | from torch.utils import data 18 | from scipy.ndimage.interpolation import rotate 19 | from utils.pre_processing import normalization2, approximate_image, cropping 20 | from dataset.data_aug import aug_img_lab 21 | 22 | 23 | class sourceDataSet(data.Dataset): 24 | def __init__(self, root_img, root_label, list_path=None, crop_size=(512, 512), stride=1): 25 | print('Load %s' % root_img) 26 | f = h5py.File(root_img, 'r') 27 | self.raws = f['main'][:] 28 | f.close() 29 | f = h5py.File(root_label, 'r') 30 | self.labels = f['main'][:] 31 | f.close() 32 | print('Data shape:', self.raws.shape) 33 | self.crop_size = crop_size 34 | self.stride = stride 35 | self.length = self.raws.shape[0] 36 | self.padding = 100 37 | self.padded_size = [x+2*self.padding for x in self.crop_size] 38 | 39 | def __len__(self): 40 | # return int(sys.maxsize) 41 | return 400000 42 | 43 | def __getitem__(self, index): 44 | k = random.randint(0, self.length - 1 - self.stride) 45 | current_img = self.raws[k] 46 | current_label = self.labels[k] 47 | aux_img = self.raws[k+self.stride] 48 | aux_label = self.labels[k+self.stride] 49 | 50 | # cropping image with the input size 51 | size = current_img.shape 52 | y_loc = randint(0, size[0] - self.padded_size[0]) 53 | x_loc = randint(0, size[1] - self.padded_size[1]) 54 | current_img = 
cropping(current_img, self.padded_size[0], self.padded_size[1], y_loc, x_loc) 55 | current_label = cropping(current_label, self.padded_size[0], self.padded_size[1], y_loc, x_loc) 56 | aux_img = cropping(aux_img, self.padded_size[0], self.padded_size[1], y_loc, x_loc) 57 | aux_label = cropping(aux_label, self.padded_size[0], self.padded_size[1], y_loc, x_loc) 58 | 59 | # data augmentation 60 | current_img = normalization2(current_img.astype(np.float32), max=1, min=0) 61 | aux_img = normalization2(aux_img.astype(np.float32), max=1, min=0) 62 | seed = np.random.randint(2147483647) 63 | random.seed(seed) 64 | current_img, current_label = aug_img_lab(current_img, current_label, self.crop_size) 65 | random.seed(seed) 66 | aux_img, aux_label = aug_img_lab(aux_img, aux_label, self.crop_size) 67 | current_label = approximate_image(current_label.copy()) 68 | aux_label = approximate_image(aux_label.copy()) 69 | 70 | # crop padding 71 | if current_img.shape[0] > self.crop_size[0]: 72 | current_img = current_img[self.padding:-self.padding, self.padding:-self.padding] 73 | current_label = current_label[self.padding:-self.padding, self.padding:-self.padding] 74 | aux_img = aux_img[self.padding:-self.padding, self.padding:-self.padding] 75 | aux_label = aux_label[self.padding:-self.padding, self.padding:-self.padding] 76 | 77 | current_img = np.expand_dims(current_img, axis=0) # add additional dimension 78 | current_img = torch.from_numpy(current_img.astype(np.float32)).float() 79 | aux_img = np.expand_dims(aux_img, axis=0) # add additional dimension 80 | aux_img = torch.from_numpy(aux_img.astype(np.float32)).float() 81 | 82 | current_label = (current_label / 255).astype(np.bool) 83 | aux_label = (aux_label / 255).astype(np.bool) 84 | diff = np.bitwise_xor(current_label, aux_label) 85 | current_label = torch.from_numpy(current_label.astype(np.float32)).long() 86 | aux_label = torch.from_numpy(aux_label.astype(np.float32)).long() 87 | diff = torch.from_numpy(diff.astype(np.float32)).long() 88 | 89 | return current_img, current_label, aux_img, aux_label, diff 90 | 91 | 92 | class sourceDataSet_chang(data.Dataset): 93 | def __init__(self, root_img, root_label, list_path=None, crop_size=(512, 512), stride=1): 94 | print('Load %s' % root_img) 95 | f = h5py.File(root_img, 'r') 96 | self.raws = f['main'][:] 97 | f.close() 98 | f = h5py.File(root_label, 'r') 99 | self.labels = f['main'][:] 100 | f.close() 101 | print('Data shape:', self.raws.shape) 102 | self.crop_size = crop_size 103 | self.stride = stride 104 | self.length = self.raws.shape[0] 105 | self.padding = 100 106 | self.padded_size = [x+2*self.padding for x in self.crop_size] 107 | self.rigid_aug = True 108 | self.elastic = True 109 | self.angle = (0,359) 110 | self.prob = 0.8 111 | 112 | def __len__(self): 113 | # return int(sys.maxsize) 114 | return 400000 115 | 116 | def __getitem__(self, index): 117 | k = random.randint(0, self.length - 1 - self.stride) 118 | current_img = self.raws[k] 119 | current_label = self.labels[k] 120 | aux_img = self.raws[k+self.stride] 121 | aux_label = self.labels[k+self.stride] 122 | 123 | # cropping image with the input size 124 | size = current_img.shape 125 | y_loc = randint(0, size[0] - self.padded_size[0]) 126 | x_loc = randint(0, size[1] - self.padded_size[1]) 127 | current_img = cropping(current_img, self.padded_size[0], self.padded_size[1], y_loc, x_loc) 128 | current_label = cropping(current_label, self.padded_size[0], self.padded_size[1], y_loc, x_loc) 129 | aux_img = cropping(aux_img, self.padded_size[0], 
self.padded_size[1], y_loc, x_loc) 130 | aux_label = cropping(aux_label, self.padded_size[0], self.padded_size[1], y_loc, x_loc) 131 | 132 | # data augmentation 133 | if self.elastic and random.uniform(0,1) < self.prob: 134 | do_elastic = True 135 | else: 136 | do_elastic = False 137 | # rigid augmentation 138 | if self.rigid_aug: 139 | if random.uniform(0,1) < 0.5: 140 | current_img = np.flip(current_img, axis=0) 141 | current_label = np.flip(current_label, axis=0) 142 | aux_img = np.flip(aux_img, axis=0) 143 | aux_label = np.flip(aux_label, axis=0) 144 | if random.uniform(0,1) < 0.5: 145 | current_img = np.flip(current_img, axis=1) 146 | current_label = np.flip(current_label, axis=1) 147 | aux_img = np.flip(aux_img, axis=1) 148 | aux_label = np.flip(aux_label, axis=1) 149 | 150 | k = random.choice([0,1,2,3]) 151 | current_img = np.rot90(current_img, k) 152 | current_label = np.rot90(current_label, k) 153 | aux_img = np.rot90(aux_img, k) 154 | aux_label = np.rot90(aux_label, k) 155 | # random-angle rotation (despite its name, the 'elastic' flag gates a rotation, not an elastic warp) 156 | if do_elastic: 157 | angle = random.randint(self.angle[0], self.angle[1]) 158 | current_img = current_img.astype(np.float32) 159 | current_img = rotate(current_img, angle, axes=(0,1), reshape=False, order=3) 160 | current_label = rotate(current_label, angle, axes=(0,1), reshape=False, order=0) 161 | aux_img = aux_img.astype(np.float32) 162 | aux_img = rotate(aux_img, angle, axes=(0,1), reshape=False, order=3) 163 | aux_label = rotate(aux_label, angle, axes=(0,1), reshape=False, order=0) 164 | 165 | # crop padding 166 | if current_img.shape[0] > self.crop_size[0]: 167 | current_img = current_img[self.padding:-self.padding, self.padding:-self.padding] 168 | current_label = current_label[self.padding:-self.padding, self.padding:-self.padding] 169 | aux_img = aux_img[self.padding:-self.padding, self.padding:-self.padding] 170 | aux_label = aux_label[self.padding:-self.padding, self.padding:-self.padding] 171 | 172 | current_img = normalization2(current_img.astype(np.float32), max=1, min=0) 173 | aux_img = normalization2(aux_img.astype(np.float32), max=1, min=0) 174 | current_img = np.expand_dims(current_img, axis=0) # add channel dimension 175 | current_img = torch.from_numpy(current_img.astype(np.float32)).float() 176 | aux_img = np.expand_dims(aux_img, axis=0) # add channel dimension 177 | aux_img = torch.from_numpy(aux_img.astype(np.float32)).float() 178 | 179 | current_label = (current_label / 255).astype(bool) # fixed: np.bool was removed in NumPy 1.24 180 | aux_label = (aux_label / 255).astype(bool) 181 | diff = np.bitwise_xor(current_label, aux_label) 182 | current_label = torch.from_numpy(current_label.astype(np.float32)).long() 183 | aux_label = torch.from_numpy(aux_label.astype(np.float32)).long() 184 | diff = torch.from_numpy(diff.astype(np.float32)).long() 185 | 186 | return current_img, current_label, aux_img, aux_label, diff 187 | 188 | if __name__ == '__main__': 189 | # data_dir_img = '../data/Mito/human/training.hdf' 190 | # data_dir_label = '../data/Mito/human/training_groundtruth.hdf' 191 | data_dir_img = '../data/Mito/rat/training.hdf' 192 | data_dir_label = '../data/Mito/rat/training_groundtruth.hdf' 193 | data_list = None 194 | input_size = (512, 512) 195 | stride = 1 196 | dst = sourceDataSet_chang(data_dir_img, 197 | data_dir_label, 198 | data_list, 199 | crop_size=input_size, 200 | stride=stride) 201 | 202 | out_path = './data_temp' 203 | if not osp.exists(out_path): 204 | os.makedirs(out_path) 205 | for i, data in enumerate(dst): 206 | if i < 50: 207 | print(i) 208 | current_img, 
current_label, aux_img, aux_label, diff = data 209 | current_img = (current_img.numpy() * 255).astype(np.uint8) 210 | current_label = (current_label.numpy() * 255).astype(np.uint8) 211 | current_img = current_img.squeeze() 212 | aux_img = (aux_img.numpy() * 255).astype(np.uint8) 213 | aux_label = (aux_label.numpy() * 255).astype(np.uint8) 214 | aux_img = aux_img.squeeze() 215 | diff = (diff.numpy() * 255).astype(np.uint8) 216 | concat1 = np.concatenate([current_img, aux_img, diff], axis=1) 217 | concat2 = np.concatenate([current_label, aux_label, diff], axis=1) 218 | concat = np.concatenate([concat1, concat2], axis=0) 219 | Image.fromarray(concat).save(osp.join(out_path, str(i).zfill(4)+'.png')) 220 | else: 221 | break 222 | print('Done') 223 | -------------------------------------------------------------------------------- /scripts/dataset/target_dataset_mito.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Description: 3 | version: 0.0 4 | Author: Wei Huang 5 | Date: 2021-11-29 17:12:59 6 | ''' 7 | import os 8 | import h5py 9 | import torch 10 | from torch.utils import data 11 | import numpy as np 12 | import os.path as osp 13 | from PIL import Image 14 | from utils.pre_processing import normalization2, approximate_image, cropping, multi_cropping 15 | from utils.metrics import dice_coeff 16 | 17 | from sklearn.metrics import average_precision_score 18 | from sklearn.metrics import f1_score 19 | from sklearn.metrics import matthews_corrcoef 20 | 21 | class Evaluation(object): # NOTE: shadowed by the fuller Evaluation class defined later in this file 22 | def __init__(self, root_label, crop_size=[512, 512]): 23 | f = h5py.File(root_label, 'r') 24 | self.labels = f['main'][:] 25 | f.close() 26 | self.labels = self.labels[:, :crop_size[0], :crop_size[1]] 27 | self.length = self.labels.shape[0] 28 | self.labels = (self.labels / 255).astype(np.uint8) 29 | self.labels = self.labels.reshape(-1) 30 | 31 | def __call__(self, preds, metric='F1'): 32 | assert preds.shape[0] == self.length, "Prediction ERROR!" 
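# Editor's note (comments added for clarity, not in the original file): the 'F1'
# branch below binarizes the probability map at 0.5 *in place*, while 'mAP' needs
# the raw probabilities, so when both metrics are wanted, compute mAP first or
# pass a copy. Hypothetical usage sketch (path is an assumption):
#   evaluator = Evaluation('path/to/test_groundtruth.hdf')
#   mAP = evaluator(preds.copy(), metric='mAP')
#   F1 = evaluator(preds, metric='F1')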
33 | if metric == 'F1': 34 | preds[preds>=0.5] = 1 35 | preds[preds<0.5] = 0 36 | F1 = f1_score(self.labels, preds.reshape(-1)) 37 | return F1 38 | elif metric == 'mAP': 39 | mAP = average_precision_score(self.labels, preds.reshape(-1)) 40 | return mAP 41 | else: 42 | raise NotImplementedError 43 | 44 | 45 | class targetDataSet_val_twoimgs(data.Dataset): 46 | def __init__(self, root_img, root_label, list_path=None, crop_size=[512, 512], stride=1): 47 | print('Load %s' % root_img) 48 | f = h5py.File(root_img, 'r') 49 | self.raws = f['main'][:] 50 | f.close() 51 | # f = h5py.File(root_label, 'r') 52 | # self.labels = f['main'][:] 53 | # f.close() 54 | self.raws = self.raws[:, :crop_size[0], :crop_size[1]] 55 | # self.labels = self.labels[:, :crop_size[0], :crop_size[1]] 56 | print('Data shape:', self.raws.shape) 57 | self.stride = stride 58 | self.iters = self.raws.shape[0] - self.stride 59 | 60 | def __len__(self): 61 | return self.iters 62 | 63 | def __getitem__(self, index): 64 | current_img = self.raws[index] 65 | current_img = normalization2(current_img.astype(np.float32), max=1, min=0) 66 | aux_img = self.raws[index+self.stride] 67 | aux_img = normalization2(aux_img.astype(np.float32), max=1, min=0) 68 | 69 | # current_label = self.labels[index] 70 | # current_label = (current_label / 255).astype(np.bool) 71 | # aux_label = self.labels[index+self.stride] 72 | # aux_label = (aux_label / 255).astype(np.bool) 73 | 74 | # diff = np.bitwise_xor(current_label, aux_label) 75 | # current_label = torch.from_numpy(current_label.astype(np.float32)).long() 76 | # aux_label = torch.from_numpy(aux_label.astype(np.float32)).long() 77 | # diff = torch.from_numpy(diff.astype(np.float32)).long() 78 | 79 | current_img = np.expand_dims(current_img, axis=0) 80 | current_img = torch.from_numpy(current_img.astype(np.float32)).float() 81 | aux_img = np.expand_dims(aux_img, axis=0) 82 | aux_img = torch.from_numpy(aux_img.astype(np.float32)).float() 83 | # return current_img, current_label, aux_img, aux_label, diff 84 | return current_img, aux_img 85 | 86 | 87 | class targetDataSet_test_twoimgs(data.Dataset): 88 | def __init__(self, root_img, root_label, list_path=None, crop_size=[1024, 1024], stride=1): 89 | print('Load %s' % root_img) 90 | f = h5py.File(root_img, 'r') 91 | raws = f['main'][:] 92 | f.close() 93 | print('raw shape:', raws.shape) 94 | raws_padded = np.pad(raws, ((0,0),(256,256),(256,256)), mode='reflect') 95 | self.raws_padded_shape = raws_padded.shape 96 | print('padded raw shape:', self.raws_padded_shape) 97 | self.stride = stride 98 | self.crop_size = crop_size 99 | self.stride_xy = crop_size[0] // 2 # 512 100 | self.num_xy = ((self.raws_padded_shape[1] - crop_size[0]) // self.stride_xy) + 1 # 7+1 101 | assert (self.raws_padded_shape[1] - crop_size[0]) % self.stride_xy == 0, "padded error!" 
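# Editor's worked example of the tiling arithmetic above (a sketch; the
# 4096 x 4096 slice size is an assumption consistent with the Mito volumes):
# reflect-padding by 256 on each side gives 4096 + 512 = 4608 per axis, so with
# crop_size[0] = 1024 and stride_xy = 512,
#   num_xy = (4608 - 1024) // 512 + 1 = 7 + 1 = 8
# and the assert holds because (4608 - 1024) % 512 == 0.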
102 | self.num_per_image = self.num_xy * self.num_xy 103 | self.iter_image = self.raws_padded_shape[0] - self.stride 104 | self.iters = self.iter_image * self.num_per_image 105 | print('iters:', self.iters) 106 | 107 | # normalization2 108 | self.raws_padded_norm = np.zeros_like(raws_padded, dtype=np.float32) 109 | for k in range(self.raws_padded_shape[0]): 110 | self.raws_padded_norm[k] = normalization2(raws_padded[k].astype(np.float32), max=1, min=0) 111 | 112 | del raws_padded, raws 113 | 114 | self.reset_output() 115 | self.weight_vol = self.get_weight() 116 | 117 | def __len__(self): 118 | return self.iters 119 | 120 | def reset_output(self): 121 | self.out_results = np.zeros(self.raws_padded_shape, dtype=np.float32) 122 | self.weight_map = np.zeros(self.raws_padded_shape, dtype=np.float32) 123 | 124 | def __getitem__(self, index): 125 | pos_image = index // self.num_per_image 126 | pre_image = index % self.num_per_image 127 | pos_y = pre_image // self.num_xy 128 | pos_x = pre_image % self.num_xy 129 | 130 | # find position 131 | fromy = pos_y * self.stride_xy 132 | endy = fromy + self.crop_size[0] 133 | if endy > self.raws_padded_shape[1]: 134 | endy = self.raws_padded_shape[1] 135 | fromy = endy - self.crop_size[0] 136 | fromx = pos_x * self.stride_xy 137 | endx = fromx + self.crop_size[1] 138 | if endx > self.raws_padded_shape[2]: 139 | endx = self.raws_padded_shape[2] 140 | fromx = endx - self.crop_size[1] 141 | self.pos = [pos_image, fromy, fromx] 142 | 143 | current_img = self.raws_padded_norm[pos_image, fromx:endx, fromy:endy].copy() 144 | aux_img = self.raws_padded_norm[pos_image+self.stride, fromx:endx, fromy:endy].copy() 145 | 146 | current_img = current_img.astype(np.float32) 147 | current_img = np.expand_dims(current_img, axis=0) 148 | current_img = torch.from_numpy(np.ascontiguousarray(current_img)) 149 | aux_img = aux_img.astype(np.float32) 150 | aux_img = np.expand_dims(aux_img, axis=0) 151 | aux_img = torch.from_numpy(np.ascontiguousarray(aux_img)) 152 | return current_img, aux_img 153 | 154 | def get_weight(self, sigma=0.2, mu=0.0): 155 | yy, xx = np.meshgrid(np.linspace(-1, 1, self.crop_size[0], dtype=np.float32), 156 | np.linspace(-1, 1, self.crop_size[1], dtype=np.float32), indexing='ij') 157 | dd = np.sqrt(yy * yy + xx * xx) 158 | weight = 1e-6 + np.exp(-((dd - mu) ** 2 / (2.0 * sigma ** 2))) 159 | return weight 160 | 161 | def add_vol(self, pred_vol, axu_vol): 162 | pos_image, fromy, fromx = self.pos 163 | self.out_results[pos_image, fromx:fromx+self.crop_size[0], \ 164 | fromy:fromy+self.crop_size[1]] += pred_vol * self.weight_vol 165 | self.out_results[pos_image+self.stride, fromx:fromx+self.crop_size[0], \ 166 | fromy:fromy+self.crop_size[1]] += axu_vol * self.weight_vol 167 | self.weight_map[pos_image, fromx:fromx+self.crop_size[0], \ 168 | fromy:fromy+self.crop_size[1]] += self.weight_vol 169 | self.weight_map[pos_image+self.stride, fromx:fromx+self.crop_size[0], \ 170 | fromy:fromy+self.crop_size[1]] += self.weight_vol 171 | 172 | def get_results(self): 173 | self.out_results = self.out_results / self.weight_map 174 | self.out_results = self.out_results[:, 256:-256, 256:-256] 175 | return self.out_results 176 | 177 | 178 | class Evaluation(object): 179 | def __init__(self, root_label, list_path=None): 180 | print('Load %s' % root_label) 181 | f = h5py.File(root_label, 'r') 182 | self.labels = f['main'][:] 183 | f.close() 184 | if self.labels.max() > 1: 185 | self.labels = self.labels / 255.0 186 | self.labels = self.labels.astype(np.uint8) 187 | self.length 
= self.labels.shape[0] 188 | 189 | def __call__(self, preds, mode='dice'): 190 | if mode == 'dice': 191 | return self.metric_dice(preds) 192 | elif mode == 'map_2d': 193 | return self.metric_map_2d(preds) 194 | elif mode == 'map_3d': 195 | return self.metric_map_3d(preds) 196 | else: 197 | raise NotImplementedError 198 | 199 | def metric_dice(self, preds): 200 | assert preds.shape[0] == self.length, "Prediction ERROR!" 201 | dices = [] 202 | jacs = [] 203 | for k in range(self.length): 204 | dice, jac = dice_coeff(preds[k], self.labels[k]) 205 | dices.append(dice) 206 | jacs.append(jac) 207 | dice_avg = sum(dices) / len(dices) 208 | jac_avg = sum(jacs) / len(jacs) 209 | return dice_avg, jac_avg 210 | 211 | def metric_map_2d(self, preds): 212 | assert preds.shape[0] == self.length, "Prediction ERROR!" 213 | total_mAP = [] 214 | total_F1 = [] 215 | total_MCC = [] 216 | total_IoU = [] 217 | for i in range(self.length): 218 | pred_temp = preds[i] 219 | gt_temp = self.labels[i] 220 | 221 | serial_segs = gt_temp.reshape(-1) 222 | mAP = average_precision_score(serial_segs, pred_temp.reshape(-1)) 223 | 224 | bin_segs = pred_temp 225 | bin_segs[bin_segs>=0.5] = 1 226 | bin_segs[bin_segs<0.5] = 0 227 | serial_bin_segs = bin_segs.reshape(-1) 228 | 229 | intersection = np.logical_and(serial_segs==1, serial_bin_segs==1) 230 | union = np.logical_or(serial_segs==1, serial_bin_segs==1) 231 | IoU = np.sum(intersection) / np.sum(union) 232 | 233 | F1 = f1_score(serial_segs, serial_bin_segs) 234 | MCC = matthews_corrcoef(serial_segs, serial_bin_segs) 235 | 236 | total_mAP.append(mAP) 237 | total_F1.append(F1) 238 | total_MCC.append(MCC) 239 | total_IoU.append(IoU) 240 | mean_mAP = sum(total_mAP) / len(total_mAP) 241 | mean_F1 = sum(total_F1) / len(total_F1) 242 | mean_MCC = sum(total_MCC) / len(total_MCC) 243 | mean_IoU = sum(total_IoU) / len(total_IoU) 244 | return mean_mAP, mean_F1, mean_MCC, mean_IoU 245 | 246 | def metric_map_3d(self, preds): 247 | serial_segs = self.labels.reshape(-1) 248 | mAP = average_precision_score(serial_segs, preds.reshape(-1)) 249 | 250 | bin_segs = preds 251 | bin_segs[bin_segs>=0.5] = 1 252 | bin_segs[bin_segs<0.5] = 0 253 | serial_bin_segs = bin_segs.reshape(-1) 254 | 255 | intersection = np.logical_and(serial_segs==1, serial_bin_segs==1) 256 | union = np.logical_or(serial_segs==1, serial_bin_segs==1) 257 | IoU = np.sum(intersection) / np.sum(union) 258 | 259 | F1 = f1_score(serial_segs, serial_bin_segs) 260 | MCC = matthews_corrcoef(serial_segs, serial_bin_segs) 261 | return mAP, F1, MCC, IoU 262 | 263 | def get_gt(self): 264 | return self.labels -------------------------------------------------------------------------------- /logs/mitoh2r/valid.txt: -------------------------------------------------------------------------------- 1 | model-1, F1=0.166013 2 | model-500, F1=0.694333 3 | model-1000, F1=0.765657 4 | model-1500, F1=0.815347 5 | model-2000, F1=0.840149 6 | model-2500, F1=0.798959 7 | model-3000, F1=0.808936 8 | model-3500, F1=0.840538 9 | model-4000, F1=0.826774 10 | model-4500, F1=0.830708 11 | model-5000, F1=0.846518 12 | model-5500, F1=0.828942 13 | model-6000, F1=0.856743 14 | model-6500, F1=0.875197 15 | model-7000, F1=0.871860 16 | model-7500, F1=0.872448 17 | model-8000, F1=0.879584 18 | model-8500, F1=0.847825 19 | model-9000, F1=0.884540 20 | model-9500, F1=0.879012 21 | model-10000, F1=0.870604 22 | model-10500, F1=0.881229 23 | model-11000, F1=0.845631 24 | model-11500, F1=0.882071 25 | model-12000, F1=0.880083 26 | model-12500, F1=0.806776 27 | 
model-13000, F1=0.888163 28 | model-13500, F1=0.868866 29 | model-14000, F1=0.874019 30 | model-14500, F1=0.882985 31 | model-15000, F1=0.889874 32 | model-15500, F1=0.885897 33 | model-16000, F1=0.859054 34 | model-16500, F1=0.869360 35 | model-17000, F1=0.855655 36 | model-17500, F1=0.899912 37 | model-18000, F1=0.892127 38 | model-18500, F1=0.898174 39 | model-19000, F1=0.880274 40 | model-19500, F1=0.900318 41 | model-20000, F1=0.872653 42 | model-20500, F1=0.885900 43 | model-21000, F1=0.887946 44 | model-21500, F1=0.875934 45 | model-22000, F1=0.891290 46 | model-22500, F1=0.894720 47 | model-23000, F1=0.868979 48 | model-23500, F1=0.872248 49 | model-24000, F1=0.900196 50 | model-24500, F1=0.884373 51 | model-25000, F1=0.890645 52 | model-25500, F1=0.894680 53 | model-26000, F1=0.895192 54 | model-26500, F1=0.875093 55 | model-27000, F1=0.879990 56 | model-27500, F1=0.889368 57 | model-28000, F1=0.890145 58 | model-28500, F1=0.894925 59 | model-29000, F1=0.889114 60 | model-29500, F1=0.894284 61 | model-30000, F1=0.881518 62 | model-30500, F1=0.891113 63 | model-31000, F1=0.873437 64 | model-31500, F1=0.882141 65 | model-32000, F1=0.896053 66 | model-32500, F1=0.889101 67 | model-33000, F1=0.897763 68 | model-33500, F1=0.885698 69 | model-34000, F1=0.899488 70 | model-34500, F1=0.892759 71 | model-35000, F1=0.863704 72 | model-35500, F1=0.900409 73 | model-36000, F1=0.889959 74 | model-36500, F1=0.900860 75 | model-37000, F1=0.899802 76 | model-37500, F1=0.880422 77 | model-38000, F1=0.905819 78 | model-38500, F1=0.898653 79 | model-39000, F1=0.893815 80 | model-39500, F1=0.872850 81 | model-40000, F1=0.895197 82 | model-40500, F1=0.893282 83 | model-41000, F1=0.895669 84 | model-41500, F1=0.904650 85 | model-42000, F1=0.901514 86 | model-42500, F1=0.896747 87 | model-43000, F1=0.896941 88 | model-43500, F1=0.902746 89 | model-44000, F1=0.881218 90 | model-44500, F1=0.899585 91 | model-45000, F1=0.899873 92 | model-45500, F1=0.881324 93 | model-46000, F1=0.899130 94 | model-46500, F1=0.893631 95 | model-47000, F1=0.893070 96 | model-47500, F1=0.894541 97 | model-48000, F1=0.902244 98 | model-48500, F1=0.907968 99 | model-49000, F1=0.902587 100 | model-49500, F1=0.883358 101 | model-50000, F1=0.902692 102 | model-50500, F1=0.896788 103 | model-51000, F1=0.898472 104 | model-51500, F1=0.904798 105 | model-52000, F1=0.904030 106 | model-52500, F1=0.900943 107 | model-53000, F1=0.909454 108 | model-53500, F1=0.896319 109 | model-54000, F1=0.906194 110 | model-54500, F1=0.895921 111 | model-55000, F1=0.903622 112 | model-55500, F1=0.899873 113 | model-56000, F1=0.909207 114 | model-56500, F1=0.901314 115 | model-57000, F1=0.899252 116 | model-57500, F1=0.896696 117 | model-58000, F1=0.909924 118 | model-58500, F1=0.894613 119 | model-59000, F1=0.910474 120 | model-59500, F1=0.894498 121 | model-60000, F1=0.892616 122 | model-60500, F1=0.897417 123 | model-61000, F1=0.900210 124 | model-61500, F1=0.890144 125 | model-62000, F1=0.893614 126 | model-62500, F1=0.898760 127 | model-63000, F1=0.909029 128 | model-63500, F1=0.892273 129 | model-64000, F1=0.902395 130 | model-64500, F1=0.899343 131 | model-65000, F1=0.908293 132 | model-65500, F1=0.887099 133 | model-66000, F1=0.902804 134 | model-66500, F1=0.901224 135 | model-67000, F1=0.908449 136 | model-67500, F1=0.898841 137 | model-68000, F1=0.904988 138 | model-68500, F1=0.907865 139 | model-69000, F1=0.906136 140 | model-69500, F1=0.906927 141 | model-70000, F1=0.882088 142 | model-70500, F1=0.904660 143 | model-71000, F1=0.908277 144 | 
model-71500, F1=0.901279 145 | model-72000, F1=0.897170 146 | model-72500, F1=0.893189 147 | model-73000, F1=0.905586 148 | model-73500, F1=0.901568 149 | model-74000, F1=0.906731 150 | model-74500, F1=0.899590 151 | model-75000, F1=0.903950 152 | model-75500, F1=0.893038 153 | model-76000, F1=0.907115 154 | model-76500, F1=0.898732 155 | model-77000, F1=0.903726 156 | model-77500, F1=0.903835 157 | model-78000, F1=0.894493 158 | model-78500, F1=0.903668 159 | model-79000, F1=0.907674 160 | model-79500, F1=0.906642 161 | model-80000, F1=0.903553 162 | model-80500, F1=0.896321 163 | model-81000, F1=0.906191 164 | model-81500, F1=0.907724 165 | model-82000, F1=0.885405 166 | model-82500, F1=0.901515 167 | model-83000, F1=0.898441 168 | model-83500, F1=0.891830 169 | model-84000, F1=0.910918 170 | model-84500, F1=0.897221 171 | model-85000, F1=0.903263 172 | model-85500, F1=0.907357 173 | model-86000, F1=0.900900 174 | model-86500, F1=0.898898 175 | model-87000, F1=0.907334 176 | model-87500, F1=0.913518 177 | model-88000, F1=0.914221 178 | model-88500, F1=0.910858 179 | model-89000, F1=0.899486 180 | model-89500, F1=0.908140 181 | model-90000, F1=0.907325 182 | model-90500, F1=0.891564 183 | model-91000, F1=0.901771 184 | model-91500, F1=0.896865 185 | model-92000, F1=0.909588 186 | model-92500, F1=0.903009 187 | model-93000, F1=0.898630 188 | model-93500, F1=0.909070 189 | model-94000, F1=0.906356 190 | model-94500, F1=0.909479 191 | model-95000, F1=0.904178 192 | model-95500, F1=0.912348 193 | model-96000, F1=0.902002 194 | model-96500, F1=0.910247 195 | model-97000, F1=0.902901 196 | model-97500, F1=0.906205 197 | model-98000, F1=0.899345 198 | model-98500, F1=0.899905 199 | model-99000, F1=0.902022 200 | model-99500, F1=0.904681 201 | model-100000, F1=0.906446 202 | model-100500, F1=0.907891 203 | model-101000, F1=0.910300 204 | model-101500, F1=0.913668 205 | model-102000, F1=0.913545 206 | model-102500, F1=0.910940 207 | model-103000, F1=0.904561 208 | model-103500, F1=0.901391 209 | model-104000, F1=0.911612 210 | model-104500, F1=0.890472 211 | model-105000, F1=0.909495 212 | model-105500, F1=0.910659 213 | model-106000, F1=0.906580 214 | model-106500, F1=0.901213 215 | model-107000, F1=0.904955 216 | model-107500, F1=0.909704 217 | model-108000, F1=0.905711 218 | model-108500, F1=0.911871 219 | model-109000, F1=0.910012 220 | model-109500, F1=0.903267 221 | model-110000, F1=0.907794 222 | model-110500, F1=0.907015 223 | model-111000, F1=0.904926 224 | model-111500, F1=0.910157 225 | model-112000, F1=0.908774 226 | model-112500, F1=0.901256 227 | model-113000, F1=0.906629 228 | model-113500, F1=0.911202 229 | model-114000, F1=0.914782 230 | model-114500, F1=0.909787 231 | model-115000, F1=0.911527 232 | model-115500, F1=0.897285 233 | model-116000, F1=0.906924 234 | model-116500, F1=0.914525 235 | model-117000, F1=0.905704 236 | model-117500, F1=0.905920 237 | model-118000, F1=0.913797 238 | model-118500, F1=0.900041 239 | model-119000, F1=0.900471 240 | model-119500, F1=0.907736 241 | model-120000, F1=0.904388 242 | model-120500, F1=0.900865 243 | model-121000, F1=0.907028 244 | model-121500, F1=0.907616 245 | model-122000, F1=0.903368 246 | model-122500, F1=0.905301 247 | model-123000, F1=0.885109 248 | model-123500, F1=0.895346 249 | model-124000, F1=0.908967 250 | model-124500, F1=0.896468 251 | model-125000, F1=0.901389 252 | model-125500, F1=0.912316 253 | model-126000, F1=0.905206 254 | model-126500, F1=0.904833 255 | model-127000, F1=0.909819 256 | model-127500, F1=0.903353 
257 | model-128000, F1=0.907110 258 | model-128500, F1=0.909243 259 | model-129000, F1=0.911634 260 | model-129500, F1=0.904345 261 | model-130000, F1=0.908042 262 | model-130500, F1=0.906865 263 | model-131000, F1=0.903411 264 | model-131500, F1=0.912042 265 | model-132000, F1=0.909527 266 | model-132500, F1=0.908027 267 | model-133000, F1=0.906189 268 | model-133500, F1=0.900791 269 | model-134000, F1=0.901667 270 | model-134500, F1=0.906211 271 | model-135000, F1=0.901950 272 | model-135500, F1=0.905419 273 | model-136000, F1=0.910290 274 | model-136500, F1=0.907374 275 | model-137000, F1=0.901193 276 | model-137500, F1=0.903465 277 | model-138000, F1=0.908559 278 | model-138500, F1=0.909840 279 | model-139000, F1=0.902792 280 | model-139500, F1=0.905860 281 | model-140000, F1=0.899234 282 | model-140500, F1=0.907410 283 | model-141000, F1=0.907815 284 | model-141500, F1=0.904792 285 | model-142000, F1=0.908075 286 | model-142500, F1=0.903583 287 | model-143000, F1=0.904560 288 | model-143500, F1=0.901751 289 | model-144000, F1=0.895834 290 | model-144500, F1=0.901678 291 | model-145000, F1=0.906004 292 | model-145500, F1=0.898729 293 | model-146000, F1=0.910474 294 | model-146500, F1=0.906813 295 | model-147000, F1=0.904512 296 | model-147500, F1=0.908950 297 | model-148000, F1=0.906605 298 | model-148500, F1=0.904818 299 | model-149000, F1=0.900442 300 | model-149500, F1=0.906949 301 | model-150000, F1=0.906717 302 | model-150500, F1=0.905338 303 | model-151000, F1=0.897171 304 | model-151500, F1=0.908095 305 | model-152000, F1=0.899476 306 | model-152500, F1=0.908368 307 | model-153000, F1=0.902387 308 | model-153500, F1=0.907685 309 | model-154000, F1=0.910460 310 | model-154500, F1=0.906345 311 | model-155000, F1=0.903924 312 | model-155500, F1=0.907711 313 | model-156000, F1=0.910026 314 | model-156500, F1=0.907385 315 | model-157000, F1=0.906842 316 | model-157500, F1=0.904784 317 | model-158000, F1=0.896781 318 | model-158500, F1=0.909685 319 | model-159000, F1=0.908668 320 | model-159500, F1=0.902411 321 | model-160000, F1=0.902794 322 | model-160500, F1=0.910769 323 | model-161000, F1=0.903785 324 | model-161500, F1=0.911949 325 | model-162000, F1=0.907871 326 | model-162500, F1=0.911034 327 | model-163000, F1=0.903297 328 | model-163500, F1=0.909169 329 | model-164000, F1=0.911063 330 | model-164500, F1=0.907280 331 | model-165000, F1=0.911874 332 | model-165500, F1=0.907127 333 | model-166000, F1=0.902621 334 | model-166500, F1=0.906897 335 | model-167000, F1=0.901806 336 | model-167500, F1=0.910203 337 | model-168000, F1=0.907350 338 | model-168500, F1=0.909735 339 | model-169000, F1=0.907942 340 | model-169500, F1=0.904142 341 | model-170000, F1=0.905212 342 | model-170500, F1=0.910163 343 | model-171000, F1=0.907655 344 | model-171500, F1=0.908445 345 | model-172000, F1=0.908687 346 | model-172500, F1=0.909901 347 | model-173000, F1=0.911480 348 | model-173500, F1=0.911986 349 | model-174000, F1=0.910733 350 | model-174500, F1=0.905713 351 | model-175000, F1=0.911722 352 | model-175500, F1=0.907461 353 | model-176000, F1=0.903762 354 | model-176500, F1=0.906678 355 | model-177000, F1=0.908482 356 | model-177500, F1=0.906561 357 | model-178000, F1=0.909224 358 | model-178500, F1=0.906325 359 | model-179000, F1=0.909849 360 | model-179500, F1=0.906082 361 | model-180000, F1=0.906958 362 | model-180500, F1=0.909013 363 | model-181000, F1=0.912206 364 | model-181500, F1=0.910269 365 | model-182000, F1=0.912286 366 | model-182500, F1=0.906388 367 | model-183000, F1=0.911028 
368 | model-183500, F1=0.911394 369 | model-184000, F1=0.909132 370 | model-184500, F1=0.908070 371 | model-185000, F1=0.912146 372 | model-185500, F1=0.911897 373 | model-186000, F1=0.910571 374 | model-186500, F1=0.910183 375 | model-187000, F1=0.905452 376 | model-187500, F1=0.910467 377 | model-188000, F1=0.907359 378 | model-188500, F1=0.909035 379 | model-189000, F1=0.912186 380 | model-189500, F1=0.908374 381 | model-190000, F1=0.909637 382 | model-190500, F1=0.911524 383 | model-191000, F1=0.909609 384 | model-191500, F1=0.909738 385 | model-192000, F1=0.910535 386 | model-192500, F1=0.911011 387 | model-193000, F1=0.910553 388 | model-193500, F1=0.911173 389 | model-194000, F1=0.909536 390 | model-194500, F1=0.909370 391 | model-195000, F1=0.911200 392 | model-195500, F1=0.910635 393 | model-196000, F1=0.910389 394 | model-196500, F1=0.910183 395 | model-197000, F1=0.910362 396 | model-197500, F1=0.909446 397 | model-198000, F1=0.911092 398 | model-198500, F1=0.911131 399 | model-199000, F1=0.910602 400 | model-199500, F1=0.910254 401 | model-200000, F1=0.910211 402 | -------------------------------------------------------------------------------- /logs/mitor2h/valid.txt: -------------------------------------------------------------------------------- 1 | model-1, F1=0.118753 2 | model-500, F1=0.648005 3 | model-1000, F1=0.737206 4 | model-1500, F1=0.684702 5 | model-2000, F1=0.755597 6 | model-2500, F1=0.781379 7 | model-3000, F1=0.637741 8 | model-3500, F1=0.757590 9 | model-4000, F1=0.782989 10 | model-4500, F1=0.778304 11 | model-5000, F1=0.701538 12 | model-5500, F1=0.798416 13 | model-6000, F1=0.805298 14 | model-6500, F1=0.800642 15 | model-7000, F1=0.836052 16 | model-7500, F1=0.843923 17 | model-8000, F1=0.826717 18 | model-8500, F1=0.805132 19 | model-9000, F1=0.788449 20 | model-9500, F1=0.806226 21 | model-10000, F1=0.811304 22 | model-10500, F1=0.732171 23 | model-11000, F1=0.839811 24 | model-11500, F1=0.809285 25 | model-12000, F1=0.826254 26 | model-12500, F1=0.815004 27 | model-13000, F1=0.808367 28 | model-13500, F1=0.793709 29 | model-14000, F1=0.753661 30 | model-14500, F1=0.711671 31 | model-15000, F1=0.740057 32 | model-15500, F1=0.783489 33 | model-16000, F1=0.836221 34 | model-16500, F1=0.790822 35 | model-17000, F1=0.814266 36 | model-17500, F1=0.809355 37 | model-18000, F1=0.821638 38 | model-18500, F1=0.784106 39 | model-19000, F1=0.812189 40 | model-19500, F1=0.827447 41 | model-20000, F1=0.734190 42 | model-20500, F1=0.813084 43 | model-21000, F1=0.786057 44 | model-21500, F1=0.797790 45 | model-22000, F1=0.819665 46 | model-22500, F1=0.817588 47 | model-23000, F1=0.819377 48 | model-23500, F1=0.793716 49 | model-24000, F1=0.778332 50 | model-24500, F1=0.817763 51 | model-25000, F1=0.774270 52 | model-25500, F1=0.825070 53 | model-26000, F1=0.831159 54 | model-26500, F1=0.824178 55 | model-27000, F1=0.766064 56 | model-27500, F1=0.839226 57 | model-28000, F1=0.780388 58 | model-28500, F1=0.824167 59 | model-29000, F1=0.785164 60 | model-29500, F1=0.826640 61 | model-30000, F1=0.832511 62 | model-30500, F1=0.821630 63 | model-31000, F1=0.834174 64 | model-31500, F1=0.815993 65 | model-32000, F1=0.803619 66 | model-32500, F1=0.818478 67 | model-33000, F1=0.849298 68 | model-33500, F1=0.811819 69 | model-34000, F1=0.820826 70 | model-34500, F1=0.848442 71 | model-35000, F1=0.788345 72 | model-35500, F1=0.811267 73 | model-36000, F1=0.801624 74 | model-36500, F1=0.837233 75 | model-37000, F1=0.769339 76 | model-37500, F1=0.830885 77 | model-38000, 
F1=0.799361 78 | model-38500, F1=0.799536 79 | model-39000, F1=0.839329 80 | model-39500, F1=0.826113 81 | model-40000, F1=0.843869 82 | model-40500, F1=0.830722 83 | model-41000, F1=0.824721 84 | model-41500, F1=0.817969 85 | model-42000, F1=0.843912 86 | model-42500, F1=0.801935 87 | model-43000, F1=0.821694 88 | model-43500, F1=0.838855 89 | model-44000, F1=0.775513 90 | model-44500, F1=0.830518 91 | model-45000, F1=0.621025 92 | model-45500, F1=0.806798 93 | model-46000, F1=0.842446 94 | model-46500, F1=0.822388 95 | model-47000, F1=0.825475 96 | model-47500, F1=0.824240 97 | model-48000, F1=0.821825 98 | model-48500, F1=0.825978 99 | model-49000, F1=0.803402 100 | model-49500, F1=0.821359 101 | model-50000, F1=0.818584 102 | model-50500, F1=0.821678 103 | model-51000, F1=0.819689 104 | model-51500, F1=0.819429 105 | model-52000, F1=0.763564 106 | model-52500, F1=0.838585 107 | model-53000, F1=0.844932 108 | model-53500, F1=0.850853 109 | model-54000, F1=0.821346 110 | model-54500, F1=0.803209 111 | model-55000, F1=0.826121 112 | model-55500, F1=0.836860 113 | model-56000, F1=0.814526 114 | model-56500, F1=0.808176 115 | model-57000, F1=0.787583 116 | model-57500, F1=0.806341 117 | model-58000, F1=0.832654 118 | model-58500, F1=0.773546 119 | model-59000, F1=0.821371 120 | model-59500, F1=0.767069 121 | model-60000, F1=0.800361 122 | model-60500, F1=0.804174 123 | model-61000, F1=0.777507 124 | model-61500, F1=0.826883 125 | model-62000, F1=0.829701 126 | model-62500, F1=0.799164 127 | model-63000, F1=0.807427 128 | model-63500, F1=0.804535 129 | model-64000, F1=0.830561 130 | model-64500, F1=0.832451 131 | model-65000, F1=0.845884 132 | model-65500, F1=0.805454 133 | model-66000, F1=0.819261 134 | model-66500, F1=0.832951 135 | model-67000, F1=0.839597 136 | model-67500, F1=0.839404 137 | model-68000, F1=0.802194 138 | model-68500, F1=0.849116 139 | model-69000, F1=0.826818 140 | model-69500, F1=0.842220 141 | model-70000, F1=0.790240 142 | model-70500, F1=0.825574 143 | model-71000, F1=0.842927 144 | model-71500, F1=0.835866 145 | model-72000, F1=0.831791 146 | model-72500, F1=0.830713 147 | model-73000, F1=0.818610 148 | model-73500, F1=0.800427 149 | model-74000, F1=0.813338 150 | model-74500, F1=0.780626 151 | model-75000, F1=0.845921 152 | model-75500, F1=0.837127 153 | model-76000, F1=0.835485 154 | model-76500, F1=0.824526 155 | model-77000, F1=0.850828 156 | model-77500, F1=0.841938 157 | model-78000, F1=0.807763 158 | model-78500, F1=0.801235 159 | model-79000, F1=0.817374 160 | model-79500, F1=0.838667 161 | model-80000, F1=0.842367 162 | model-80500, F1=0.857641 163 | model-81000, F1=0.838100 164 | model-81500, F1=0.843258 165 | model-82000, F1=0.774572 166 | model-82500, F1=0.831994 167 | model-83000, F1=0.803690 168 | model-83500, F1=0.802612 169 | model-84000, F1=0.805686 170 | model-84500, F1=0.820574 171 | model-85000, F1=0.836076 172 | model-85500, F1=0.843523 173 | model-86000, F1=0.805196 174 | model-86500, F1=0.833724 175 | model-87000, F1=0.838458 176 | model-87500, F1=0.832815 177 | model-88000, F1=0.823854 178 | model-88500, F1=0.819582 179 | model-89000, F1=0.821085 180 | model-89500, F1=0.846441 181 | model-90000, F1=0.832257 182 | model-90500, F1=0.844726 183 | model-91000, F1=0.835205 184 | model-91500, F1=0.812407 185 | model-92000, F1=0.799395 186 | model-92500, F1=0.801366 187 | model-93000, F1=0.852555 188 | model-93500, F1=0.834293 189 | model-94000, F1=0.814162 190 | model-94500, F1=0.830107 191 | model-95000, F1=0.802481 192 | model-95500, F1=0.858709 
193 | model-96000, F1=0.807565 194 | model-96500, F1=0.772761 195 | model-97000, F1=0.832729 196 | model-97500, F1=0.814160 197 | model-98000, F1=0.824483 198 | model-98500, F1=0.847008 199 | model-99000, F1=0.812389 200 | model-99500, F1=0.840109 201 | model-100000, F1=0.801762 202 | model-100500, F1=0.809584 203 | model-101000, F1=0.830808 204 | model-101500, F1=0.805836 205 | model-102000, F1=0.811061 206 | model-102500, F1=0.820224 207 | model-103000, F1=0.852490 208 | model-103500, F1=0.835090 209 | model-104000, F1=0.826645 210 | model-104500, F1=0.813421 211 | model-105000, F1=0.793647 212 | model-105500, F1=0.828824 213 | model-106000, F1=0.804500 214 | model-106500, F1=0.829446 215 | model-107000, F1=0.800423 216 | model-107500, F1=0.846238 217 | model-108000, F1=0.837304 218 | model-108500, F1=0.829648 219 | model-109000, F1=0.820212 220 | model-109500, F1=0.831995 221 | model-110000, F1=0.834401 222 | model-110500, F1=0.799285 223 | model-111000, F1=0.827904 224 | model-111500, F1=0.853108 225 | model-112000, F1=0.813695 226 | model-112500, F1=0.841802 227 | model-113000, F1=0.843367 228 | model-113500, F1=0.823584 229 | model-114000, F1=0.827203 230 | model-114500, F1=0.811048 231 | model-115000, F1=0.816365 232 | model-115500, F1=0.837627 233 | model-116000, F1=0.838633 234 | model-116500, F1=0.843313 235 | model-117000, F1=0.838974 236 | model-117500, F1=0.833903 237 | model-118000, F1=0.859484 238 | model-118500, F1=0.844388 239 | model-119000, F1=0.796401 240 | model-119500, F1=0.777987 241 | model-120000, F1=0.831998 242 | model-120500, F1=0.817144 243 | model-121000, F1=0.832931 244 | model-121500, F1=0.838063 245 | model-122000, F1=0.856489 246 | model-122500, F1=0.819761 247 | model-123000, F1=0.814140 248 | model-123500, F1=0.805624 249 | model-124000, F1=0.826702 250 | model-124500, F1=0.809847 251 | model-125000, F1=0.857646 252 | model-125500, F1=0.816608 253 | model-126000, F1=0.833572 254 | model-126500, F1=0.802217 255 | model-127000, F1=0.828576 256 | model-127500, F1=0.847288 257 | model-128000, F1=0.839736 258 | model-128500, F1=0.842066 259 | model-129000, F1=0.846781 260 | model-129500, F1=0.789422 261 | model-130000, F1=0.829700 262 | model-130500, F1=0.823999 263 | model-131000, F1=0.825772 264 | model-131500, F1=0.812650 265 | model-132000, F1=0.784175 266 | model-132500, F1=0.833679 267 | model-133000, F1=0.859857 268 | model-133500, F1=0.815701 269 | model-134000, F1=0.827978 270 | model-134500, F1=0.851222 271 | model-135000, F1=0.833681 272 | model-135500, F1=0.845779 273 | model-136000, F1=0.822837 274 | model-136500, F1=0.836330 275 | model-137000, F1=0.830961 276 | model-137500, F1=0.810988 277 | model-138000, F1=0.798386 278 | model-138500, F1=0.822547 279 | model-139000, F1=0.849336 280 | model-139500, F1=0.858331 281 | model-140000, F1=0.813040 282 | model-140500, F1=0.846806 283 | model-141000, F1=0.832068 284 | model-141500, F1=0.844225 285 | model-142000, F1=0.837429 286 | model-142500, F1=0.805568 287 | model-143000, F1=0.833556 288 | model-143500, F1=0.853132 289 | model-144000, F1=0.833598 290 | model-144500, F1=0.825956 291 | model-145000, F1=0.827578 292 | model-145500, F1=0.829072 293 | model-146000, F1=0.850305 294 | model-146500, F1=0.825653 295 | model-147000, F1=0.844544 296 | model-147500, F1=0.840674 297 | model-148000, F1=0.835713 298 | model-148500, F1=0.850072 299 | model-149000, F1=0.837974 300 | model-149500, F1=0.821907 301 | model-150000, F1=0.853275 302 | model-150500, F1=0.825029 303 | model-151000, F1=0.853956 304 | 
model-151500, F1=0.839591 305 | model-152000, F1=0.843267 306 | model-152500, F1=0.850919 307 | model-153000, F1=0.861352 308 | model-153500, F1=0.859569 309 | model-154000, F1=0.850604 310 | model-154500, F1=0.844556 311 | model-155000, F1=0.843529 312 | model-155500, F1=0.855038 313 | model-156000, F1=0.852148 314 | model-156500, F1=0.854638 315 | model-157000, F1=0.839759 316 | model-157500, F1=0.860942 317 | model-158000, F1=0.857513 318 | model-158500, F1=0.858326 319 | model-159000, F1=0.868555 320 | model-159500, F1=0.845170 321 | model-160000, F1=0.849999 322 | model-160500, F1=0.819306 323 | model-161000, F1=0.849969 324 | model-161500, F1=0.844825 325 | model-162000, F1=0.834272 326 | model-162500, F1=0.831393 327 | model-163000, F1=0.848509 328 | model-163500, F1=0.856107 329 | model-164000, F1=0.859925 330 | model-164500, F1=0.844554 331 | model-165000, F1=0.832045 332 | model-165500, F1=0.838899 333 | model-166000, F1=0.841369 334 | model-166500, F1=0.854808 335 | model-167000, F1=0.828304 336 | model-167500, F1=0.840189 337 | model-168000, F1=0.859960 338 | model-168500, F1=0.849525 339 | model-169000, F1=0.858631 340 | model-169500, F1=0.853106 341 | model-170000, F1=0.854655 342 | model-170500, F1=0.852107 343 | model-171000, F1=0.852821 344 | model-171500, F1=0.837687 345 | model-172000, F1=0.846739 346 | model-172500, F1=0.850207 347 | model-173000, F1=0.873426 348 | model-173500, F1=0.858330 349 | model-174000, F1=0.860424 350 | model-174500, F1=0.846004 351 | model-175000, F1=0.848893 352 | model-175500, F1=0.852221 353 | model-176000, F1=0.855692 354 | model-176500, F1=0.856565 355 | model-177000, F1=0.848803 356 | model-177500, F1=0.834743 357 | model-178000, F1=0.859321 358 | model-178500, F1=0.853701 359 | model-179000, F1=0.857127 360 | model-179500, F1=0.839078 361 | model-180000, F1=0.859471 362 | model-180500, F1=0.863859 363 | model-181000, F1=0.849971 364 | model-181500, F1=0.855549 365 | model-182000, F1=0.851765 366 | model-182500, F1=0.850832 367 | model-183000, F1=0.859330 368 | model-183500, F1=0.864485 369 | model-184000, F1=0.861109 370 | model-184500, F1=0.853818 371 | model-185000, F1=0.854540 372 | model-185500, F1=0.850520 373 | model-186000, F1=0.847279 374 | model-186500, F1=0.840304 375 | model-187000, F1=0.851260 376 | model-187500, F1=0.853604 377 | model-188000, F1=0.855204 378 | model-188500, F1=0.853799 379 | model-189000, F1=0.850643 380 | model-189500, F1=0.842167 381 | model-190000, F1=0.837294 382 | model-190500, F1=0.840624 383 | model-191000, F1=0.845661 384 | model-191500, F1=0.843604 385 | model-192000, F1=0.851697 386 | model-192500, F1=0.844468 387 | model-193000, F1=0.847193 388 | model-193500, F1=0.841244 389 | model-194000, F1=0.845478 390 | model-194500, F1=0.846914 391 | model-195000, F1=0.843940 392 | model-195500, F1=0.845283 393 | model-196000, F1=0.840671 394 | model-196500, F1=0.844417 395 | model-197000, F1=0.847023 396 | model-197500, F1=0.850731 397 | model-198000, F1=0.845653 398 | model-198500, F1=0.848107 399 | model-199000, F1=0.850237 400 | model-199500, F1=0.850259 401 | model-200000, F1=0.849481 402 | -------------------------------------------------------------------------------- /scripts/utils/pre_processing.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from scipy.ndimage.interpolation import map_coordinates 3 | from scipy.ndimage.filters import gaussian_filter 4 | from random import randint 5 | 6 | 7 | def add_elastic_transform(image, alpha, 
sigma, pad_size=30, seed=None): 8 | """ 9 | Args: 10 | image : numpy array of image 11 | alpha : α is a scaling factor 12 | sigma : σ is an elasticity coefficient 13 | seed : random integer used to seed the RandomState 14 | Return : 15 | image : elastically transformed numpy array of image 16 | """ 17 | image_height = int(image.shape[0]) 18 | image_width = int(image.shape[1]) 19 | image = np.pad(image, pad_size, mode="symmetric") 20 | if seed is None: 21 | seed = randint(1, 100) 22 | random_state = np.random.RandomState(seed) 23 | else: 24 | random_state = np.random.RandomState(seed) 25 | shape = image.shape 26 | dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), 27 | sigma, mode="constant", cval=0) * alpha 28 | dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), 29 | sigma, mode="constant", cval=0) * alpha 30 | 31 | x, y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0])) 32 | indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1)) 33 | return cropping(map_coordinates(image, indices, order=1).reshape(shape), 34 | image_height,image_width, pad_size, pad_size), seed 35 | 36 | 37 | def flip(image, option_value): 38 | """ 39 | Args: 40 | image : numpy array of image 41 | option_value : integer in [0, 3] selecting the flip 42 | Return : 43 | image : numpy array of flipped image 44 | """ 45 | if option_value == 0: 46 | # vertical 47 | image = np.flip(image, option_value) 48 | elif option_value == 1: 49 | # horizontal 50 | image = np.flip(image, option_value) 51 | elif option_value == 2: 52 | # horizontally and vertically flip 53 | image = np.flip(image, 0) 54 | image = np.flip(image, 1) 55 | else: 56 | image = image 57 | # no effect 58 | return image 59 | 60 | 61 | def add_gaussian_noise(image, mean=0, std=1): 62 | """ 63 | Args: 64 | image : numpy array of image 65 | mean : mean of the gaussian noise 66 | std : standard deviation of the gaussian noise 67 | Return : 68 | image : numpy array of image with gaussian noise added 69 | """ 70 | gaus_noise = np.random.normal(mean, std, image.shape) 71 | image = image.astype("int16") 72 | noise_img = image + gaus_noise 73 | noise_img = ceil_floor_image(noise_img) # fixed: clip the noisy image to [0, 255]; the original clipped 'image' and returned the unclipped result 74 | return noise_img 75 | 76 | 77 | def add_uniform_noise(image, low=-10, high=10): 78 | """ 79 | Args: 80 | image : numpy array of image 81 | low : lower boundary of output interval 82 | high : upper boundary of output interval 83 | Return : 84 | image : numpy array of image with uniform noise added 85 | """ 86 | uni_noise = np.random.uniform(low, high, image.shape) 87 | image = image.astype("int16") 88 | noise_img = image + uni_noise 89 | noise_img = ceil_floor_image(noise_img) # fixed: clip the noisy image, as above 90 | return noise_img 91 | 92 | 93 | def change_brightness(image, value): 94 | """ 95 | Args: 96 | image : numpy array of image 97 | value : brightness offset to add 98 | Return : 99 | image : numpy array of image with brightness added 100 | """ 101 | image = image.astype("int16") 102 | image = image + value 103 | image = ceil_floor_image(image) 104 | return image 105 | 106 | 107 | def ceil_floor_image(image): 108 | """ 109 | Args: 110 | image : numpy array of image in datatype int16 111 | Return : 112 | image : numpy array of image in datatype uint8 with ceiling (maximum 255) and flooring (minimum 0) 113 | """ 114 | image[image > 255] = 255 115 | image[image < 0] = 0 116 | image = image.astype("uint8") 117 | return image 118 | 119 | 120 | def approximate_image(image): 121 | """ 122 | Args: 123 | image : numpy array of image in datatype int16 124 | Return : 125 | image : numpy array of image in datatype uint8 only with 255 and 0 126 | """ 127 | 
image[image >= 127.5] = 255 # fixed: >= so that a value of exactly 127.5 is not left unmapped 128 | image[image < 127.5] = 0 129 | image = image.astype("uint8") 130 | return image 131 | 132 | 133 | def normalization1(image, mean, std): 134 | """ Normalization using mean and std 135 | Args : 136 | image : numpy array of image 137 | mean, std : mean to subtract and standard deviation to divide by (after scaling to [0, 1]) 138 | Return : 139 | image : numpy array of image with values turned into standard scores 140 | """ 141 | 142 | image = image / 255 # values will lie between 0 and 1. 143 | image = (image - mean) / std 144 | 145 | return image 146 | 147 | 148 | def normalization2(image, max, min): 149 | """Normalization to range of [min, max] 150 | Args : 151 | image : numpy array of image 152 | max, min : upper and lower bounds of the output range 153 | Return : 154 | image : numpy array of image min-max rescaled to [min, max] 155 | """ 156 | image_new = (image - np.min(image))*(max - min)/(np.max(image)-np.min(image)) + min 157 | return image_new 158 | 159 | 160 | def stride_size(image_len, crop_num, crop_size): 161 | """return stride size 162 | Args : 163 | image_len(int) : length of one side of image (width or height) 164 | crop_num(int) : number of crop in certain direction 165 | crop_size(int) : size of crop 166 | Return : 167 | stride_size(int) : stride size 168 | """ 169 | if crop_num == 1: 170 | stride = 0 171 | else: 172 | stride = int((image_len - crop_size) / (crop_num - 1)) 173 | return stride 174 | 175 | 176 | def multi_cropping(image, crop_size, crop_num1, crop_num2): 177 | """crop the image into a stack of overlapping tiles 178 | Args : 179 | images : numpy arrays of images 180 | crop_size(int) : size of cropped image 181 | crop_num2 (int) : number of crop in horizontal way 182 | crop_num1 (int) : number of crop in vertical way 183 | Return : 184 | cropped_imgs : numpy arrays of stacked images 185 | """ 186 | 187 | img_height, img_width = image.shape[0], image.shape[1] 188 | assert crop_size*crop_num1 >= img_height and crop_size * \ 189 | crop_num2 >= img_width, "Whole image cannot be sufficiently expressed" 190 | assert crop_num1 <= img_height - crop_size + 1 and crop_num2 <= img_width - \ 191 | crop_size + 1, "Too many number of crops" 192 | 193 | cropped_imgs = [] 194 | # int((img_height - crop_size)/(crop_num1 - 1)) 195 | dim1_stride = stride_size(img_height, crop_num1, crop_size) 196 | # int((img_width - crop_size)/(crop_num2 - 1)) 197 | dim2_stride = stride_size(img_width, crop_num2, crop_size) 198 | for i in range(crop_num1): 199 | for j in range(crop_num2): 200 | cropped_imgs.append(cropping(image, crop_size,crop_size, 201 | dim1_stride*i, dim2_stride*j)) 202 | return np.asarray(cropped_imgs) 203 | 204 | 205 | # IT IS NOT USED FOR PAD AND CROP DATA OPERATION 206 | # IF YOU WANT TO USE CROP AND PAD USE THIS FUNCTION 207 | """ 208 | def multi_padding(images, in_size, out_size, mode): 209 | '''Pad the images to in_size 210 | Args : 211 | images : numpy array of images (CxHxW) 212 | in_size(int) : the input_size of model (512) 213 | out_size(int) : the output_size of model (388) 214 | mode(str) : mode of padding 215 | Return : 216 | padded_imgs: numpy arrays of padded images 217 | ''' 218 | pad_size = int((in_size - out_size)/2) 219 | padded_imgs = [] 220 | for num in range(images.shape[0]): 221 | padded_imgs.append(add_padding(images[num], in_size, out_size, mode=mode)) 222 | return np.asarray(padded_imgs) 223 | 224 | """ 225 | 226 | 227 | def cropping(image, y, x, dim1, dim2): 228 | """crop a y-by-x patch at (dim1, dim2) from the image 229 | Args : 230 | image : numpy array of image 231 | y(int), x(int) : height and width of the crop 232 | dim1(int) : vertical location of crop 233 | 
dim2(int) : horizontal location of crop 234 | Return : 235 | cropped_img: numpy array of cropped image 236 | """ 237 | 238 | cropped_img = image[dim1:dim1+y, dim2:dim2+x] 239 | return cropped_img 240 | 241 | 242 | def add_padding(image, in_size, out_size, mode): 243 | """Pad the image to in_size 244 | Args : 245 | images : numpy array of images 246 | in_size(int) : the input_size of model 247 | out_size(int) : the output_size of model 248 | mode(str) : mode of padding 249 | Return : 250 | padded_img: numpy array of padded image 251 | """ 252 | pad_size = int((in_size - out_size)/2) 253 | padded_img = np.pad(image, pad_size, mode=mode) 254 | return padded_img 255 | 256 | 257 | def division_array(crop_size, crop_num1, crop_num2, dim1, dim2): 258 | """Make division array 259 | Args : 260 | crop_size(int) : size of cropped image 261 | crop_num2 (int) : number of crop in horizontal way 262 | crop_num1 (int) : number of crop in vertical way 263 | dim1(int) : vertical size of output 264 | dim2(int) : horizontal size_of_output 265 | Return : 266 | div_array : numpy array of numbers of 1,2,4 267 | """ 268 | div_array = np.zeros([dim1, dim2]) # make division array 269 | one_array = np.ones([crop_size, crop_size]) # one array to be added to div_array 270 | dim1_stride = stride_size(dim1, crop_num1, crop_size) # vertical stride 271 | dim2_stride = stride_size(dim2, crop_num2, crop_size) # horizontal stride 272 | for i in range(crop_num1): 273 | for j in range(crop_num2): 274 | # add ones to div_array at specific position 275 | div_array[dim1_stride*i:dim1_stride*i + crop_size, 276 | dim2_stride*j:dim2_stride*j + crop_size] += one_array 277 | return div_array 278 | 279 | 280 | def image_concatenate(image, crop_num1, crop_num2, dim1, dim2): 281 | """concatenate images 282 | Args : 283 | image : output images (should be square) 284 | crop_num2 (int) : number of crop in horizontal way (2) 285 | crop_num1 (int) : number of crop in vertical way (2) 286 | dim1(int) : vertical size of output (512) 287 | dim2(int) : horizontal size_of_output (512) 288 | Return : 289 | div_array : numpy arrays of numbers of 1,2,4 290 | """ 291 | crop_size = image.shape[1] # size of crop 292 | empty_array = np.zeros([dim1, dim2]).astype("float64") # to make sure no overflow 293 | dim1_stride = stride_size(dim1, crop_num1, crop_size) # vertical stride 294 | dim2_stride = stride_size(dim2, crop_num2, crop_size) # horizontal stride 295 | index = 0 296 | for i in range(crop_num1): 297 | for j in range(crop_num2): 298 | # add image to empty_array at specific position 299 | empty_array[dim1_stride*i:dim1_stride*i + crop_size, 300 | dim2_stride*j:dim2_stride*j + crop_size] += image[index] 301 | index += 1 302 | return empty_array 303 | 304 | 305 | if __name__ == "__main__": 306 | from PIL import Image 307 | 308 | b = Image.open("../data/train/images/14.png") 309 | c = Image.open("../data/train/masks/14.png") 310 | 311 | original = np.array(b) 312 | originall = np.array(c) 313 | original_norm = normalization2(original, max=1, min=0) 314 | print(original_norm) 315 | 316 | b = Image.open("../readme_images/original.png") 317 | original = np.array(b) 318 | """ 319 | original1 = add_gaussian_noise(original, 0, 100) 320 | original1 = Image.fromarray(original1) 321 | original1.show() 322 | """ 323 | original1 = add_uniform_noise(original, -100, 100) 324 | original1 = Image.fromarray(original1) 325 | original1.show() 326 | """ 327 | original1 = change_brightness(original, 50) 328 | original1 = Image.fromarray(original1) 329 | original1.show() 
330 | original1 = add_elastic_transform(original, 10, 4, 1)[0] 331 | original1 = Image.fromarray(original1) 332 | original1.show() 333 | """ 334 | -------------------------------------------------------------------------------- /scripts/model/advanced_model.py: -------------------------------------------------------------------------------- 1 | # full assembly of the sub-parts to form the complete net 2 | import torch 3 | import torch.nn as nn 4 | import torch.nn.functional as F 5 | 6 | from torch.autograd import Variable 7 | import numpy as np 8 | from PIL import Image 9 | from torch.nn.functional import sigmoid 10 | 11 | class Double_conv(nn.Module): 12 | '''(conv => GroupNorm => ReLU) * 2, no pooling''' 13 | 14 | def __init__(self, in_ch, out_ch): 15 | """ 16 | Args: 17 | in_ch(int) : input channel 18 | out_ch(int) : output channel 19 | """ 20 | super(Double_conv, self).__init__() 21 | self.conv = nn.Sequential( 22 | nn.Conv2d(in_ch, out_ch, 3, padding=1, stride=1), 23 | # nn.InstanceNorm2d(out_ch), 24 | nn.GroupNorm(num_groups=32, num_channels=out_ch, eps=1e-5,affine=False), 25 | nn.ReLU(inplace=True), 26 | nn.Conv2d(out_ch, out_ch, 3, padding=1, stride=1), 27 | nn.GroupNorm(num_groups=32, num_channels=out_ch, eps=1e-5,affine=False), 28 | # nn.InstanceNorm2d(out_ch), 29 | nn.ReLU(inplace=True) 30 | ) 31 | 32 | def forward(self, x): 33 | x = self.conv(x) 34 | return x 35 | 36 | 37 | class Conv_down(nn.Module): 38 | '''(conv => ReLU) * 2 => MaxPool2d''' 39 | 40 | def __init__(self, in_ch, out_ch): 41 | """ 42 | Args: 43 | in_ch(int) : input channel 44 | out_ch(int) : output channel 45 | """ 46 | super(Conv_down, self).__init__() 47 | self.conv = Double_conv(in_ch, out_ch) 48 | self.pool = nn.MaxPool2d(kernel_size=2, stride=2) 49 | 50 | def forward(self, x): 51 | x = self.conv(x) 52 | pool_x = self.pool(x) 53 | return pool_x, x 54 | 55 | 56 | class Conv_up(nn.Module): 57 | '''upsample => 1x1 conv => concat with skip => Double_conv''' 58 | 59 | def __init__(self, in_ch, out_ch): 60 | """ 61 | Args: 62 | in_ch(int) : input channel 63 | out_ch(int) : output channel 64 | """ 65 | super(Conv_up, self).__init__() 66 | self.conv = Double_conv(in_ch, out_ch) 67 | self.up = nn.Conv2d(in_ch, out_ch, kernel_size=1, padding=0, stride=1) 68 | self.interp = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) 69 | 70 | def forward(self, x1, x2): 71 | x1 = self.interp(x1) 72 | x1 = self.up(x1) 73 | x1_dim = x1.size()[2] 74 | x2 = extract_img(x1_dim, x2) 75 | x1 = torch.cat((x1, x2), dim=1) 76 | x1 = self.conv(x1) 77 | return x1 78 | 79 | 80 | class Conv_up_nl(nn.Module): 81 | '''upsample => Double_conv, without a skip connection''' 82 | 83 | def __init__(self, in_ch, out_ch): 84 | """ 85 | Args: 86 | in_ch(int) : input channel 87 | out_ch(int) : output channel 88 | """ 89 | super(Conv_up_nl, self).__init__() 90 | self.conv = Double_conv(in_ch, out_ch) 91 | self.interp = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) 92 | 93 | def forward(self, x1): 94 | x1 = self.interp(x1) 95 | x1 = self.conv(x1) 96 | return x1 97 | 98 | 99 | def extract_img(size, in_tensor): 100 | """ 101 | Args: 102 | size(int) : size of cut 103 | in_tensor(tensor) : tensor to be cut 104 | """ 105 | dim1, dim2 = in_tensor.size()[2:] 106 | in_tensor = in_tensor[:, :, int((dim1 - size) / 2):int((dim1 + size) / 2), 107 | int((dim2 - size) / 2):int((dim2 + size) / 2)] 108 | return in_tensor 109 | 110 | 111 | class CleanU_Net(nn.Module): 112 | def __init__(self, in_channels, out_channels): 113 | super(CleanU_Net, self).__init__() 114 | self.Conv_down1 = 
Conv_down(in_channels, 64) 115 | self.Conv_down2 = Conv_down(64, 128) 116 | self.Conv_down3 = Conv_down(128, 256) 117 | self.Conv_down4 = Conv_down(256, 512) 118 | self.Conv_down5 = Conv_down(512, 1024) 119 | 120 | self.Conv_up1 = Conv_up(1024, 512) 121 | self.Conv_up2 = Conv_up(512, 256) 122 | self.Conv_up3 = Conv_up(256, 128) 123 | self.Conv_up4 = Conv_up(128, 64) 124 | self.Conv_out = nn.Conv2d(64, out_channels, 1, padding=0, stride=1) 125 | 126 | def forward(self, x): 127 | x, conv1 = self.Conv_down1(x) 128 | # print("dConv1 => down1|", x.shape) 129 | x, conv2 = self.Conv_down2(x) 130 | # print("dConv2 => down2|", x.shape) 131 | x, conv3 = self.Conv_down3(x) 132 | # print("dConv3 => down3|", x.shape) 133 | x, conv4 = self.Conv_down4(x) 134 | # print("dConv4 => down4|", x.shape) 135 | _, x = self.Conv_down5(x) 136 | # print("dConv5|", x.shape) 137 | x = self.Conv_up1(x, conv4) 138 | # print("up1 => uConv1|", x.shape) 139 | x = self.Conv_up2(x, conv3) 140 | # print("up2 => uConv2|", x.shape) 141 | x = self.Conv_up3(x, conv2) 142 | # print("up3 => uConv3|", x.shape) 143 | x = self.Conv_up4(x, conv1) 144 | feature = x 145 | output = self.Conv_out(x) 146 | return feature, output 147 | 148 | class DoubleU_Net(nn.Module): 149 | def __init__(self, in_channels, out_channels): 150 | super(DoubleU_Net, self).__init__() 151 | self.Conv_down1 = Conv_down(in_channels, 64) 152 | self.Conv_down2 = Conv_down(64, 128) 153 | self.Conv_down3 = Conv_down(128, 256) 154 | self.Conv_down4 = Conv_down(256, 512) 155 | self.Conv_down5 = Conv_down(512, 1024) 156 | 157 | self.Conv_up1 = Conv_up(1024, 512) 158 | self.Conv_up2 = Conv_up(512, 256) 159 | self.Conv_up3 = Conv_up(256, 128) 160 | self.Conv_out1 = nn.Conv2d(128,out_channels,1,padding = 0,stride = 1) 161 | self.Conv_up4 = Conv_up(128, 64) 162 | self.Conv_out2 = nn.Conv2d(64, out_channels, 1, padding=0, stride=1) 163 | 164 | def forward(self, x): 165 | x, conv1 = self.Conv_down1(x) 166 | # print("dConv1 => down1|", x.shape) 167 | x, conv2 = self.Conv_down2(x) 168 | # print("dConv2 => down2|", x.shape) 169 | x, conv3 = self.Conv_down3(x) 170 | # print("dConv3 => down3|", x.shape) 171 | x, conv4 = self.Conv_down4(x) 172 | # print("dConv4 => down4|", x.shape) 173 | _, x = self.Conv_down5(x) 174 | # print("dConv5|", x.shape) 175 | x = self.Conv_up1(x, conv4) 176 | # print("up1 => uConv1|", x.shape) 177 | x = self.Conv_up2(x, conv3) 178 | # print("up2 => uConv2|", x.shape) 179 | x = self.Conv_up3(x, conv2) 180 | # print("up3 => uConv3|", x.shape) 181 | x1 = self.Conv_out1(x) 182 | x = self.Conv_up4(x, conv1) 183 | x2 = self.Conv_out2(x) 184 | return x1, x2 185 | 186 | 187 | class U_NetEncoder(nn.Module): 188 | def __init__(self, in_channels): 189 | super(U_NetEncoder, self).__init__() 190 | self.in_channels = in_channels 191 | self.Conv_down1 = Conv_down(in_channels, 64) 192 | self.Conv_down2 = Conv_down(64, 128) 193 | self.Conv_down3 = Conv_down(128, 256) 194 | self.Conv_down4 = Conv_down(256, 512) 195 | self.Conv_down5 = Conv_down(512, 1024) 196 | 197 | def forward(self, x): 198 | encoder_outputs = [] 199 | x, conv1 = self.Conv_down1(x) 200 | encoder_outputs.append(conv1) 201 | x, conv2 = self.Conv_down2(x) 202 | encoder_outputs.append(conv2) 203 | x, conv3 = self.Conv_down3(x) 204 | encoder_outputs.append(conv3) 205 | x, conv4 = self.Conv_down4(x) 206 | _, x = self.Conv_down5(x) 207 | encoder_outputs.append(conv4) 208 | return x, encoder_outputs 209 | 210 | 211 | class Domain_Decoder(nn.Module): 212 | def __init__(self, out_channels=2): 213 | 
super(Domain_Decoder, self).__init__() 214 | self.out_channels = out_channels 215 | self.Conv_up1 = Conv_up(1024, 512) 216 | self.Conv_up2 = Conv_up(512, 256) 217 | self.Conv_up3 = Conv_up(256, 128) 218 | self.Conv_up4 = Conv_up(128, 64) 219 | self.Conv_out = nn.Conv2d(64, out_channels, 1, padding=0, stride=1) 220 | 221 | def forward(self, x, encoder_outputs): 222 | encoder_outputs.reverse() 223 | # print("dConv5|", x.shape) 224 | x = self.Conv_up1(x, encoder_outputs[0]) 225 | # print("up1 => uConv1|", x.shape) 226 | x = self.Conv_up2(x, encoder_outputs[1]) 227 | # print("up2 => uConv2|", x.shape) 228 | x = self.Conv_up3(x, encoder_outputs[2]) 229 | # print("up3 => uConv3|", x.shape) 230 | x = self.Conv_up4(x, encoder_outputs[3]) 231 | feature = x 232 | output = self.Conv_out(x) 233 | 234 | return feature, output 235 | 236 | 237 | class Rec_Decoder(nn.Module): 238 | def __init__(self, out_channels=1): 239 | super(Rec_Decoder, self).__init__() 240 | self.out_channels = out_channels 241 | self.Conv_up1 = Conv_up_nl(1024, 512) 242 | self.Conv_up2 = Conv_up_nl(512, 256) 243 | self.Conv_up3 = Conv_up_nl(256, 128) 244 | self.Conv_up4 = Conv_up_nl(128, 64) 245 | self.Conv_out = nn.Conv2d(64, out_channels, 1, padding=0, stride=1) 246 | 247 | def forward(self, x): 248 | # print("dConv5|", x.shape) 249 | x = self.Conv_up1(x) 250 | # print("up1 => uConv1|", x.shape) 251 | x = self.Conv_up2(x) 252 | # print("up2 => uConv2|", x.shape) 253 | x = self.Conv_up3(x) 254 | # print("up3 => uConv3|", x.shape) 255 | x = self.Conv_up4(x) 256 | output = self.Conv_out(x) 257 | 258 | return output 259 | 260 | class source2targetNet(nn.Module): 261 | def __init__(self, in_channels, out_channels): 262 | super(source2targetNet, self).__init__() 263 | 264 | self.encoder = U_NetEncoder(in_channels) 265 | self.domain_decoder = Domain_Decoder(out_channels) 266 | self.rec_decoder = Rec_Decoder(in_channels) 267 | 268 | def forward(self, inputs): 269 | x, encoder_outputs = self.encoder(inputs) 270 | feature, pred = self.domain_decoder(x, encoder_outputs) 271 | recimg = self.rec_decoder(x) 272 | 273 | return recimg, feature, pred 274 | 275 | def get_target_segmentation_net(self): 276 | return unet_from_encoder_decoder(self.encoder, self.domain_decoder) 277 | 278 | class source2targetNet_seg(nn.Module): 279 | def __init__(self, in_channels, out_channels): 280 | super(source2targetNet_seg, self).__init__() 281 | 282 | self.encoder = U_NetEncoder(in_channels) 283 | self.domain_decoder = Domain_Decoder(out_channels) 284 | 285 | def forward(self, inputs): 286 | x, encoder_outputs = self.encoder(inputs) 287 | feature, pred = self.domain_decoder(x, encoder_outputs) 288 | 289 | return feature, pred 290 | 291 | class UNet2D(nn.Module): 292 | 293 | def __init__(self, in_channels, out_channels): 294 | super(UNet2D, self).__init__() 295 | 296 | self.in_channels = in_channels 297 | self.out_channels = out_channels 298 | 299 | # contractive path 300 | self.encoder = U_NetEncoder(in_channels) 301 | # expansive path 302 | self.decoder = Domain_Decoder(out_channels) 303 | 304 | def forward(self, inputs): 305 | x, encoder_outputs = self.encoder(inputs) 306 | 307 | feature, x = self.decoder(x, encoder_outputs) 308 | 309 | return feature, x 310 | 311 | 312 | def unet_from_encoder_decoder(encoder, decoder): 313 | net = UNet2D(in_channels=encoder.in_channels, out_channels=decoder.out_channels) 314 | 315 | new_encoder_dict = net.encoder.state_dict() 316 | new_decoder_dict = net.decoder.state_dict() 317 | 318 | encoder_dict = encoder.state_dict() 
319 | decoder_dict = decoder.state_dict() 320 | 321 | for k, v in encoder_dict.items(): 322 | if k in new_encoder_dict: 323 | new_encoder_dict[k] = v 324 | else: 325 | print("val model encoder parameter copy error!!!") 326 | net.encoder.load_state_dict(new_encoder_dict) 327 | 328 | for k, v in decoder_dict.items(): 329 | if k in new_decoder_dict: 330 | new_decoder_dict[k] = v 331 | else: 332 | print("val model decoder parameter copy error!!!") 333 | net.decoder.load_state_dict(new_decoder_dict) 334 | 335 | return net 336 | 337 | class DomainDiscriminator(nn.Module): 338 | def __init__(self, input_channels,input_size,num_classes,fc_classifier = 3): 339 | super(DomainDiscriminator, self).__init__() 340 | self.fc_classifier = fc_classifier 341 | self.fc_channels = [288, 144, 2] 342 | self.conv_channels = [48, 48, 48] 343 | self.input_size = input_size 344 | 345 | self.conv_features = nn.Sequential() 346 | self.fc_features = nn.Sequential() 347 | 348 | # convolutional layers 349 | in_channels = input_channels 350 | data_size = input_size 351 | for i, out_channels in enumerate(self.conv_channels): 352 | conv = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1), 353 | nn.GroupNorm(num_groups=4, num_channels=out_channels, eps=0, affine=False), 354 | nn.ReLU()) 355 | self.conv_features.add_module('conv%d' % (i + 1), conv) 356 | in_channels = out_channels 357 | data_size /= 2 358 | 359 | # full connections 360 | in_channels = self.conv_channels[-1]*data_size*data_size 361 | for i, out_channels in enumerate(self.fc_channels): 362 | if i == fc_classifier - 1: 363 | fc = nn.Sequential(nn.Linear(int(in_channels), out_channels)) 364 | else: 365 | fc = nn.Sequential(nn.Linear(int(in_channels), out_channels), 366 | nn.GroupNorm(num_groups=4, num_channels=out_channels, eps=0, affine=False), 367 | nn.ReLU()) 368 | self.fc_features.add_module('linear%d' % (i + 1), fc) 369 | in_channels = out_channels 370 | 371 | # self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True) 372 | self.relu = nn.ReLU(inplace=True) 373 | self.gpN = nn.GroupNorm(num_groups=4, num_channels=num_classes, eps=0, affine=False) 374 | # nn.InstanceNorm2d(out_ch), 375 | # nn.Dropout(p=0.2), 376 | 377 | # self.up_sample = nn.Upsample(scale_factor=32, mode='bilinear') 378 | # self.sigmoid = nn.Sigmoid() 379 | 380 | 381 | def forward(self, x): 382 | 383 | for i in range(len(self.conv_channels)): 384 | x = getattr(self.conv_features, 'conv%d' % (i + 1))(x) 385 | x = F.max_pool2d(x, kernel_size=2) 386 | 387 | x = x.view(x.size(0), -1) 388 | for i in range(self.fc_classifier): 389 | x = getattr(self.fc_features, 'linear%d' % (i + 1))(x) 390 | 391 | # x = self.up_sample(x) 392 | # x = self.sigmoid(x) 393 | 394 | return x 395 | 396 | if __name__ == "__main__": 397 | from ptflops import get_model_complexity_info 398 | input = np.random.random((2,1,512,512)).astype(np.float32) 399 | x = torch.tensor(input).to('cuda:0') 400 | 401 | model = source2targetNet_seg(in_channels=1, out_channels=2).cuda() 402 | # recimg, feature, pred = model(x) 403 | # print(recimg.shape) 404 | feature, pred = model(x) 405 | print(feature.shape) 406 | print(pred.shape) 407 | 408 | macs, params = get_model_complexity_info(model, (1, 512, 512), as_strings=True, 409 | print_per_layer_stat=True, verbose=True) 410 | print('{:<30} {:<8}'.format('Computational complexity: ', macs)) 411 | print('{:<30} {:<8}'.format('Number of parameters: ', params)) -------------------------------------------------------------------------------- 
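Note: a minimal usage sketch for the U-Net above (not part of the repository). It assumes the repository root is on PYTHONPATH and that inputs are single-channel patches whose height and width are divisible by 16, since the encoder pools four times before the decoder upsamples back to full resolution.

import torch
from scripts.model.advanced_model import CleanU_Net

net = CleanU_Net(in_channels=1, out_channels=2).eval()
patch = torch.randn(1, 1, 256, 256)         # (batch, channel, height, width)
with torch.no_grad():
    feature, logits = net(patch)            # feature: (1, 64, 256, 256); logits: (1, 2, 256, 256)
prob = torch.softmax(logits, dim=1)[:, 1]   # foreground probability map
mask = (prob > 0.5).float()                 # binary segmentation mask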
/logs/vnc2lucchi1/valid.txt: -------------------------------------------------------------------------------- 1 | model-1, dice=0.128978, jac=0.069106 2 | model-500, dice=0.580416, jac=0.411523 3 | model-1000, dice=0.565513, jac=0.396346 4 | model-1500, dice=0.587933, jac=0.419481 5 | model-2000, dice=0.597651, jac=0.429445 6 | model-2500, dice=0.661575, jac=0.497768 7 | model-3000, dice=0.599166, jac=0.432813 8 | model-3500, dice=0.633090, jac=0.465454 9 | model-4000, dice=0.651207, jac=0.485739 10 | model-4500, dice=0.597812, jac=0.428432 11 | model-5000, dice=0.587503, jac=0.419791 12 | model-5500, dice=0.636165, jac=0.470075 13 | model-6000, dice=0.642652, jac=0.476879 14 | model-6500, dice=0.600462, jac=0.430308 15 | model-7000, dice=0.626277, jac=0.460942 16 | model-7500, dice=0.633024, jac=0.464444 17 | model-8000, dice=0.552837, jac=0.384319 18 | model-8500, dice=0.610984, jac=0.442141 19 | model-9000, dice=0.606369, jac=0.436702 20 | model-9500, dice=0.559549, jac=0.392485 21 | model-10000, dice=0.668611, jac=0.503989 22 | model-10500, dice=0.589699, jac=0.421226 23 | model-11000, dice=0.638463, jac=0.472365 24 | model-11500, dice=0.633336, jac=0.465730 25 | model-12000, dice=0.637753, jac=0.470062 26 | model-12500, dice=0.569380, jac=0.400193 27 | model-13000, dice=0.705470, jac=0.547240 28 | model-13500, dice=0.675091, jac=0.511422 29 | model-14000, dice=0.652567, jac=0.486214 30 | model-14500, dice=0.617118, jac=0.449099 31 | model-15000, dice=0.659571, jac=0.494118 32 | model-15500, dice=0.690405, jac=0.530037 33 | model-16000, dice=0.666180, jac=0.502697 34 | model-16500, dice=0.662119, jac=0.497501 35 | model-17000, dice=0.615733, jac=0.447449 36 | model-17500, dice=0.696559, jac=0.537608 37 | model-18000, dice=0.683106, jac=0.524500 38 | model-18500, dice=0.665108, jac=0.504169 39 | model-19000, dice=0.682965, jac=0.520708 40 | model-19500, dice=0.690974, jac=0.529933 41 | model-20000, dice=0.672677, jac=0.508610 42 | model-20500, dice=0.692288, jac=0.531308 43 | model-21000, dice=0.667382, jac=0.504543 44 | model-21500, dice=0.673574, jac=0.510466 45 | model-22000, dice=0.648686, jac=0.482613 46 | model-22500, dice=0.669868, jac=0.507315 47 | model-23000, dice=0.648006, jac=0.483691 48 | model-23500, dice=0.706422, jac=0.548521 49 | model-24000, dice=0.706641, jac=0.548650 50 | model-24500, dice=0.655595, jac=0.492639 51 | model-25000, dice=0.689050, jac=0.527117 52 | model-25500, dice=0.601490, jac=0.432272 53 | model-26000, dice=0.653226, jac=0.487255 54 | model-26500, dice=0.675427, jac=0.512240 55 | model-27000, dice=0.665240, jac=0.500075 56 | model-27500, dice=0.697030, jac=0.537474 57 | model-28000, dice=0.677655, jac=0.514260 58 | model-28500, dice=0.669855, jac=0.505290 59 | model-29000, dice=0.739123, jac=0.587738 60 | model-29500, dice=0.720432, jac=0.565379 61 | model-30000, dice=0.721875, jac=0.565789 62 | model-30500, dice=0.683667, jac=0.521458 63 | model-31000, dice=0.742192, jac=0.592919 64 | model-31500, dice=0.727175, jac=0.574236 65 | model-32000, dice=0.676847, jac=0.513038 66 | model-32500, dice=0.738567, jac=0.587693 67 | model-33000, dice=0.696912, jac=0.537042 68 | model-33500, dice=0.687340, jac=0.525621 69 | model-34000, dice=0.679457, jac=0.518129 70 | model-34500, dice=0.729252, jac=0.576961 71 | model-35000, dice=0.697982, jac=0.539475 72 | model-35500, dice=0.705281, jac=0.547566 73 | model-36000, dice=0.721058, jac=0.565557 74 | model-36500, dice=0.709237, jac=0.552665 75 | model-37000, dice=0.690573, jac=0.529143 76 | model-37500, 
dice=0.720310, jac=0.565267 77 | model-38000, dice=0.717176, jac=0.562499 78 | model-38500, dice=0.735126, jac=0.583605 79 | model-39000, dice=0.722176, jac=0.566850 80 | model-39500, dice=0.708600, jac=0.550789 81 | model-40000, dice=0.707088, jac=0.548335 82 | model-40500, dice=0.707973, jac=0.550408 83 | model-41000, dice=0.716477, jac=0.562053 84 | model-41500, dice=0.708805, jac=0.551062 85 | model-42000, dice=0.734544, jac=0.582913 86 | model-42500, dice=0.711018, jac=0.554051 87 | model-43000, dice=0.665071, jac=0.501308 88 | model-43500, dice=0.715723, jac=0.558499 89 | model-44000, dice=0.735862, jac=0.583996 90 | model-44500, dice=0.718784, jac=0.563440 91 | model-45000, dice=0.761985, jac=0.617421 92 | model-45500, dice=0.719117, jac=0.564872 93 | model-46000, dice=0.698446, jac=0.540725 94 | model-46500, dice=0.695734, jac=0.535489 95 | model-47000, dice=0.733138, jac=0.581091 96 | model-47500, dice=0.725618, jac=0.572972 97 | model-48000, dice=0.725186, jac=0.572354 98 | model-48500, dice=0.685163, jac=0.525561 99 | model-49000, dice=0.709334, jac=0.552501 100 | model-49500, dice=0.717862, jac=0.563819 101 | model-50000, dice=0.726748, jac=0.574219 102 | model-50500, dice=0.742382, jac=0.593294 103 | model-51000, dice=0.741393, jac=0.590676 104 | model-51500, dice=0.746592, jac=0.597765 105 | model-52000, dice=0.637675, jac=0.471418 106 | model-52500, dice=0.706903, jac=0.550271 107 | model-53000, dice=0.696762, jac=0.536401 108 | model-53500, dice=0.675059, jac=0.512739 109 | model-54000, dice=0.719683, jac=0.565670 110 | model-54500, dice=0.711623, jac=0.554615 111 | model-55000, dice=0.724898, jac=0.572438 112 | model-55500, dice=0.702143, jac=0.543893 113 | model-56000, dice=0.723889, jac=0.570416 114 | model-56500, dice=0.743429, jac=0.594855 115 | model-57000, dice=0.762832, jac=0.618596 116 | model-57500, dice=0.682297, jac=0.521749 117 | model-58000, dice=0.725981, jac=0.571741 118 | model-58500, dice=0.761734, jac=0.617826 119 | model-59000, dice=0.740814, jac=0.590997 120 | model-59500, dice=0.737020, jac=0.586535 121 | model-60000, dice=0.749257, jac=0.601543 122 | model-60500, dice=0.743384, jac=0.595166 123 | model-61000, dice=0.700524, jac=0.545723 124 | model-61500, dice=0.602412, jac=0.443326 125 | model-62000, dice=0.716786, jac=0.561452 126 | model-62500, dice=0.754073, jac=0.607133 127 | model-63000, dice=0.745005, jac=0.596226 128 | model-63500, dice=0.745278, jac=0.597033 129 | model-64000, dice=0.701954, jac=0.542057 130 | model-64500, dice=0.757004, jac=0.610762 131 | model-65000, dice=0.723076, jac=0.569707 132 | model-65500, dice=0.739383, jac=0.590283 133 | model-66000, dice=0.750730, jac=0.603172 134 | model-66500, dice=0.729685, jac=0.576602 135 | model-67000, dice=0.707930, jac=0.550932 136 | model-67500, dice=0.730038, jac=0.578514 137 | model-68000, dice=0.742314, jac=0.593176 138 | model-68500, dice=0.746741, jac=0.598947 139 | model-69000, dice=0.761915, jac=0.618102 140 | model-69500, dice=0.769178, jac=0.627813 141 | model-70000, dice=0.753952, jac=0.607859 142 | model-70500, dice=0.767551, jac=0.624781 143 | model-71000, dice=0.720162, jac=0.566555 144 | model-71500, dice=0.773803, jac=0.634066 145 | model-72000, dice=0.765582, jac=0.622662 146 | model-72500, dice=0.761791, jac=0.617609 147 | model-73000, dice=0.778955, jac=0.639645 148 | model-73500, dice=0.723737, jac=0.570088 149 | model-74000, dice=0.785587, jac=0.649771 150 | model-74500, dice=0.771072, jac=0.631047 151 | model-75000, dice=0.704398, jac=0.545613 152 | model-75500, 
dice=0.742135, jac=0.594380 153 | model-76000, dice=0.725596, jac=0.571662 154 | model-76500, dice=0.727654, jac=0.575610 155 | model-77000, dice=0.767758, jac=0.626378 156 | model-77500, dice=0.749955, jac=0.602817 157 | model-78000, dice=0.755327, jac=0.609319 158 | model-78500, dice=0.770446, jac=0.629096 159 | model-79000, dice=0.756323, jac=0.611307 160 | model-79500, dice=0.767911, jac=0.625638 161 | model-80000, dice=0.751334, jac=0.605738 162 | model-80500, dice=0.654718, jac=0.497462 163 | model-81000, dice=0.725028, jac=0.575453 164 | model-81500, dice=0.757452, jac=0.612681 165 | model-82000, dice=0.728490, jac=0.577467 166 | model-82500, dice=0.713994, jac=0.558869 167 | model-83000, dice=0.698871, jac=0.543265 168 | model-83500, dice=0.663196, jac=0.503248 169 | model-84000, dice=0.723674, jac=0.570217 170 | model-84500, dice=0.715141, jac=0.558785 171 | model-85000, dice=0.651891, jac=0.488681 172 | model-85500, dice=0.723364, jac=0.569544 173 | model-86000, dice=0.682577, jac=0.523040 174 | model-86500, dice=0.661166, jac=0.498861 175 | model-87000, dice=0.708012, jac=0.551139 176 | model-87500, dice=0.727453, jac=0.574751 177 | model-88000, dice=0.782539, jac=0.644481 178 | model-88500, dice=0.754406, jac=0.607512 179 | model-89000, dice=0.743398, jac=0.593836 180 | model-89500, dice=0.747367, jac=0.598737 181 | model-90000, dice=0.735907, jac=0.583974 182 | model-90500, dice=0.788383, jac=0.652683 183 | model-91000, dice=0.779472, jac=0.640780 184 | model-91500, dice=0.725586, jac=0.572839 185 | model-92000, dice=0.633128, jac=0.471235 186 | model-92500, dice=0.775298, jac=0.635369 187 | model-93000, dice=0.741806, jac=0.592821 188 | model-93500, dice=0.734530, jac=0.583114 189 | model-94000, dice=0.694134, jac=0.536445 190 | model-94500, dice=0.766591, jac=0.624463 191 | model-95000, dice=0.730489, jac=0.578431 192 | model-95500, dice=0.734585, jac=0.582788 193 | model-96000, dice=0.694113, jac=0.538019 194 | model-96500, dice=0.721576, jac=0.569069 195 | model-97000, dice=0.721944, jac=0.567584 196 | model-97500, dice=0.702023, jac=0.548262 197 | model-98000, dice=0.772033, jac=0.630948 198 | model-98500, dice=0.754237, jac=0.608284 199 | model-99000, dice=0.752806, jac=0.607193 200 | model-99500, dice=0.760495, jac=0.616478 201 | model-100000, dice=0.761648, jac=0.618596 202 | model-100500, dice=0.774773, jac=0.635356 203 | model-101000, dice=0.759499, jac=0.615141 204 | model-101500, dice=0.797301, jac=0.664765 205 | model-102000, dice=0.778504, jac=0.639360 206 | model-102500, dice=0.764873, jac=0.622343 207 | model-103000, dice=0.774863, jac=0.634604 208 | model-103500, dice=0.779952, jac=0.642126 209 | model-104000, dice=0.750704, jac=0.603416 210 | model-104500, dice=0.786566, jac=0.650413 211 | model-105000, dice=0.763013, jac=0.619386 212 | model-105500, dice=0.782364, jac=0.644826 213 | model-106000, dice=0.775364, jac=0.635416 214 | model-106500, dice=0.759561, jac=0.616472 215 | model-107000, dice=0.747986, jac=0.601368 216 | model-107500, dice=0.761537, jac=0.617494 217 | model-108000, dice=0.769068, jac=0.627200 218 | model-108500, dice=0.790053, jac=0.656620 219 | model-109000, dice=0.754303, jac=0.608710 220 | model-109500, dice=0.786130, jac=0.649605 221 | model-110000, dice=0.773824, jac=0.632997 222 | model-110500, dice=0.764978, jac=0.622813 223 | model-111000, dice=0.761359, jac=0.617671 224 | model-111500, dice=0.736920, jac=0.587546 225 | model-112000, dice=0.772599, jac=0.632137 226 | model-112500, dice=0.788840, jac=0.653592 227 | model-113000, 
dice=0.727754, jac=0.577534 228 | model-113500, dice=0.731553, jac=0.583243 229 | model-114000, dice=0.629154, jac=0.469550 230 | model-114500, dice=0.735930, jac=0.586914 231 | model-115000, dice=0.790181, jac=0.654941 232 | model-115500, dice=0.779364, jac=0.640199 233 | model-116000, dice=0.780882, jac=0.643210 234 | model-116500, dice=0.785664, jac=0.648114 235 | model-117000, dice=0.766887, jac=0.624324 236 | model-117500, dice=0.754410, jac=0.609009 237 | model-118000, dice=0.674115, jac=0.516461 238 | model-118500, dice=0.769751, jac=0.628469 239 | model-119000, dice=0.734240, jac=0.584449 240 | model-119500, dice=0.734624, jac=0.584854 241 | model-120000, dice=0.739251, jac=0.591396 242 | model-120500, dice=0.759500, jac=0.615607 243 | model-121000, dice=0.737699, jac=0.589689 244 | model-121500, dice=0.719161, jac=0.568567 245 | model-122000, dice=0.709481, jac=0.555937 246 | model-122500, dice=0.765146, jac=0.622965 247 | model-123000, dice=0.676930, jac=0.520733 248 | model-123500, dice=0.659918, jac=0.504961 249 | model-124000, dice=0.762013, jac=0.618948 250 | model-124500, dice=0.688477, jac=0.536811 251 | model-125000, dice=0.795294, jac=0.662158 252 | model-125500, dice=0.753189, jac=0.607944 253 | model-126000, dice=0.754143, jac=0.608961 254 | model-126500, dice=0.741117, jac=0.593890 255 | model-127000, dice=0.777031, jac=0.637223 256 | model-127500, dice=0.698922, jac=0.546787 257 | model-128000, dice=0.755791, jac=0.611362 258 | model-128500, dice=0.730728, jac=0.581942 259 | model-129000, dice=0.731519, jac=0.583394 260 | model-129500, dice=0.667463, jac=0.512712 261 | model-130000, dice=0.643363, jac=0.482815 262 | model-130500, dice=0.725875, jac=0.574882 263 | model-131000, dice=0.790765, jac=0.655757 264 | model-131500, dice=0.770802, jac=0.630191 265 | model-132000, dice=0.774693, jac=0.634665 266 | model-132500, dice=0.780613, jac=0.641802 267 | model-133000, dice=0.753089, jac=0.607083 268 | model-133500, dice=0.776628, jac=0.637234 269 | model-134000, dice=0.780011, jac=0.641495 270 | model-134500, dice=0.757868, jac=0.613403 271 | model-135000, dice=0.776313, jac=0.636659 272 | model-135500, dice=0.801225, jac=0.669371 273 | model-136000, dice=0.808637, jac=0.679838 274 | model-136500, dice=0.784702, jac=0.647684 275 | model-137000, dice=0.764110, jac=0.620657 276 | model-137500, dice=0.761703, jac=0.617415 277 | model-138000, dice=0.782708, jac=0.645336 278 | model-138500, dice=0.800648, jac=0.668731 279 | model-139000, dice=0.758909, jac=0.614472 280 | model-139500, dice=0.774107, jac=0.633956 281 | model-140000, dice=0.774286, jac=0.634324 282 | model-140500, dice=0.787575, jac=0.651118 283 | model-141000, dice=0.783347, jac=0.645879 284 | model-141500, dice=0.737463, jac=0.589152 285 | model-142000, dice=0.658805, jac=0.498612 286 | model-142500, dice=0.758321, jac=0.615945 287 | model-143000, dice=0.770534, jac=0.630345 288 | model-143500, dice=0.764914, jac=0.622603 289 | model-144000, dice=0.789787, jac=0.654283 290 | model-144500, dice=0.730800, jac=0.581159 291 | model-145000, dice=0.755615, jac=0.612536 292 | model-145500, dice=0.789942, jac=0.655710 293 | model-146000, dice=0.754832, jac=0.611182 294 | model-146500, dice=0.786760, jac=0.650521 295 | model-147000, dice=0.730121, jac=0.583422 296 | model-147500, dice=0.720121, jac=0.570624 297 | model-148000, dice=0.739154, jac=0.592468 298 | model-148500, dice=0.702442, jac=0.550174 299 | model-149000, dice=0.749827, jac=0.604064 300 | model-149500, dice=0.772132, jac=0.631623 301 | model-150000, 
dice=0.733407, jac=0.586745 302 | model-150500, dice=0.720996, jac=0.571499 303 | model-151000, dice=0.799434, jac=0.667768 304 | model-151500, dice=0.823051, jac=0.700218 305 | model-152000, dice=0.807372, jac=0.677948 306 | model-152500, dice=0.778416, jac=0.640776 307 | model-153000, dice=0.780930, jac=0.643777 308 | model-153500, dice=0.698478, jac=0.543790 309 | model-154000, dice=0.728807, jac=0.577569 310 | model-154500, dice=0.766521, jac=0.623950 311 | model-155000, dice=0.744379, jac=0.596382 312 | model-155500, dice=0.739003, jac=0.589758 313 | model-156000, dice=0.773086, jac=0.632170 314 | model-156500, dice=0.773892, jac=0.633218 315 | model-157000, dice=0.783379, jac=0.645692 316 | model-157500, dice=0.769542, jac=0.627730 317 | model-158000, dice=0.755172, jac=0.609604 318 | model-158500, dice=0.772102, jac=0.631108 319 | model-159000, dice=0.780082, jac=0.641437 320 | model-159500, dice=0.791667, jac=0.656562 321 | model-160000, dice=0.790875, jac=0.656332 322 | model-160500, dice=0.727484, jac=0.576279 323 | model-161000, dice=0.752172, jac=0.607928 324 | model-161500, dice=0.723162, jac=0.572169 325 | model-162000, dice=0.737022, jac=0.589286 326 | model-162500, dice=0.777405, jac=0.638738 327 | model-163000, dice=0.726187, jac=0.577277 328 | model-163500, dice=0.723088, jac=0.572897 329 | model-164000, dice=0.617242, jac=0.458884 330 | model-164500, dice=0.764364, jac=0.622209 331 | model-165000, dice=0.735559, jac=0.587995 332 | model-165500, dice=0.723853, jac=0.575914 333 | model-166000, dice=0.706215, jac=0.554807 334 | model-166500, dice=0.762181, jac=0.619344 335 | model-167000, dice=0.768296, jac=0.626833 336 | model-167500, dice=0.799044, jac=0.666940 337 | model-168000, dice=0.802071, jac=0.671083 338 | model-168500, dice=0.795027, jac=0.661681 339 | model-169000, dice=0.773614, jac=0.634151 340 | model-169500, dice=0.799569, jac=0.667838 341 | model-170000, dice=0.796561, jac=0.663308 342 | model-170500, dice=0.773010, jac=0.633443 343 | model-171000, dice=0.743917, jac=0.597750 344 | model-171500, dice=0.763148, jac=0.621499 345 | model-172000, dice=0.766542, jac=0.625377 346 | model-172500, dice=0.794985, jac=0.662255 347 | model-173000, dice=0.794283, jac=0.660756 348 | model-173500, dice=0.756759, jac=0.614162 349 | model-174000, dice=0.747054, jac=0.601270 350 | model-174500, dice=0.791398, jac=0.657017 351 | model-175000, dice=0.769817, jac=0.628442 352 | model-175500, dice=0.777171, jac=0.638094 353 | model-176000, dice=0.721769, jac=0.570670 354 | model-176500, dice=0.693005, jac=0.538956 355 | model-177000, dice=0.699818, jac=0.545985 356 | model-177500, dice=0.758792, jac=0.614861 357 | model-178000, dice=0.781624, jac=0.643603 358 | model-178500, dice=0.744245, jac=0.598298 359 | model-179000, dice=0.780644, jac=0.643438 360 | model-179500, dice=0.705014, jac=0.552763 361 | model-180000, dice=0.791943, jac=0.657366 362 | model-180500, dice=0.758868, jac=0.616210 363 | model-181000, dice=0.726885, jac=0.579111 364 | model-181500, dice=0.773817, jac=0.634045 365 | model-182000, dice=0.778685, jac=0.640314 366 | model-182500, dice=0.798275, jac=0.666225 367 | model-183000, dice=0.734445, jac=0.586439 368 | model-183500, dice=0.800289, jac=0.668458 369 | model-184000, dice=0.781301, jac=0.642938 370 | model-184500, dice=0.798645, jac=0.665745 371 | model-185000, dice=0.790330, jac=0.654721 372 | model-185500, dice=0.803467, jac=0.672324 373 | model-186000, dice=0.808449, jac=0.679263 374 | model-186500, dice=0.794941, jac=0.660892 375 | model-187000, 
dice=0.780688, jac=0.642234 376 | model-187500, dice=0.754323, jac=0.609177 377 | model-188000, dice=0.764175, jac=0.621839 378 | model-188500, dice=0.782609, jac=0.644619 379 | model-189000, dice=0.780319, jac=0.642198 380 | model-189500, dice=0.767614, jac=0.625548 381 | model-190000, dice=0.754954, jac=0.609963 382 | model-190500, dice=0.726358, jac=0.576193 383 | model-191000, dice=0.755547, jac=0.610792 384 | model-191500, dice=0.727055, jac=0.577483 385 | model-192000, dice=0.717635, jac=0.566935 386 | model-192500, dice=0.746730, jac=0.601026 387 | model-193000, dice=0.761542, jac=0.618695 388 | model-193500, dice=0.777404, jac=0.638400 389 | model-194000, dice=0.791770, jac=0.657022 390 | model-194500, dice=0.783180, jac=0.645686 391 | model-195000, dice=0.781445, jac=0.643547 392 | model-195500, dice=0.792238, jac=0.657734 393 | model-196000, dice=0.775426, jac=0.636321 394 | model-196500, dice=0.772263, jac=0.632289 395 | model-197000, dice=0.775847, jac=0.636643 396 | model-197500, dice=0.772154, jac=0.632043 397 | model-198000, dice=0.773204, jac=0.633222 398 | model-198500, dice=0.767628, jac=0.626319 399 | model-199000, dice=0.766558, jac=0.624948 400 | model-199500, dice=0.767970, jac=0.626586 401 | model-200000, dice=0.769123, jac=0.628032 402 | -------------------------------------------------------------------------------- /logs/vnc2lucchi2/valid.txt: -------------------------------------------------------------------------------- 1 | model-1, dice=0.135015, jac=0.072888 2 | model-500, dice=0.540726, jac=0.383154 3 | model-1000, dice=0.433295, jac=0.278452 4 | model-1500, dice=0.477900, jac=0.319873 5 | model-2000, dice=0.532458, jac=0.369624 6 | model-2500, dice=0.505632, jac=0.340969 7 | model-3000, dice=0.517859, jac=0.351753 8 | model-3500, dice=0.634184, jac=0.470530 9 | model-4000, dice=0.640107, jac=0.474513 10 | model-4500, dice=0.565001, jac=0.398604 11 | model-5000, dice=0.650305, jac=0.483999 12 | model-5500, dice=0.625260, jac=0.458019 13 | model-6000, dice=0.654295, jac=0.491767 14 | model-6500, dice=0.594206, jac=0.425014 15 | model-7000, dice=0.535732, jac=0.368307 16 | model-7500, dice=0.574024, jac=0.404181 17 | model-8000, dice=0.385665, jac=0.241289 18 | model-8500, dice=0.509705, jac=0.344501 19 | model-9000, dice=0.546746, jac=0.378901 20 | model-9500, dice=0.555227, jac=0.387447 21 | model-10000, dice=0.670305, jac=0.511115 22 | model-10500, dice=0.641643, jac=0.473495 23 | model-11000, dice=0.613878, jac=0.450086 24 | model-11500, dice=0.645522, jac=0.480240 25 | model-12000, dice=0.667256, jac=0.502895 26 | model-12500, dice=0.563923, jac=0.395202 27 | model-13000, dice=0.707934, jac=0.550039 28 | model-13500, dice=0.612924, jac=0.445634 29 | model-14000, dice=0.631319, jac=0.464050 30 | model-14500, dice=0.654556, jac=0.490933 31 | model-15000, dice=0.644470, jac=0.476959 32 | model-15500, dice=0.700551, jac=0.541243 33 | model-16000, dice=0.656647, jac=0.491615 34 | model-16500, dice=0.704612, jac=0.545801 35 | model-17000, dice=0.681646, jac=0.520184 36 | model-17500, dice=0.726200, jac=0.572157 37 | model-18000, dice=0.736683, jac=0.586197 38 | model-18500, dice=0.675810, jac=0.513235 39 | model-19000, dice=0.708865, jac=0.552072 40 | model-19500, dice=0.660637, jac=0.496526 41 | model-20000, dice=0.695646, jac=0.535575 42 | model-20500, dice=0.717086, jac=0.560631 43 | model-21000, dice=0.679761, jac=0.518160 44 | model-21500, dice=0.682560, jac=0.520601 45 | model-22000, dice=0.705913, jac=0.546960 46 | model-22500, dice=0.676262, 
jac=0.513865 47 | model-23000, dice=0.477343, jac=0.318533 48 | model-23500, dice=0.729359, jac=0.575963 49 | model-24000, dice=0.763686, jac=0.619577 50 | model-24500, dice=0.708373, jac=0.552105 51 | model-25000, dice=0.698432, jac=0.538057 52 | model-25500, dice=0.614925, jac=0.448011 53 | model-26000, dice=0.718489, jac=0.562396 54 | model-26500, dice=0.705918, jac=0.547795 55 | model-27000, dice=0.672122, jac=0.508370 56 | model-27500, dice=0.745479, jac=0.596258 57 | model-28000, dice=0.752531, jac=0.604359 58 | model-28500, dice=0.765494, jac=0.621881 59 | model-29000, dice=0.779551, jac=0.641426 60 | model-29500, dice=0.732781, jac=0.581412 61 | model-30000, dice=0.726127, jac=0.571828 62 | model-30500, dice=0.739627, jac=0.589066 63 | model-31000, dice=0.745559, jac=0.595767 64 | model-31500, dice=0.716004, jac=0.559215 65 | model-32000, dice=0.707673, jac=0.549803 66 | model-32500, dice=0.740518, jac=0.590179 67 | model-33000, dice=0.740880, jac=0.591643 68 | model-33500, dice=0.748687, jac=0.599533 69 | model-34000, dice=0.738040, jac=0.587459 70 | model-34500, dice=0.782335, jac=0.644078 71 | model-35000, dice=0.776946, jac=0.636541 72 | model-35500, dice=0.729061, jac=0.575130 73 | model-36000, dice=0.758503, jac=0.612152 74 | model-36500, dice=0.751518, jac=0.603311 75 | model-37000, dice=0.682099, jac=0.520989 76 | model-37500, dice=0.769871, jac=0.627338 77 | model-38000, dice=0.683499, jac=0.523104 78 | model-38500, dice=0.760274, jac=0.615083 79 | model-39000, dice=0.781705, jac=0.642873 80 | model-39500, dice=0.779736, jac=0.641016 81 | model-40000, dice=0.767077, jac=0.623384 82 | model-40500, dice=0.780992, jac=0.641532 83 | model-41000, dice=0.791872, jac=0.656977 84 | model-41500, dice=0.713477, jac=0.557084 85 | model-42000, dice=0.717377, jac=0.562674 86 | model-42500, dice=0.771158, jac=0.628780 87 | model-43000, dice=0.766233, jac=0.622575 88 | model-43500, dice=0.781579, jac=0.643157 89 | model-44000, dice=0.775877, jac=0.635052 90 | model-44500, dice=0.806667, jac=0.677120 91 | model-45000, dice=0.748515, jac=0.600295 92 | model-45500, dice=0.761003, jac=0.617943 93 | model-46000, dice=0.773568, jac=0.632813 94 | model-46500, dice=0.766049, jac=0.621983 95 | model-47000, dice=0.784286, jac=0.646882 96 | model-47500, dice=0.786877, jac=0.649971 97 | model-48000, dice=0.748264, jac=0.599338 98 | model-48500, dice=0.766143, jac=0.622790 99 | model-49000, dice=0.722046, jac=0.568826 100 | model-49500, dice=0.766469, jac=0.622965 101 | model-50000, dice=0.762432, jac=0.619229 102 | model-50500, dice=0.762636, jac=0.619521 103 | model-51000, dice=0.747650, jac=0.599151 104 | model-51500, dice=0.771056, jac=0.629127 105 | model-52000, dice=0.638637, jac=0.473630 106 | model-52500, dice=0.790602, jac=0.655047 107 | model-53000, dice=0.768366, jac=0.625027 108 | model-53500, dice=0.745808, jac=0.597241 109 | model-54000, dice=0.781663, jac=0.642528 110 | model-54500, dice=0.761097, jac=0.615878 111 | model-55000, dice=0.796219, jac=0.662553 112 | model-55500, dice=0.778065, jac=0.638239 113 | model-56000, dice=0.740323, jac=0.590081 114 | model-56500, dice=0.788487, jac=0.652069 115 | model-57000, dice=0.802830, jac=0.671607 116 | model-57500, dice=0.787203, jac=0.650242 117 | model-58000, dice=0.805535, jac=0.675038 118 | model-58500, dice=0.765100, jac=0.621281 119 | model-59000, dice=0.784876, jac=0.647482 120 | model-59500, dice=0.802869, jac=0.672497 121 | model-60000, dice=0.725923, jac=0.571888 122 | model-60500, dice=0.760374, jac=0.615158 123 | model-61000, 
dice=0.800351, jac=0.668158 124 | model-61500, dice=0.779941, jac=0.640343 125 | model-62000, dice=0.779077, jac=0.639723 126 | model-62500, dice=0.786385, jac=0.649770 127 | model-63000, dice=0.770366, jac=0.629559 128 | model-63500, dice=0.798525, jac=0.666257 129 | model-64000, dice=0.661709, jac=0.500486 130 | model-64500, dice=0.786207, jac=0.649088 131 | model-65000, dice=0.783516, jac=0.645070 132 | model-65500, dice=0.749442, jac=0.601938 133 | model-66000, dice=0.693932, jac=0.537315 134 | model-66500, dice=0.718204, jac=0.564884 135 | model-67000, dice=0.744128, jac=0.598788 136 | model-67500, dice=0.755354, jac=0.609461 137 | model-68000, dice=0.790619, jac=0.656156 138 | model-68500, dice=0.729819, jac=0.579929 139 | model-69000, dice=0.803490, jac=0.674276 140 | model-69500, dice=0.789361, jac=0.654924 141 | model-70000, dice=0.773464, jac=0.632061 142 | model-70500, dice=0.774057, jac=0.633936 143 | model-71000, dice=0.766675, jac=0.623871 144 | model-71500, dice=0.755700, jac=0.611781 145 | model-72000, dice=0.793412, jac=0.658749 146 | model-72500, dice=0.758913, jac=0.615777 147 | model-73000, dice=0.786994, jac=0.650253 148 | model-73500, dice=0.744504, jac=0.594615 149 | model-74000, dice=0.757344, jac=0.612280 150 | model-74500, dice=0.795458, jac=0.661789 151 | model-75000, dice=0.808418, jac=0.679531 152 | model-75500, dice=0.786061, jac=0.649012 153 | model-76000, dice=0.756465, jac=0.613005 154 | model-76500, dice=0.787411, jac=0.650703 155 | model-77000, dice=0.815724, jac=0.689730 156 | model-77500, dice=0.709644, jac=0.556110 157 | model-78000, dice=0.796713, jac=0.663118 158 | model-78500, dice=0.754940, jac=0.610122 159 | model-79000, dice=0.782871, jac=0.644354 160 | model-79500, dice=0.793027, jac=0.658899 161 | model-80000, dice=0.749735, jac=0.602484 162 | model-80500, dice=0.782430, jac=0.644840 163 | model-81000, dice=0.795487, jac=0.662092 164 | model-81500, dice=0.809255, jac=0.680367 165 | model-82000, dice=0.792896, jac=0.658135 166 | model-82500, dice=0.794747, jac=0.660860 167 | model-83000, dice=0.808870, jac=0.680285 168 | model-83500, dice=0.792236, jac=0.657100 169 | model-84000, dice=0.784417, jac=0.647199 170 | model-84500, dice=0.794129, jac=0.659718 171 | model-85000, dice=0.807430, jac=0.678572 172 | model-85500, dice=0.800988, jac=0.668856 173 | model-86000, dice=0.768227, jac=0.627963 174 | model-86500, dice=0.726365, jac=0.574746 175 | model-87000, dice=0.787821, jac=0.650876 176 | model-87500, dice=0.750177, jac=0.604276 177 | model-88000, dice=0.763100, jac=0.619804 178 | model-88500, dice=0.776378, jac=0.637595 179 | model-89000, dice=0.745156, jac=0.597360 180 | model-89500, dice=0.773028, jac=0.633929 181 | model-90000, dice=0.769344, jac=0.629288 182 | model-90500, dice=0.736091, jac=0.586839 183 | model-91000, dice=0.749363, jac=0.602074 184 | model-91500, dice=0.689307, jac=0.532986 185 | model-92000, dice=0.741578, jac=0.594357 186 | model-92500, dice=0.778509, jac=0.640837 187 | model-93000, dice=0.775740, jac=0.636701 188 | model-93500, dice=0.764405, jac=0.621030 189 | model-94000, dice=0.750985, jac=0.604642 190 | model-94500, dice=0.793551, jac=0.658691 191 | model-95000, dice=0.707077, jac=0.553612 192 | model-95500, dice=0.773075, jac=0.632401 193 | model-96000, dice=0.772000, jac=0.632456 194 | model-96500, dice=0.796064, jac=0.663826 195 | model-97000, dice=0.773490, jac=0.634752 196 | model-97500, dice=0.794670, jac=0.662410 197 | model-98000, dice=0.789963, jac=0.653971 198 | model-98500, dice=0.729041, jac=0.581244 
199 | model-99000, dice=0.813171, jac=0.686759 200 | model-99500, dice=0.779314, jac=0.642024 201 | model-100000, dice=0.814886, jac=0.689012 202 | model-100500, dice=0.801342, jac=0.670145 203 | model-101000, dice=0.788156, jac=0.652431 204 | model-101500, dice=0.778443, jac=0.641036 205 | model-102000, dice=0.784865, jac=0.648939 206 | model-102500, dice=0.806549, jac=0.677524 207 | model-103000, dice=0.800138, jac=0.668117 208 | model-103500, dice=0.800928, jac=0.669072 209 | model-104000, dice=0.803118, jac=0.671776 210 | model-104500, dice=0.801063, jac=0.671811 211 | model-105000, dice=0.799847, jac=0.667323 212 | model-105500, dice=0.799498, jac=0.667374 213 | model-106000, dice=0.801595, jac=0.670126 214 | model-106500, dice=0.802737, jac=0.672017 215 | model-107000, dice=0.803112, jac=0.672644 216 | model-107500, dice=0.795862, jac=0.662605 217 | model-108000, dice=0.773701, jac=0.637297 218 | model-108500, dice=0.792482, jac=0.662047 219 | model-109000, dice=0.804271, jac=0.674584 220 | model-109500, dice=0.793944, jac=0.661566 221 | model-110000, dice=0.690256, jac=0.535601 222 | model-110500, dice=0.754268, jac=0.614507 223 | model-111000, dice=0.795806, jac=0.662798 224 | model-111500, dice=0.792987, jac=0.659611 225 | model-112000, dice=0.797236, jac=0.665174 226 | model-112500, dice=0.791037, jac=0.656176 227 | model-113000, dice=0.807116, jac=0.679062 228 | model-113500, dice=0.825022, jac=0.703263 229 | model-114000, dice=0.805058, jac=0.675087 230 | model-114500, dice=0.808493, jac=0.680121 231 | model-115000, dice=0.803289, jac=0.674329 232 | model-115500, dice=0.806334, jac=0.676828 233 | model-116000, dice=0.820539, jac=0.696894 234 | model-116500, dice=0.790338, jac=0.657446 235 | model-117000, dice=0.809467, jac=0.681536 236 | model-117500, dice=0.779950, jac=0.642947 237 | model-118000, dice=0.756196, jac=0.614539 238 | model-118500, dice=0.776468, jac=0.639549 239 | model-119000, dice=0.768878, jac=0.632501 240 | model-119500, dice=0.794521, jac=0.661645 241 | model-120000, dice=0.808853, jac=0.680977 242 | model-120500, dice=0.789618, jac=0.658463 243 | model-121000, dice=0.793860, jac=0.662814 244 | model-121500, dice=0.789882, jac=0.655508 245 | model-122000, dice=0.814610, jac=0.688448 246 | model-122500, dice=0.803381, jac=0.673403 247 | model-123000, dice=0.790463, jac=0.656013 248 | model-123500, dice=0.814726, jac=0.688753 249 | model-124000, dice=0.809469, jac=0.681094 250 | model-124500, dice=0.797441, jac=0.664998 251 | model-125000, dice=0.828368, jac=0.707980 252 | model-125500, dice=0.793427, jac=0.663239 253 | model-126000, dice=0.792047, jac=0.660226 254 | model-126500, dice=0.790711, jac=0.660900 255 | model-127000, dice=0.804304, jac=0.675896 256 | model-127500, dice=0.803582, jac=0.673766 257 | model-128000, dice=0.827302, jac=0.707468 258 | model-128500, dice=0.784613, jac=0.651758 259 | model-129000, dice=0.806916, jac=0.680405 260 | model-129500, dice=0.788999, jac=0.656416 261 | model-130000, dice=0.815325, jac=0.689775 262 | model-130500, dice=0.811415, jac=0.683814 263 | model-131000, dice=0.816576, jac=0.692267 264 | model-131500, dice=0.813255, jac=0.688651 265 | model-132000, dice=0.803086, jac=0.674077 266 | model-132500, dice=0.785432, jac=0.653183 267 | model-133000, dice=0.785107, jac=0.652201 268 | model-133500, dice=0.825726, jac=0.704787 269 | model-134000, dice=0.784866, jac=0.651200 270 | model-134500, dice=0.750350, jac=0.608300 271 | model-135000, dice=0.784540, jac=0.651008 272 | model-135500, dice=0.796448, jac=0.666423 273 
| model-136000, dice=0.829434, jac=0.709464 274 | model-136500, dice=0.823720, jac=0.702851 275 | model-137000, dice=0.792045, jac=0.659677 276 | model-137500, dice=0.798754, jac=0.667232 277 | model-138000, dice=0.780963, jac=0.644749 278 | model-138500, dice=0.779742, jac=0.642750 279 | model-139000, dice=0.800242, jac=0.669464 280 | model-139500, dice=0.814242, jac=0.687766 281 | model-140000, dice=0.824270, jac=0.702074 282 | model-140500, dice=0.825552, jac=0.704102 283 | model-141000, dice=0.812034, jac=0.685432 284 | model-141500, dice=0.834373, jac=0.716589 285 | model-142000, dice=0.809877, jac=0.683958 286 | model-142500, dice=0.803813, jac=0.677163 287 | model-143000, dice=0.812098, jac=0.687174 288 | model-143500, dice=0.816889, jac=0.691316 289 | model-144000, dice=0.804222, jac=0.673705 290 | model-144500, dice=0.819135, jac=0.695165 291 | model-145000, dice=0.827350, jac=0.706659 292 | model-145500, dice=0.831991, jac=0.712969 293 | model-146000, dice=0.821206, jac=0.697584 294 | model-146500, dice=0.806822, jac=0.677697 295 | model-147000, dice=0.820271, jac=0.696420 296 | model-147500, dice=0.796689, jac=0.665180 297 | model-148000, dice=0.819595, jac=0.696346 298 | model-148500, dice=0.810343, jac=0.684626 299 | model-149000, dice=0.808637, jac=0.680156 300 | model-149500, dice=0.787895, jac=0.653812 301 | model-150000, dice=0.825537, jac=0.704521 302 | model-150500, dice=0.818207, jac=0.694652 303 | model-151000, dice=0.810157, jac=0.684051 304 | model-151500, dice=0.815619, jac=0.691015 305 | model-152000, dice=0.808217, jac=0.680165 306 | model-152500, dice=0.817353, jac=0.692339 307 | model-153000, dice=0.821101, jac=0.697968 308 | model-153500, dice=0.820805, jac=0.698658 309 | model-154000, dice=0.774784, jac=0.638080 310 | model-154500, dice=0.784985, jac=0.652363 311 | model-155000, dice=0.802624, jac=0.674812 312 | model-155500, dice=0.792453, jac=0.661475 313 | model-156000, dice=0.826534, jac=0.705803 314 | model-156500, dice=0.790472, jac=0.659967 315 | model-157000, dice=0.783472, jac=0.649848 316 | model-157500, dice=0.818909, jac=0.697034 317 | model-158000, dice=0.789618, jac=0.658102 318 | model-158500, dice=0.808149, jac=0.681575 319 | model-159000, dice=0.790162, jac=0.657887 320 | model-159500, dice=0.788064, jac=0.654870 321 | model-160000, dice=0.817245, jac=0.692482 322 | model-160500, dice=0.794897, jac=0.663641 323 | model-161000, dice=0.809871, jac=0.683969 324 | model-161500, dice=0.810693, jac=0.683194 325 | model-162000, dice=0.827657, jac=0.706856 326 | model-162500, dice=0.830500, jac=0.711089 327 | model-163000, dice=0.817643, jac=0.693961 328 | model-163500, dice=0.812544, jac=0.685811 329 | model-164000, dice=0.805648, jac=0.676741 330 | model-164500, dice=0.810407, jac=0.684421 331 | model-165000, dice=0.788919, jac=0.656908 332 | model-165500, dice=0.811589, jac=0.684833 333 | model-166000, dice=0.816351, jac=0.692002 334 | model-166500, dice=0.802608, jac=0.673006 335 | model-167000, dice=0.826324, jac=0.704920 336 | model-167500, dice=0.803764, jac=0.674712 337 | model-168000, dice=0.820514, jac=0.697242 338 | model-168500, dice=0.812955, jac=0.686883 339 | model-169000, dice=0.796048, jac=0.665317 340 | model-169500, dice=0.805523, jac=0.677505 341 | model-170000, dice=0.815599, jac=0.690856 342 | model-170500, dice=0.804822, jac=0.676275 343 | model-171000, dice=0.789808, jac=0.656073 344 | model-171500, dice=0.819631, jac=0.695836 345 | model-172000, dice=0.795084, jac=0.664954 346 | model-172500, dice=0.789961, jac=0.657011 347 | 
model-173000, dice=0.798966, jac=0.669878 348 | model-173500, dice=0.800350, jac=0.670699 349 | model-174000, dice=0.774222, jac=0.636958 350 | model-174500, dice=0.826118, jac=0.705701 351 | model-175000, dice=0.811096, jac=0.684292 352 | model-175500, dice=0.813375, jac=0.687456 353 | model-176000, dice=0.802167, jac=0.674313 354 | model-176500, dice=0.811095, jac=0.685524 355 | model-177000, dice=0.816922, jac=0.693181 356 | model-177500, dice=0.822833, jac=0.700998 357 | model-178000, dice=0.814181, jac=0.688802 358 | model-178500, dice=0.815897, jac=0.691736 359 | model-179000, dice=0.809199, jac=0.682422 360 | model-179500, dice=0.784166, jac=0.650710 361 | model-180000, dice=0.782021, jac=0.647810 362 | model-180500, dice=0.795893, jac=0.665979 363 | model-181000, dice=0.822014, jac=0.700024 364 | model-181500, dice=0.795503, jac=0.665216 365 | model-182000, dice=0.800084, jac=0.671215 366 | model-182500, dice=0.812105, jac=0.687369 367 | model-183000, dice=0.817247, jac=0.693002 368 | model-183500, dice=0.799056, jac=0.669775 369 | model-184000, dice=0.810290, jac=0.684072 370 | model-184500, dice=0.828755, jac=0.708855 371 | model-185000, dice=0.810922, jac=0.685585 372 | model-185500, dice=0.811313, jac=0.685813 373 | model-186000, dice=0.809135, jac=0.683185 374 | model-186500, dice=0.808952, jac=0.682600 375 | model-187000, dice=0.809995, jac=0.684365 376 | model-187500, dice=0.804551, jac=0.676463 377 | model-188000, dice=0.791659, jac=0.660829 378 | model-188500, dice=0.772821, jac=0.635828 379 | model-189000, dice=0.793894, jac=0.662999 380 | model-189500, dice=0.795015, jac=0.664750 381 | model-190000, dice=0.810974, jac=0.684189 382 | model-190500, dice=0.815425, jac=0.691060 383 | model-191000, dice=0.809366, jac=0.683205 384 | model-191500, dice=0.809863, jac=0.683742 385 | model-192000, dice=0.816520, jac=0.692541 386 | model-192500, dice=0.817420, jac=0.694068 387 | model-193000, dice=0.818663, jac=0.695991 388 | model-193500, dice=0.812700, jac=0.688530 389 | model-194000, dice=0.815969, jac=0.692384 390 | model-194500, dice=0.824119, jac=0.702695 391 | model-195000, dice=0.824991, jac=0.704295 392 | model-195500, dice=0.824427, jac=0.703376 393 | model-196000, dice=0.821907, jac=0.699923 394 | model-196500, dice=0.817557, jac=0.693997 395 | model-197000, dice=0.816369, jac=0.692731 396 | model-197500, dice=0.811743, jac=0.686705 397 | model-198000, dice=0.809041, jac=0.683661 398 | model-198500, dice=0.809510, jac=0.683807 399 | model-199000, dice=0.812916, jac=0.688291 400 | model-199500, dice=0.810781, jac=0.685468 401 | model-200000, dice=0.811760, jac=0.686792 402 | --------------------------------------------------------------------------------
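Note: each valid.txt above records one line per checkpoint in the form "model-<iteration>, dice=<Dice>, jac=<Jaccard>". A small throwaway sketch (not part of the repository) for picking the best checkpoint by Dice from such a log:

import re

best = None
with open('logs/vnc2lucchi2/valid.txt') as f:
    for line in f:
        m = re.match(r'model-(\d+), dice=([\d.]+), jac=([\d.]+)', line.strip())
        if m is None:
            continue  # skip malformed lines
        iteration, dice, jac = int(m.group(1)), float(m.group(2)), float(m.group(3))
        if best is None or dice > best[1]:
            best = (iteration, dice, jac)
print('best checkpoint: model-%d (dice=%.6f, jac=%.6f)' % best)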