├── .gitignore
├── README.md
├── image
│   ├── 01234N.png
│   └── 01477D.png
├── model
│   ├── MFNet.py
│   ├── SegNet.py
│   └── __init__.py
├── run_demo.py
├── sample.png
├── test.py
├── train.py
├── util
│   ├── MF_dataset.py
│   ├── __init__.py
│   ├── augmentation.py
│   └── util.py
└── weights
    └── MFNet
        ├── final.pth
        ├── log.txt
        └── tmp.optim
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__
2 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # MFNet-pytorch
2 | 
3 | Image semantic segmentation using RGB-Thermal images, for example:
4 | 
5 | 
6 | 
7 | Project home page: https://www.mi.t.u-tokyo.ac.jp/static/projects/mil_multispectral/
8 | 
9 | This is the official PyTorch implementation of [MFNet: Towards real-time semantic segmentation for autonomous vehicles with multi-spectral scenes](https://ieeexplore.ieee.org/document/8206396/) (IROS 2017). The PDF can be downloaded from [HERE](https://drive.google.com/file/d/1vxMh63QpdxPnG3jhzpQU0fb-2XOzHR-Z/view?usp=sharing) (Google Drive shared file).
10 | 
11 | ## Introduction
12 | 
13 | MFNet is a lightweight CNN architecture for semantic segmentation of multispectral images. It has roughly 1/40 of the parameters of SegNet and about 6x its inference speed, while providing similar or higher accuracy.
14 | 
15 | ## Requirements
16 | 
17 | ```
18 | * pytorch 0.4.0
19 | * PIL 4.3.0
20 | * numpy 1.14.0
21 | * tqdm 4.19.4
22 | ```
23 | ## Dataset
24 | 
25 | We published a new RGB-Thermal semantic segmentation dataset to support the further development of autonomous vehicles. The dataset contains 1569 images (820 taken at daytime and 749 taken at nighttime) and is available on our [project home page](https://www.mi.t.u-tokyo.ac.jp/static/projects/mil_multispectral/).
26 | 
27 | **Important**: the `.png` images in our dataset contain 4 channels; load them in this way to get the right format:
28 | 
29 | ```
30 | import numpy as np
31 | from PIL import Image
32 | im = Image.open('/path/to/dataset/01606D.png')
33 | print(np.asarray(im).shape)
34 | 
35 | # (480, 640, 4)
36 | ```
37 | 
38 | ## Usage
39 | 
40 | * run the demo with a trained model
41 | ```
42 | $ cd /path/to/this/repository
43 | $ python run_demo.py
44 | ```
45 | 
46 | * training
47 | ```
48 | 1. download our dataset
49 | 2. set the dataset directory in train.py
50 | 3. run train.py
51 | ```
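52 | 
53 | * evaluation (a usage sketch: test.py reads the dataset directory from train.py and expects a trained model at weights/MFNet/final.pth)
54 | ```
55 | $ python test.py
56 | ```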
57 | 
--------------------------------------------------------------------------------
/image/01234N.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/haqishen/MFNet-pytorch/fff9e28ca47adb1491ea3c6c29df958da91da092/image/01234N.png
--------------------------------------------------------------------------------
/image/01477D.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/haqishen/MFNet-pytorch/fff9e28ca47adb1491ea3c6c29df958da91da092/image/01477D.png
--------------------------------------------------------------------------------
/model/MFNet.py:
--------------------------------------------------------------------------------
1 | # coding:utf-8
2 | import torch
3 | import torch.nn as nn
4 | import torch.nn.functional as F
5 | 
6 | 
7 | class ConvBnLeakyRelu2d(nn.Module):
8 |     # convolution
9 |     # batch normalization
10 |     # leaky relu
11 |     def __init__(self, in_channels, out_channels, kernel_size=3, padding=1, stride=1, dilation=1, groups=1):
12 |         super(ConvBnLeakyRelu2d, self).__init__()
13 |         self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, padding=padding, stride=stride, dilation=dilation, groups=groups)
14 |         self.bn = nn.BatchNorm2d(out_channels)
15 |     def forward(self, x):
16 |         return F.leaky_relu(self.bn(self.conv(x)), negative_slope=0.2)
17 | 
18 | 
19 | class MiniInception(nn.Module):
20 |     def __init__(self, in_channels, out_channels):
21 |         super(MiniInception, self).__init__()
22 |         self.conv1_left = ConvBnLeakyRelu2d(in_channels, out_channels//2)
23 |         self.conv1_right = ConvBnLeakyRelu2d(in_channels, out_channels//2, padding=2, dilation=2)
24 |         self.conv2_left = ConvBnLeakyRelu2d(out_channels, out_channels//2)
25 |         self.conv2_right = ConvBnLeakyRelu2d(out_channels, out_channels//2, padding=2, dilation=2)
26 |         self.conv3_left = ConvBnLeakyRelu2d(out_channels, out_channels//2)
27 |         self.conv3_right = ConvBnLeakyRelu2d(out_channels, out_channels//2, padding=2, dilation=2)
28 |     def forward(self,x):
29 |         x = torch.cat((self.conv1_left(x), self.conv1_right(x)), dim=1)
30 |         x = torch.cat((self.conv2_left(x), self.conv2_right(x)), dim=1)
31 |         x = torch.cat((self.conv3_left(x), self.conv3_right(x)), dim=1)
32 |         return x
33 | 
34 | 
35 | class MFNet(nn.Module):
36 | 
37 |     def __init__(self, n_class):
38 |         super(MFNet, self).__init__()
39 |         rgb_ch = [16,48,48,96,96] # channel widths of the five RGB encoder stages
40 |         inf_ch = [16,16,16,36,36] # channel widths of the five thermal (infrared) encoder stages
41 | 
42 |         self.conv1_rgb = ConvBnLeakyRelu2d(3, rgb_ch[0])
43 |         self.conv2_1_rgb = ConvBnLeakyRelu2d(rgb_ch[0], rgb_ch[1])
44 |         self.conv2_2_rgb = ConvBnLeakyRelu2d(rgb_ch[1], rgb_ch[1])
45 |         self.conv3_1_rgb = ConvBnLeakyRelu2d(rgb_ch[1], rgb_ch[2])
46 |         self.conv3_2_rgb = ConvBnLeakyRelu2d(rgb_ch[2], rgb_ch[2])
47 |         self.conv4_rgb = MiniInception(rgb_ch[2], rgb_ch[3])
48 |         self.conv5_rgb = MiniInception(rgb_ch[3], rgb_ch[4])
49 | 
50 |         self.conv1_inf = ConvBnLeakyRelu2d(1, inf_ch[0])
51 |         self.conv2_1_inf = ConvBnLeakyRelu2d(inf_ch[0], inf_ch[1])
52 |         self.conv2_2_inf = ConvBnLeakyRelu2d(inf_ch[1], inf_ch[1])
53 |         self.conv3_1_inf = ConvBnLeakyRelu2d(inf_ch[1], inf_ch[2])
54 |         self.conv3_2_inf = ConvBnLeakyRelu2d(inf_ch[2], inf_ch[2])
55 |         self.conv4_inf = MiniInception(inf_ch[2], inf_ch[3])
56 |         self.conv5_inf = MiniInception(inf_ch[3], inf_ch[4])
57 | 
58 |         self.decode4 = ConvBnLeakyRelu2d(rgb_ch[3]+inf_ch[3], rgb_ch[2]+inf_ch[2])
59 |         self.decode3 = ConvBnLeakyRelu2d(rgb_ch[2]+inf_ch[2], rgb_ch[1]+inf_ch[1])
60 |         self.decode2 = ConvBnLeakyRelu2d(rgb_ch[1]+inf_ch[1], rgb_ch[0]+inf_ch[0])
61 |         self.decode1 = ConvBnLeakyRelu2d(rgb_ch[0]+inf_ch[0], n_class)
62 | 
63 | 
64 |     def forward(self, x):
65 |         # split data into RGB and INF
66 |         x_rgb = x[:,:3]
67 |         x_inf = x[:,3:]
68 | 
69 |         # encode
70 |         x_rgb = self.conv1_rgb(x_rgb)
71 |         x_rgb = F.max_pool2d(x_rgb, kernel_size=2, stride=2) # pool1
72 |         x_rgb = self.conv2_1_rgb(x_rgb)
73 |         x_rgb_p2 = self.conv2_2_rgb(x_rgb)
74 |         x_rgb = F.max_pool2d(x_rgb_p2, kernel_size=2, stride=2) # pool2
75 |         x_rgb = self.conv3_1_rgb(x_rgb)
76 |         x_rgb_p3 = self.conv3_2_rgb(x_rgb)
77 |         x_rgb = F.max_pool2d(x_rgb_p3, kernel_size=2, stride=2) # pool3
78 |         x_rgb_p4 = self.conv4_rgb(x_rgb)
79 |         x_rgb = F.max_pool2d(x_rgb_p4, kernel_size=2, stride=2) # pool4
80 |         x_rgb = self.conv5_rgb(x_rgb)
81 | 
82 |         x_inf = self.conv1_inf(x_inf)
83 |         x_inf = F.max_pool2d(x_inf, kernel_size=2, stride=2) # pool1
84 |         x_inf = self.conv2_1_inf(x_inf)
85 |         x_inf_p2 = self.conv2_2_inf(x_inf)
86 |         x_inf = F.max_pool2d(x_inf_p2, kernel_size=2, stride=2) # pool2
87 |         x_inf = self.conv3_1_inf(x_inf)
88 |         x_inf_p3 = self.conv3_2_inf(x_inf)
89 |         x_inf = F.max_pool2d(x_inf_p3, kernel_size=2, stride=2) # pool3
90 |         x_inf_p4 = self.conv4_inf(x_inf)
91 |         x_inf = F.max_pool2d(x_inf_p4, kernel_size=2, stride=2) # pool4
92 |         x_inf = self.conv5_inf(x_inf)
93 | 
94 |         x = torch.cat((x_rgb, x_inf), dim=1) # fuse the RGB and INF features
95 | 
96 |         # decode (F.upsample is deprecated in favor of F.interpolate in newer PyTorch; kept for the pinned 0.4.0)
97 |         x = F.upsample(x, scale_factor=2, mode='nearest') # unpool4
98 |         x = self.decode4(x + torch.cat((x_rgb_p4, x_inf_p4), dim=1)) # additive skip connection from both encoders
99 |         x = F.upsample(x, scale_factor=2, mode='nearest') # unpool3
100 |         x = self.decode3(x + torch.cat((x_rgb_p3, x_inf_p3), dim=1))
101 |         x = F.upsample(x, scale_factor=2, mode='nearest') # unpool2
102 |         x = self.decode2(x + torch.cat((x_rgb_p2, x_inf_p2), dim=1))
103 |         x = F.upsample(x, scale_factor=2, mode='nearest') # unpool1
104 |         x = self.decode1(x)
105 | 
106 |         return x
107 | 
108 | 
109 | def unit_test():
110 |     import numpy as np
111 |     x = torch.tensor(np.random.rand(2,4,480,640).astype(np.float32))
112 |     model = MFNet(n_class=9)
113 |     y = model(x)
114 |     print('output shape:', y.shape)
115 |     assert y.shape == (2,9,480,640), 'output shape (2,9,480,640) is expected!'
116 |     print('test ok!')
117 | 
118 | 
119 | if __name__ == '__main__':
120 |     unit_test()
121 | 
--------------------------------------------------------------------------------
/model/SegNet.py:
--------------------------------------------------------------------------------
1 | # coding:utf-8
2 | import torch
3 | import torch.nn as nn
4 | import torch.nn.functional as F
5 | 
6 | 
7 | class ConvBnRelu2d(nn.Module):
8 |     # convolution
9 |     # batch normalization
10 |     # relu
11 |     def __init__(self, in_channels, out_channels, kernel_size=3, padding=1, stride=1, dilation=1, groups=1):
12 |         super(ConvBnRelu2d, self).__init__()
13 |         self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, padding=padding, stride=stride, dilation=dilation, groups=groups)
14 |         self.bn = nn.BatchNorm2d(out_channels)
15 |     def forward(self, x):
16 |         return F.relu(self.bn(self.conv(x)))
17 | 
18 | 
19 | class SegNet(nn.Module):
20 |     def __init__(self, n_class, in_channels=4):
21 |         super(SegNet, self).__init__()
22 | 
23 |         chs = [32,64,64,128,128]
24 | 
25 |         self.down1 = nn.Sequential(
26 |             ConvBnRelu2d(in_channels, chs[0]),
27 |             ConvBnRelu2d(chs[0], chs[0]),
28 |         )
29 |         self.down2 = nn.Sequential(
30 |             ConvBnRelu2d(chs[0], chs[1]),
31 |             ConvBnRelu2d(chs[1], chs[1]),
32 |         )
33 |         self.down3 = nn.Sequential(
34 |             ConvBnRelu2d(chs[1], chs[2]),
35 |             ConvBnRelu2d(chs[2], chs[2]),
36 |             ConvBnRelu2d(chs[2], chs[2])
37 |         )
38 |         self.down4 = nn.Sequential(
39 |             ConvBnRelu2d(chs[2], chs[3]),
40 |             ConvBnRelu2d(chs[3], chs[3]),
41 |             ConvBnRelu2d(chs[3], chs[3])
42 |         )
43 |         self.down5 = nn.Sequential(
44 |             ConvBnRelu2d(chs[3], chs[4]),
45 |             ConvBnRelu2d(chs[4], chs[4]),
46 |             ConvBnRelu2d(chs[4], chs[4])
47 |         )
48 |         self.up5 = nn.Sequential(
49 |             ConvBnRelu2d(chs[4], chs[4]),
50 |             ConvBnRelu2d(chs[4], chs[4]),
51 |             ConvBnRelu2d(chs[4], chs[3])
52 |         )
53 |         self.up4 = nn.Sequential(
54 |             ConvBnRelu2d(chs[3], chs[3]),
55 |             ConvBnRelu2d(chs[3], chs[3]),
56 |             ConvBnRelu2d(chs[3], chs[2])
57 |         )
58 |         self.up3 = nn.Sequential(
59 |             ConvBnRelu2d(chs[2], chs[2]),
60 |             ConvBnRelu2d(chs[2], chs[2]),
61 |             ConvBnRelu2d(chs[2], chs[1])
62 |         )
63 |         self.up2 = nn.Sequential(
64 |             ConvBnRelu2d(chs[1], chs[1]),
65 |             ConvBnRelu2d(chs[1], chs[0])
66 |         )
67 |         self.up1 = nn.Sequential(
68 |             ConvBnRelu2d(chs[0], chs[0]),
69 |             ConvBnRelu2d(chs[0], n_class)
70 |         )
71 | 
72 |     def forward(self, x):
73 |         x = self.down1(x)
74 |         x, ind1 = F.max_pool2d(x, 2, 2, return_indices=True)
75 |         x = self.down2(x)
76 |         x, ind2 = F.max_pool2d(x, 2, 2, return_indices=True)
77 |         x = self.down3(x)
78 |         x, ind3 = F.max_pool2d(x, 2, 2, return_indices=True)
79 |         x = self.down4(x)
80 |         x, ind4 = F.max_pool2d(x, 2, 2, return_indices=True)
81 |         x = self.down5(x)
82 |         x, ind5 = F.max_pool2d(x, 2, 2, return_indices=True)
83 | 
84 |         x = F.max_unpool2d(x, ind5, 2, 2)
85 |         x = self.up5(x)
86 |         x = F.max_unpool2d(x, ind4, 2, 2)
87 |         x = self.up4(x)
88 |         x = F.max_unpool2d(x, ind3, 2, 2)
89 |         x = self.up3(x)
90 |         x = F.max_unpool2d(x, ind2, 2, 2)
91 |         x = self.up2(x)
92 |         x = F.max_unpool2d(x, ind1, 2, 2)
93 |         x = self.up1(x)
94 | 
95 |         return x
96 | 
97 | 
98 | def unit_test():
99 |     import numpy as np
100 |     x = torch.tensor(np.random.rand(2,4,480,640).astype(np.float32))
101 |     model = SegNet(n_class=9)
102 |     y = model(x)
103 |     print('output shape:', y.shape)
104 |     assert y.shape == (2,9,480,640), 'output shape (2,9,480,640) is expected!'
105 |     print('test ok!')
106 | 
107 | 
108 | if __name__ == '__main__':
109 |     unit_test()
110 | 
--------------------------------------------------------------------------------
/model/__init__.py:
--------------------------------------------------------------------------------
1 | from .MFNet import MFNet
2 | from .SegNet import SegNet
--------------------------------------------------------------------------------
/run_demo.py:
--------------------------------------------------------------------------------
1 | # coding:utf-8
2 | import os
3 | import argparse
4 | import time
5 | import numpy as np
6 | from PIL import Image
7 | 
8 | import torch
9 | import torch.nn.functional as F
10 | from torch.autograd import Variable
11 | 
12 | from util.util import visualize
13 | from model import MFNet
14 | from train import n_class, model_dir
15 | 
16 | 
17 | def main():
18 | 
19 |     model = eval(args.model_name)(n_class=n_class)
20 |     if args.gpu >= 0: model.cuda(args.gpu)
21 |     if os.path.exists(final_model_file):
22 |         model.load_state_dict(torch.load(final_model_file, map_location={'cuda:0':'cuda:1'}))
23 |     elif os.path.exists(checkpoint_model_file):
24 |         model.load_state_dict(torch.load(checkpoint_model_file, map_location={'cuda:0':'cuda:1'}))
25 |     else:
26 |         raise Exception('| model file does not exist in %s' % model_dir)
27 |     print('| model loaded!')
28 | 
29 |     files = os.listdir('image')
30 |     images = []
31 |     fpath = []
32 |     for file in files:
33 |         if file[-3:] != 'png': continue
34 |         fpath.append('image/'+file)
35 |         images.append( np.asarray(Image.open('image/'+file)) )
36 |     images = np.asarray(images, dtype=np.float32).transpose((0,3,1,2))/255.
37 |     images = Variable(torch.tensor(images))
38 |     if args.gpu >= 0: images = images.cuda(args.gpu)
39 | 
40 |     model.eval()
41 |     with torch.no_grad():
42 |         logits = model(images)
43 |         predictions = logits.argmax(1)
44 |         visualize(fpath, predictions)
45 | 
46 |     print('| prediction files have been saved in image/')
47 | 
48 | 
49 | if __name__ == "__main__":
50 | 
51 |     parser = argparse.ArgumentParser(description='Run MFNet demo with pytorch')
52 |     parser.add_argument('--model_name', '-M', type=str, default='MFNet')
53 |     parser.add_argument('--gpu', '-G', type=int, default=0)
54 |     args = parser.parse_args()
55 | 
56 |     model_dir = os.path.join(model_dir, args.model_name)
57 | 
58 |     checkpoint_model_file = os.path.join(model_dir, 'tmp.pth')
59 |     final_model_file = os.path.join(model_dir, 'final.pth')
60 | 
61 |     print('| running %s demo on GPU #%d with pytorch' % (args.model_name, args.gpu))
62 |     main()
--------------------------------------------------------------------------------
/sample.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/haqishen/MFNet-pytorch/fff9e28ca47adb1491ea3c6c29df958da91da092/sample.png
--------------------------------------------------------------------------------
/test.py:
--------------------------------------------------------------------------------
1 | # coding:utf-8
2 | import os
3 | import argparse
4 | import time
5 | import numpy as np
6 | 
7 | import torch
8 | import torch.nn.functional as F
9 | from torch.autograd import Variable
10 | from torch.utils.data import DataLoader
11 | 
12 | from util.MF_dataset import MF_dataset
13 | from util.util import calculate_accuracy, calculate_result
14 | 
15 | from model import MFNet
16 | from train import n_class, data_dir, model_dir
17 | 
18 | 
19 | def main():
20 | 
21 |     cf = np.zeros((n_class, n_class)) # confusion matrix: rows = ground-truth class, cols = predicted class
22 | 
23 |     model = eval(args.model_name)(n_class=n_class)
24 |     if args.gpu >= 0: model.cuda(args.gpu)
25 |     print('| loading model file %s... ' % final_model_file, end='')
26 |     model.load_state_dict(torch.load(final_model_file, map_location={'cuda:0':'cuda:1'}))
27 |     print('done!')
28 | 
29 |     test_dataset = MF_dataset(data_dir, 'test', have_label=True)
30 |     test_loader = DataLoader(
31 |         dataset = test_dataset,
32 |         batch_size = args.batch_size,
33 |         shuffle = False,
34 |         num_workers = args.num_workers,
35 |         pin_memory = True,
36 |         drop_last = False
37 |     )
38 |     test_loader.n_iter = len(test_loader)
39 | 
40 |     loss_avg = 0.
41 |     acc_avg = 0.
42 |     model.eval()
43 |     with torch.no_grad():
44 |         for it, (images, labels, names) in enumerate(test_loader):
45 |             images = Variable(images)
46 |             labels = Variable(labels)
47 |             if args.gpu >= 0:
48 |                 images = images.cuda(args.gpu)
49 |                 labels = labels.cuda(args.gpu)
50 | 
51 |             logits = model(images)
52 |             loss = F.cross_entropy(logits, labels)
53 |             acc = calculate_accuracy(logits, labels)
54 |             loss_avg += float(loss)
55 |             acc_avg += float(acc)
56 | 
57 |             print('|- test iter %s/%s. loss: %.4f, acc: %.4f' \
58 |                 % (it+1, test_loader.n_iter, float(loss), float(acc)))
59 | 
60 |             predictions = logits.argmax(1)
61 |             for gtcid in range(n_class):
62 |                 for pcid in range(n_class):
63 |                     gt_mask = labels == gtcid
64 |                     pred_mask = predictions == pcid
65 |                     intersection = gt_mask * pred_mask
66 |                     cf[gtcid, pcid] += int(intersection.sum())
67 | 
68 |     overall_acc, acc, IoU = calculate_result(cf)
69 | 
70 |     print('| overall accuracy:', overall_acc)
71 |     print('| accuracy of each class:', acc)
72 |     print('| class accuracy avg:', acc.mean())
73 |     print('| IoU:', IoU)
74 |     print('| class IoU avg:', IoU.mean())
75 | 
76 | 
77 | if __name__ == '__main__':
78 | 
79 |     parser = argparse.ArgumentParser(description='Test MFNet with pytorch')
80 |     parser.add_argument('--model_name', '-M', type=str, default='MFNet')
81 |     parser.add_argument('--batch_size', '-B', type=int, default=16)
82 |     parser.add_argument('--gpu', '-G', type=int, default=0)
83 |     parser.add_argument('--num_workers', '-j', type=int, default=8)
84 |     args = parser.parse_args()
85 | 
86 |     model_dir = os.path.join(model_dir, args.model_name)
87 |     final_model_file = os.path.join(model_dir, 'final.pth')
88 |     assert os.path.exists(final_model_file), 'model file `%s` does not exist' % (final_model_file)
89 | 
90 |     print('| testing %s on GPU #%d with pytorch' % (args.model_name, args.gpu))
91 | 
92 |     main()
93 | 
--------------------------------------------------------------------------------
/train.py:
--------------------------------------------------------------------------------
1 | # coding:utf-8
2 | import os
3 | import argparse
4 | import time
5 | 
6 | import torch
7 | import torch.nn.functional as F
8 | from torch.autograd import Variable
9 | from torch.utils.data import DataLoader
10 | 
11 | from util.MF_dataset import MF_dataset
12 | from util.util import calculate_accuracy
13 | from util.augmentation import RandomFlip, RandomCrop, RandomCropOut, RandomBrightness, RandomNoise
14 | from model import MFNet, SegNet
15 | 
16 | from tqdm import tqdm
17 | 
18 | # config
19 | n_class = 9
20 | data_dir = '../../data/MF/'
21 | model_dir = 'weights/'
22 | augmentation_methods = [
23 |     RandomFlip(prob=0.5),
24 |     RandomCrop(crop_rate=0.1, prob=1.0),
25 |     # RandomCropOut(crop_rate=0.2, prob=1.0),
26 |     # RandomBrightness(bright_range=0.15, prob=0.9),
27 |     # RandomNoise(noise_range=5, prob=0.9),
28 | ]
29 | lr_start = 0.01
30 | lr_decay = 0.95
31 | 
32 | 
33 | def train(epo, model, train_loader, optimizer):
34 | 
35 |     lr_this_epo = lr_start * lr_decay**(epo-1) # exponential learning-rate decay per epoch
36 |     for param_group in optimizer.param_groups:
37 |         param_group['lr'] = lr_this_epo
38 | 
39 |     loss_avg = 0.
40 |     acc_avg = 0.
41 |     start_t = t = time.time()
42 |     model.train()
43 | 
44 |     for it, (images, labels, names) in enumerate(train_loader):
45 |         images = Variable(images)
46 |         labels = Variable(labels)
47 |         if args.gpu >= 0:
48 |             images = images.cuda(args.gpu)
49 |             labels = labels.cuda(args.gpu)
50 | 
51 |         optimizer.zero_grad()
52 |         logits = model(images)
53 |         loss = F.cross_entropy(logits, labels)
54 |         loss.backward()
55 |         optimizer.step()
56 | 
57 |         acc = calculate_accuracy(logits, labels)
58 |         loss_avg += float(loss)
59 |         acc_avg += float(acc)
60 | 
61 |         cur_t = time.time()
62 |         if cur_t-t > 5:
63 |             print('|- epo %s/%s. train iter %s/%s. %.2f img/sec loss: %.4f, acc: %.4f' \
64 |                 % (epo, args.epoch_max, it+1, train_loader.n_iter, (it+1)*args.batch_size/(cur_t-start_t), float(loss), float(acc)))
65 |             t += 5
66 | 
67 |     content = '| epo:%s/%s lr:%.4f train_loss_avg:%.4f train_acc_avg:%.4f ' \
68 |             % (epo, args.epoch_max, lr_this_epo, loss_avg/train_loader.n_iter, acc_avg/train_loader.n_iter)
69 |     print(content)
70 |     with open(log_file, 'a') as appender:
71 |         appender.write(content) # no newline: validation() appends its averages to the same log line
72 | 
73 | 
74 | def validation(epo, model, val_loader):
75 | 
76 |     loss_avg = 0.
77 |     acc_avg = 0.
78 |     start_t = time.time()
79 |     model.eval()
80 | 
81 |     with torch.no_grad():
82 |         for it, (images, labels, names) in enumerate(val_loader):
83 |             images = Variable(images)
84 |             labels = Variable(labels)
85 |             if args.gpu >= 0:
86 |                 images = images.cuda(args.gpu)
87 |                 labels = labels.cuda(args.gpu)
88 | 
89 |             logits = model(images)
90 |             loss = F.cross_entropy(logits, labels)
91 |             acc = calculate_accuracy(logits, labels)
92 |             loss_avg += float(loss)
93 |             acc_avg += float(acc)
94 | 
95 |             cur_t = time.time()
96 |             print('|- epo %s/%s. val iter %s/%s. %.2f img/sec loss: %.4f, acc: %.4f' \
97 |                 % (epo, args.epoch_max, it+1, val_loader.n_iter, (it+1)*args.batch_size/(cur_t-start_t), float(loss), float(acc)))
98 | 
99 |     content = '| val_loss_avg:%.4f val_acc_avg:%.4f\n' \
100 |             % (loss_avg/val_loader.n_iter, acc_avg/val_loader.n_iter)
101 |     print(content)
102 |     with open(log_file, 'a') as appender:
103 |         appender.write(content)
104 | 
105 | 
106 | def main():
107 | 
108 |     model = eval(args.model_name)(n_class=n_class)
109 |     if args.gpu >= 0: model.cuda(args.gpu)
110 |     optimizer = torch.optim.SGD(model.parameters(), lr=lr_start, momentum=0.9, weight_decay=0.0005)
111 |     # optimizer = torch.optim.Adam(model.parameters(), lr=lr_start)
112 | 
113 |     if args.epoch_from > 1:
114 |         print('| loading checkpoint file %s... ' % checkpoint_model_file, end='')
115 |         model.load_state_dict(torch.load(checkpoint_model_file, map_location={'cuda:0':'cuda:1'}))
116 |         optimizer.load_state_dict(torch.load(checkpoint_optim_file))
117 |         print('done!')
118 | 
119 |     train_dataset = MF_dataset(data_dir, 'train', have_label=True, transform=augmentation_methods)
120 |     val_dataset = MF_dataset(data_dir, 'val', have_label=True)
121 | 
122 |     train_loader = DataLoader(
123 |         dataset = train_dataset,
124 |         batch_size = args.batch_size,
125 |         shuffle = True,
126 |         num_workers = args.num_workers,
127 |         pin_memory = True,
128 |         drop_last = True
129 |     )
130 |     val_loader = DataLoader(
131 |         dataset = val_dataset,
132 |         batch_size = args.batch_size,
133 |         shuffle = False,
134 |         num_workers = args.num_workers,
135 |         pin_memory = True,
136 |         drop_last = False
137 |     )
138 |     train_loader.n_iter = len(train_loader)
139 |     val_loader.n_iter = len(val_loader)
140 | 
141 |     for epo in tqdm(range(args.epoch_from, args.epoch_max+1)):
142 |         print('\n| epo #%s begin...' % epo)
143 | 
144 |         train(epo, model, train_loader, optimizer)
145 |         validation(epo, model, val_loader)
146 | 
147 |         # save check point model
148 |         print('| saving check point model file... ', end='')
149 |         torch.save(model.state_dict(), checkpoint_model_file)
150 |         torch.save(optimizer.state_dict(), checkpoint_optim_file)
151 |         print('done!')
152 | 
153 |     os.rename(checkpoint_model_file, final_model_file)
154 | 
155 | if __name__ == '__main__':
156 | 
157 |     parser = argparse.ArgumentParser(description='Train MFNet with pytorch')
158 |     parser.add_argument('--model_name', '-M', type=str, default='MFNet')
159 |     parser.add_argument('--batch_size', '-B', type=int, default=8)
160 |     parser.add_argument('--epoch_max' , '-E', type=int, default=100)
161 |     parser.add_argument('--epoch_from', '-EF', type=int, default=1)
162 |     parser.add_argument('--gpu', '-G', type=int, default=0)
163 |     parser.add_argument('--num_workers', '-j', type=int, default=8)
164 |     args = parser.parse_args()
165 | 
166 |     model_dir = os.path.join(model_dir, args.model_name)
167 |     os.makedirs(model_dir, exist_ok=True)
168 |     checkpoint_model_file = os.path.join(model_dir, 'tmp.pth')
169 |     checkpoint_optim_file = os.path.join(model_dir, 'tmp.optim')
170 |     final_model_file = os.path.join(model_dir, 'final.pth')
171 |     log_file = os.path.join(model_dir, 'log.txt')
172 | 
173 |     print('| training %s on GPU #%d with pytorch' % (args.model_name, args.gpu))
174 |     print('| from epoch %d / %s' % (args.epoch_from, args.epoch_max))
175 |     print('| model will be saved in: %s' % model_dir)
176 | 
177 |     main()
--------------------------------------------------------------------------------
/util/MF_dataset.py:
--------------------------------------------------------------------------------
1 | # coding:utf-8
2 | import os
3 | import torch
4 | from torch.utils.data.dataset import Dataset
5 | from torch.utils.data import DataLoader
6 | import numpy as np
7 | from PIL import Image
8 | 
9 | 
10 | 
11 | 
12 | class MF_dataset(Dataset):
13 | 
14 |     def __init__(self, data_dir, split, have_label, input_h=480, input_w=640, transform=[]):
15 |         super(MF_dataset, self).__init__()
16 | 
17 |         assert split in ['train', 'val', 'test'], 'split must be "train"|"val"|"test"'
18 | 
19 |         with open(os.path.join(data_dir, split+'.txt'), 'r') as f:
20 |             self.names = [name.strip() for name in f.readlines()]
21 | 
22 |         self.data_dir = data_dir
23 |         self.split = split
24 |         self.input_h = input_h
25 |         self.input_w = input_w
26 |         self.transform = transform
27 |         self.is_train = have_label
28 |         self.n_data = len(self.names)
29 | 
30 | 
31 |     def read_image(self, name, folder):
32 |         file_path = os.path.join(self.data_dir, '%s/%s.png' % (folder, name))
33 |         image = np.asarray(Image.open(file_path)) # (h, w, c)
34 |         image.flags.writeable = True # make the array writable so augmentations can modify it in place
35 |         return image
36 | 
37 |     def get_train_item(self, index):
38 |         name = self.names[index]
39 |         image = self.read_image(name, 'images')
40 |         label = self.read_image(name, 'labels')
41 | 
42 |         for func in self.transform:
43 |             image, label = func(image, label)
44 | 
45 |         image = np.asarray(Image.fromarray(image).resize((self.input_w, self.input_h)), dtype=np.float32).transpose((2,0,1))/255
46 |         label = np.asarray(Image.fromarray(label).resize((self.input_w, self.input_h)), dtype=np.int64)
47 | 
48 |         return torch.tensor(image), torch.tensor(label), name
49 | 
50 |     def get_test_item(self, index):
51 |         name = self.names[index]
52 |         image = self.read_image(name, 'images')
53 |         image = np.asarray(Image.fromarray(image).resize((self.input_w, self.input_h)), dtype=np.float32).transpose((2,0,1))/255
54 | 
55 |         return torch.tensor(image), name
56 | 
57 | 
58 |     def __getitem__(self, index):
59 | 
60 |         if self.is_train is True:
61 |             return self.get_train_item(index)
62 |         else:
63 |             return self.get_test_item(index)
64 | 
65 |     def __len__(self):
66 |         return self.n_data
67 | 
68 | if __name__ == '__main__':
69 |     data_dir = '../../data/MF/'
70 |     MF_dataset(data_dir, 'train', have_label=True)
--------------------------------------------------------------------------------
/util/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/haqishen/MFNet-pytorch/fff9e28ca47adb1491ea3c6c29df958da91da092/util/__init__.py
--------------------------------------------------------------------------------
/util/augmentation.py:
--------------------------------------------------------------------------------
1 | # coding:utf-8
2 | import numpy as np
3 | from PIL import Image
4 | 
5 | 
6 | 
7 | class RandomFlip():
8 |     def __init__(self, prob=0.5):
9 |         super(RandomFlip, self).__init__()
10 |         self.prob = prob
11 | 
12 |     def __call__(self, image, label):
13 |         if np.random.rand() < self.prob:
14 |             image = image[:,::-1]
15 |             label = label[:,::-1]
16 |         return image, label
17 | 
18 | 
19 | class RandomCrop():
20 |     def __init__(self, crop_rate=0.1, prob=1.0):
21 |         super(RandomCrop, self).__init__()
22 |         self.crop_rate = crop_rate
23 |         self.prob = prob
24 | 
25 |     def __call__(self, image, label):
26 |         if np.random.rand() < self.prob:
27 |             h, w, c = image.shape # arrays are (h, w, c)
28 | 
29 |             h1 = np.random.randint(0, h*self.crop_rate)
30 |             w1 = np.random.randint(0, w*self.crop_rate)
31 |             h2 = np.random.randint(h-h*self.crop_rate, h+1)
32 |             w2 = np.random.randint(w-w*self.crop_rate, w+1)
33 | 
34 |             image = image[h1:h2, w1:w2]
35 |             label = label[h1:h2, w1:w2]
36 | 
37 |         return image, label
38 | 
39 | 
40 | class RandomCropOut():
41 |     def __init__(self, crop_rate=0.2, prob=1.0):
42 |         super(RandomCropOut, self).__init__()
43 |         self.crop_rate = crop_rate
44 |         self.prob = prob
45 | 
46 |     def __call__(self, image, label):
47 |         if np.random.rand() < self.prob:
48 |             h, w, c = image.shape
49 | 
50 |             h1 = np.random.randint(0, h*self.crop_rate)
51 |             w1 = np.random.randint(0, w*self.crop_rate)
52 |             h2 = int(h1 + h*self.crop_rate)
53 |             w2 = int(w1 + w*self.crop_rate)
54 | 
55 |             image[h1:h2, w1:w2] = 0
56 |             label[h1:h2, w1:w2] = 0
57 | 
58 |         return image, label
59 | 
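60 | 
61 | # NOTE: every augmentation class in this file is a simple callable: it takes
62 | # (image, label) numpy arrays and returns the (possibly) transformed pair, so
63 | # MF_dataset.get_train_item can chain them, e.g.:
64 | #     image, label = RandomFlip(prob=0.5)(image, label)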
65 | class RandomBrightness():
66 |     def __init__(self, bright_range=0.15, prob=0.9):
67 |         super(RandomBrightness, self).__init__()
68 |         self.bright_range = bright_range
69 |         self.prob = prob
70 | 
71 |     def __call__(self, image, label):
72 |         if np.random.rand() < self.prob:
73 |             bright_factor = np.random.uniform(1-self.bright_range, 1+self.bright_range)
74 |             image = (image * bright_factor).astype(image.dtype)
75 | 
76 |         return image, label
77 | 
78 | 
79 | class RandomNoise():
80 |     def __init__(self, noise_range=5, prob=0.9):
81 |         super(RandomNoise, self).__init__()
82 |         self.noise_range = noise_range
83 |         self.prob = prob
84 | 
85 |     def __call__(self, image, label):
86 |         if np.random.rand() < self.prob:
87 |             h, w, c = image.shape
88 | 
89 |             noise = np.random.randint(
90 |                 -self.noise_range,
91 |                 self.noise_range,
92 |                 (h,w,c)
93 |             )
94 | 
95 |             image = (image + noise).clip(0,255).astype(image.dtype)
96 | 
97 |         return image, label
98 | 
99 | 
100 | 
101 | 
--------------------------------------------------------------------------------
/util/util.py:
--------------------------------------------------------------------------------
1 | # coding:utf-8
2 | import numpy as np
3 | 
4 | from PIL import Image
5 | 
6 | 
7 | def calculate_accuracy(logits, labels):
8 |     # inputs should be torch.tensor
9 |     predictions = logits.argmax(1)
10 |     no_count = (labels==-1).sum()
11 |     count = ((predictions==labels)*(labels!=-1)).sum()
12 |     acc = count.float() / (labels.numel()-no_count).float()
13 |     return acc
14 | 
15 | 
16 | def calculate_result(cf):
17 |     n_class = cf.shape[0]
18 |     conf = np.zeros((n_class,n_class))
19 |     IoU = np.zeros(n_class)
20 |     conf[:,0] = cf[:,0]/cf[:,0].sum()
21 |     for cid in range(1,n_class):
22 |         if cf[:,cid].sum() > 0:
23 |             conf[:,cid] = cf[:,cid]/cf[:,cid].sum()
24 |             IoU[cid] = cf[cid,cid]/(cf[cid,1:].sum()+cf[1:,cid].sum()-cf[cid,cid])
25 |     overall_acc = np.diag(cf[1:,1:]).sum()/cf[1:,:].sum() # class 0 (unlabelled) is excluded
26 |     acc = np.diag(conf)
27 | 
28 |     return overall_acc, acc, IoU
29 | 
30 | 
31 | # for visualization
32 | def get_palette():
33 |     unlabelled = [0,0,0]
34 |     car = [64,0,128]
35 |     person = [64,64,0]
36 |     bike = [0,128,192]
37 |     curve = [0,0,192]
38 |     car_stop = [128,128,0]
39 |     guardrail = [64,64,128]
40 |     color_cone = [192,128,128]
41 |     bump = [192,64,0]
42 |     palette = np.array([unlabelled, car, person, bike, curve, car_stop, guardrail, color_cone, bump])
43 |     return palette
44 | 
45 | 
46 | def visualize(names, predictions):
47 |     palette = get_palette()
48 | 
49 |     for (i, pred) in enumerate(predictions):
50 |         pred = pred.cpu().numpy()
51 |         img = np.zeros((pred.shape[0], pred.shape[1], 3), dtype=np.uint8)
52 |         for cid in range(1, int(predictions.max()) + 1): # +1 so the largest predicted class id is colored too
53 |             img[pred == cid] = palette[cid]
54 | 
55 |         img = Image.fromarray(np.uint8(img))
56 |         img.save(names[i].replace('.png', '_pred.png'))
--------------------------------------------------------------------------------
/weights/MFNet/final.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/haqishen/MFNet-pytorch/fff9e28ca47adb1491ea3c6c29df958da91da092/weights/MFNet/final.pth
--------------------------------------------------------------------------------
/weights/MFNet/log.txt:
--------------------------------------------------------------------------------
1 | | epo:1/80 lr:0.0100 train_loss_avg:0.7392 train_acc_avg:0.8455 | val_loss_avg:0.3764 val_acc_avg:0.9258
2 | | epo:2/80 lr:0.0094 train_loss_avg:0.3217 train_acc_avg:0.9291 | val_loss_avg:0.3029 val_acc_avg:0.9357
3 | | epo:3/80 lr:0.0088 train_loss_avg:0.2727 train_acc_avg:0.9360 | val_loss_avg:0.2915 val_acc_avg:0.9367
4 | | epo:4/80 lr:0.0083 train_loss_avg:0.2462 train_acc_avg:0.9401 | val_loss_avg:0.3018 val_acc_avg:0.9358
5 | | epo:5/80 lr:0.0078 train_loss_avg:0.2322 train_acc_avg:0.9421 | val_loss_avg:0.3646 val_acc_avg:0.9052
6 | | epo:6/80 lr:0.0073 train_loss_avg:0.2141 train_acc_avg:0.9458 | val_loss_avg:0.2528 val_acc_avg:0.9405
7 | | epo:7/80 lr:0.0069 train_loss_avg:0.2057 train_acc_avg:0.9471 | val_loss_avg:0.2597 val_acc_avg:0.9369
8 | | epo:8/80 lr:0.0065 train_loss_avg:0.1877 train_acc_avg:0.9511 | val_loss_avg:0.2479 val_acc_avg:0.9385
9 | | epo:9/80 lr:0.0061 train_loss_avg:0.1785 train_acc_avg:0.9531 | val_loss_avg:0.2411 val_acc_avg:0.9385
10 | | epo:10/80 lr:0.0057 train_loss_avg:0.1676 train_acc_avg:0.9553 | val_loss_avg:0.2151 val_acc_avg:0.9453
11 | | epo:11/80 lr:0.0054 train_loss_avg:0.1594 train_acc_avg:0.9575 | val_loss_avg:0.2223 val_acc_avg:0.9426
12 | | epo:12/80 lr:0.0051 train_loss_avg:0.1553 train_acc_avg:0.9581 | val_loss_avg:0.2203 val_acc_avg:0.9437
13 | | epo:13/80 lr:0.0048 train_loss_avg:0.1491 train_acc_avg:0.9594 | val_loss_avg:0.2297 val_acc_avg:0.9437
14 | | epo:14/80 lr:0.0045 train_loss_avg:0.1436 train_acc_avg:0.9607 | val_loss_avg:0.2071 val_acc_avg:0.9463
15 | | epo:15/80 lr:0.0042 train_loss_avg:0.1356 train_acc_avg:0.9626 | val_loss_avg:0.2062 val_acc_avg:0.9459
16 | | epo:16/80 lr:0.0040 train_loss_avg:0.1305 train_acc_avg:0.9636 | val_loss_avg:0.2042 val_acc_avg:0.9463
17 | | epo:17/80 lr:0.0037 train_loss_avg:0.1266 train_acc_avg:0.9647 | val_loss_avg:0.1998 val_acc_avg:0.9473
18 | | epo:18/80 lr:0.0035 train_loss_avg:0.1261 train_acc_avg:0.9644 | val_loss_avg:0.1909 val_acc_avg:0.9488
19 | | epo:19/80 lr:0.0033 train_loss_avg:0.1183 train_acc_avg:0.9667 | val_loss_avg:0.1884 val_acc_avg:0.9497
20 | | epo:20/80 lr:0.0031 train_loss_avg:0.1149 train_acc_avg:0.9674 | val_loss_avg:0.1817 val_acc_avg:0.9510
21 | | epo:21/80 lr:0.0029 train_loss_avg:0.1129 train_acc_avg:0.9675 | val_loss_avg:0.2173 val_acc_avg:0.9385
22 | | epo:22/80 lr:0.0027 train_loss_avg:0.1095 train_acc_avg:0.9688 | val_loss_avg:0.2013 val_acc_avg:0.9444
23 | | epo:23/80 lr:0.0026 train_loss_avg:0.1078 train_acc_avg:0.9692 | val_loss_avg:0.1811 val_acc_avg:0.9507
24 | | epo:24/80 lr:0.0024 train_loss_avg:0.1037 train_acc_avg:0.9704 | val_loss_avg:0.1809 val_acc_avg:0.9514
25 | | epo:25/80 lr:0.0023 train_loss_avg:0.1029 train_acc_avg:0.9705 | val_loss_avg:0.1805 val_acc_avg:0.9516
26 | | epo:26/80 lr:0.0021 train_loss_avg:0.1004 train_acc_avg:0.9712 | val_loss_avg:0.1807 val_acc_avg:0.9516
27 | | epo:27/80 lr:0.0020 train_loss_avg:0.0994 train_acc_avg:0.9714 | val_loss_avg:0.1763 val_acc_avg:0.9528
28 | | epo:28/80 lr:0.0019 train_loss_avg:0.0983 train_acc_avg:0.9715 | val_loss_avg:0.1716 val_acc_avg:0.9529
29 | | epo:29/80 lr:0.0018 train_loss_avg:0.0954 train_acc_avg:0.9725 | val_loss_avg:0.1755 val_acc_avg:0.9526
30 | | epo:30/80 lr:0.0017 train_loss_avg:0.0931 train_acc_avg:0.9731 | val_loss_avg:0.1773 val_acc_avg:0.9519
31 | | epo:31/80 lr:0.0016 train_loss_avg:0.0917 train_acc_avg:0.9735 | val_loss_avg:0.1743 val_acc_avg:0.9537
32 | | epo:32/80 lr:0.0015 train_loss_avg:0.0909 train_acc_avg:0.9738 | val_loss_avg:0.1765 val_acc_avg:0.9528
33 | | epo:33/80 lr:0.0014 train_loss_avg:0.0896 train_acc_avg:0.9739 | val_loss_avg:0.1747 val_acc_avg:0.9529
34 | | epo:34/80 lr:0.0013 train_loss_avg:0.0872 train_acc_avg:0.9745 | val_loss_avg:0.1725 val_acc_avg:0.9531
35 | | epo:35/80 lr:0.0012 train_loss_avg:0.0889 train_acc_avg:0.9741 | val_loss_avg:0.1739 val_acc_avg:0.9541
36 | | epo:36/80 lr:0.0011 train_loss_avg:0.0872 train_acc_avg:0.9745 | val_loss_avg:0.1699 val_acc_avg:0.9544
37 | | epo:37/80 lr:0.0011 train_loss_avg:0.0862 train_acc_avg:0.9748 | val_loss_avg:0.1775 val_acc_avg:0.9523
38 | | epo:38/80 lr:0.0010 train_loss_avg:0.0842 train_acc_avg:0.9753 | val_loss_avg:0.1747 val_acc_avg:0.9537
39 | | epo:39/80 lr:0.0010 train_loss_avg:0.0849 train_acc_avg:0.9752 | val_loss_avg:0.1715 val_acc_avg:0.9542
40 | | epo:40/80 lr:0.0009 train_loss_avg:0.0828 train_acc_avg:0.9759 | val_loss_avg:0.1703 val_acc_avg:0.9546
41 | | epo:41/80 lr:0.0008 train_loss_avg:0.0823 train_acc_avg:0.9760 | val_loss_avg:0.1732 val_acc_avg:0.9540
42 | | epo:42/80 lr:0.0008 train_loss_avg:0.0812 train_acc_avg:0.9762 | val_loss_avg:0.1785 val_acc_avg:0.9526
43 | | epo:43/80 lr:0.0007 train_loss_avg:0.0814 train_acc_avg:0.9761 | val_loss_avg:0.1717 val_acc_avg:0.9543
44 | | epo:44/80 lr:0.0007 train_loss_avg:0.0800 train_acc_avg:0.9764 | val_loss_avg:0.1722 val_acc_avg:0.9541
45 | | epo:45/80 lr:0.0007 train_loss_avg:0.0818 train_acc_avg:0.9760 | val_loss_avg:0.1782 val_acc_avg:0.9533
46 | | epo:46/80 lr:0.0006 train_loss_avg:0.0810 train_acc_avg:0.9761 | val_loss_avg:0.1742 val_acc_avg:0.9537
47 | | epo:47/80 lr:0.0006 train_loss_avg:0.0784 train_acc_avg:0.9769 | val_loss_avg:0.1741 val_acc_avg:0.9541
48 | | epo:48/80 lr:0.0005 train_loss_avg:0.0798 train_acc_avg:0.9765 | val_loss_avg:0.1735 val_acc_avg:0.9543
49 | | epo:49/80 lr:0.0005 train_loss_avg:0.0779 train_acc_avg:0.9772 | val_loss_avg:0.1734 val_acc_avg:0.9546
50 | | epo:50/80 lr:0.0005 train_loss_avg:0.0771 train_acc_avg:0.9773 | val_loss_avg:0.1710 val_acc_avg:0.9542
51 | | epo:51/80 lr:0.0005 train_loss_avg:0.0772 train_acc_avg:0.9774 | val_loss_avg:0.1727 val_acc_avg:0.9541
52 | | epo:52/80 lr:0.0004 train_loss_avg:0.0777 train_acc_avg:0.9771 | val_loss_avg:0.1725 val_acc_avg:0.9546
53 | | epo:53/80 lr:0.0004 train_loss_avg:0.0777 train_acc_avg:0.9771 | val_loss_avg:0.1748 val_acc_avg:0.9540
54 | | epo:54/80 lr:0.0004 train_loss_avg:0.0779 train_acc_avg:0.9771 | val_loss_avg:0.1763 val_acc_avg:0.9533
55 | | epo:55/80 lr:0.0004 train_loss_avg:0.0769 train_acc_avg:0.9773 | val_loss_avg:0.1702 val_acc_avg:0.9551
56 | | epo:56/80 lr:0.0003 train_loss_avg:0.0772 train_acc_avg:0.9771 | val_loss_avg:0.1754 val_acc_avg:0.9541
57 | | epo:57/80 lr:0.0003 train_loss_avg:0.0765 train_acc_avg:0.9774 | val_loss_avg:0.1715 val_acc_avg:0.9548
58 | | epo:58/80 lr:0.0003 train_loss_avg:0.0760 train_acc_avg:0.9777 | val_loss_avg:0.1723 val_acc_avg:0.9544
59 | | epo:59/80 lr:0.0003 train_loss_avg:0.0782 train_acc_avg:0.9769 | val_loss_avg:0.1728 val_acc_avg:0.9544
60 | | epo:60/80 lr:0.0003 train_loss_avg:0.0752 train_acc_avg:0.9779 | val_loss_avg:0.1747 val_acc_avg:0.9540
61 | | epo:61/80 lr:0.0002 train_loss_avg:0.0756 train_acc_avg:0.9778 | val_loss_avg:0.1723 val_acc_avg:0.9545
62 | | epo:62/80 lr:0.0002 train_loss_avg:0.0758 train_acc_avg:0.9777 | val_loss_avg:0.1724 val_acc_avg:0.9546
63 | | epo:63/80 lr:0.0002 train_loss_avg:0.0747 train_acc_avg:0.9780 | val_loss_avg:0.1724 val_acc_avg:0.9545
64 | | epo:64/80 lr:0.0002 train_loss_avg:0.0759 train_acc_avg:0.9774 | val_loss_avg:0.1732 val_acc_avg:0.9545
65 | | epo:65/80 lr:0.0002 train_loss_avg:0.0752 train_acc_avg:0.9777 | val_loss_avg:0.1750 val_acc_avg:0.9542
66 | | epo:66/80 lr:0.0002 train_loss_avg:0.0746 train_acc_avg:0.9780 | val_loss_avg:0.1739 val_acc_avg:0.9543
67 | | epo:67/80 lr:0.0002 train_loss_avg:0.0734 train_acc_avg:0.9783 | val_loss_avg:0.1718 val_acc_avg:0.9547
68 | | epo:68/80 lr:0.0002 train_loss_avg:0.0738 train_acc_avg:0.9782 | val_loss_avg:0.1738 val_acc_avg:0.9543
69 | | epo:69/80 lr:0.0001 train_loss_avg:0.0742 train_acc_avg:0.9782 | val_loss_avg:0.1749 val_acc_avg:0.9542
70 | | epo:70/80 lr:0.0001 train_loss_avg:0.0745 train_acc_avg:0.9780 | val_loss_avg:0.1725 val_acc_avg:0.9544
71 | | epo:71/80 lr:0.0001 train_loss_avg:0.0738 train_acc_avg:0.9783 | val_loss_avg:0.1750 val_acc_avg:0.9542
72 | | epo:72/80 lr:0.0001 train_loss_avg:0.0749 train_acc_avg:0.9780 | val_loss_avg:0.1757 val_acc_avg:0.9544
73 | | epo:73/80 lr:0.0001 train_loss_avg:0.0738 train_acc_avg:0.9782 | val_loss_avg:0.1717 val_acc_avg:0.9547
74 | | epo:74/80 lr:0.0001 train_loss_avg:0.0742 train_acc_avg:0.9781 | val_loss_avg:0.1737 val_acc_avg:0.9543
75 | | epo:75/80 lr:0.0001 train_loss_avg:0.0734 train_acc_avg:0.9785 | val_loss_avg:0.1736 val_acc_avg:0.9548
76 | | epo:76/80 lr:0.0001 train_loss_avg:0.0733 train_acc_avg:0.9784 | val_loss_avg:0.1717 val_acc_avg:0.9549
77 | | epo:77/80 lr:0.0001 train_loss_avg:0.0733 train_acc_avg:0.9784 | val_loss_avg:0.1734 val_acc_avg:0.9545
78 | | epo:78/80 lr:0.0001 train_loss_avg:0.0744 train_acc_avg:0.9780 | val_loss_avg:0.1729 val_acc_avg:0.9547
79 | | epo:79/80 lr:0.0001 train_loss_avg:0.0742 train_acc_avg:0.9780 | val_loss_avg:0.1739 val_acc_avg:0.9544
80 | | epo:80/80 lr:0.0001 train_loss_avg:0.0736 train_acc_avg:0.9783 | val_loss_avg:0.1727 val_acc_avg:0.9547
81 | 
--------------------------------------------------------------------------------
/weights/MFNet/tmp.optim:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/haqishen/MFNet-pytorch/fff9e28ca47adb1491ea3c6c29df958da91da092/weights/MFNet/tmp.optim
--------------------------------------------------------------------------------