├── libs ├── models │ ├── __init__.py │ ├── BESNet.py │ └── FCN.py └── losses │ └── loss.py ├── snapshots ├── potsdam │ └── BESNet_0.000_0 │ │ └── 2022-01-01_11-11-11 │ │ └── checkpoint │ │ └── best_metrics.pth └── vaihingen │ └── BESNet0.000_0 │ └── 2022-01-01_11-11-11 │ └── checkpoint │ └── best_metrics.pth ├── Eval.sh ├── train_potsdam_fcn.sh ├── train_vaihingen_fcn.sh ├── README.md └── Eval.py /libs/models/__init__.py: -------------------------------------------------------------------------------- 1 | from .FCN import BESNet -------------------------------------------------------------------------------- /snapshots/potsdam/BESNet_0.000_0/2022-01-01_11-11-11/checkpoint/best_metrics.pth: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /snapshots/vaihingen/BESNet0.000_0/2022-01-01_11-11-11/checkpoint/best_metrics.pth: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /Eval.sh: -------------------------------------------------------------------------------- 1 | dataset="potsdam" 2 | 3 | CUDA_VISIBLE_DEVICES='0' python -u Eval.py \ 4 | --backbone 'resnet18' \ 5 | --model "BESNet" \ 6 | --num_classes 6 \ 7 | --batch_size 1 \ 8 | --num_workers 1 \ 9 | --overlap 0.33333 \ 10 | --flip True \ 11 | --scales 0.75 1.0 1.25 \ 12 | --save_flag False \ 13 | --save_dir "./preds/${dataset}/2022.01.01/" \ 14 | --dataset $dataset \ 15 | --data_dir "/media/FlyC235/Fly/dataset/${dataset}/" \ 16 | --test_list "/media/FlyC235/Fly/dataset/${dataset}/${dataset}_test_fullsize.txt" \ 17 | --checkpoint "/media/FlyC235/Fly/BESnet/snapshots/potsdam/BESNet_0.000_0/2022-01-01_11-11-11/checkpoint/best_metrics.pth" \ 18 | -------------------------------------------------------------------------------- /train_potsdam_fcn.sh: -------------------------------------------------------------------------------- 1 | set -e 2 | Model="BESNet" 3 | dataset="potsdam" 4 | 5 | CUDA_VISIBLE_DEVICES='0' python -u train_fcn.py \ 6 | --model $Model \ 7 | --backbone 'resnet18' \ 8 | --aux_classifier True \ 9 | --auxloss_weight 0.4 \ 10 | --num_classes 6 \ 11 | --batch_size 4 \ 12 | --dataset $dataset \ 13 | --save_dir './snapshots/' \ 14 | --data_dir "/media/FlyC235/Fly/dataset/${dataset}/" \ 15 | --train_list "/media/FlyC235/Fly/dataset/${dataset}/train.txt" \ 16 | --test_list "/media/FlyC235/Fly/dataset/${dataset}/test.txt" \ 17 | --learning_rate 5e-3 \ 18 | --weight_decay 5e-4 \ 19 | --num_workers 4 \ 20 | --max_epoches 200 \ 21 | --warmup_epochs 0 \ 22 | --no-val False -------------------------------------------------------------------------------- /train_vaihingen_fcn.sh: -------------------------------------------------------------------------------- 1 | set -e 2 | Model="BESNet" 3 | dataset="vaihingen" 4 | 5 | CUDA_VISIBLE_DEVICES='0' python -u train_fcn.py \ 6 | --model $Model \ 7 | --backbone 'resnet18' \ 8 | --aux_classifier True \ 9 | --auxloss_weight 0.4 \ 10 | --num_classes 6 \ 11 | --batch_size 2 \ 12 | --dataset $dataset \ 13 | --save_dir './snapshots/' \ 14 | --data_dir "/media/FlyC235/Fly/dataset/${dataset}/" \ 15 | --train_list "/media/FlyC235/Fly/dataset/${dataset}/train.txt" \ 16 | --test_list "/media/FlyC235/Fly/dataset/${dataset}/test.txt" \ 17 | --learning_rate 5e-3 \ 18 | --weight_decay 5e-4 \ 19 | --num_workers 4 \ 20 | --max_epoches 120 \ 21 | --warmup_epochs 0 \ 22 | --no-val False -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Boundary Enhancing Semantic Context Network for Parsing High-resolution Remote Sensing Images 2 | - Updating...... 3 | 4 | The code is based on FCN_8s 5 | 6 | # Datasets 7 | - ISPRS [Vaihingen](https://www2.isprs.org/commissions/comm2/wg4/benchmark/2d-sem-label-vaihingen/) dataset 8 | - ISPRS [Potsdam](https://www2.isprs.org/commissions/comm2/wg4/benchmark/2d-sem-label-potsdam/) dataset 9 | - The original dataset can be requested for the download from [here](https://www2.isprs.org/commissions/comm2/wg4/benchmark/data-request-form/). 10 | - You should cut the training images as well as corresponding labels into patches with an overlap of 171 pixels. 11 | 12 | # Requiements 13 | - Python == 3.7.10 14 | - PyTorch == 1.8.1 15 | - CUDA ==10.1 16 | 17 | # Train 18 | For Vaihingen, run: 19 | ``` 20 | sh train_vaihingen_fcn.sh 21 | ``` 22 | For Potsdam, run: 23 | ``` 24 | sh train_potsdam_fcn.sh 25 | ``` 26 | The results will be saved in the `./snapshots/` folder. 27 | 28 | # Test 29 | For test, run: 30 | ``` 31 | sh Eval.sh 32 | ``` 33 | For different datasets, please manully change `dataset` in `Eval.sh`. -------------------------------------------------------------------------------- /libs/losses/loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | 6 | def dice_loss_func(input, target): 7 | smooth = 1. 8 | n = input.size(0) 9 | iflat = input.view(n, -1) 10 | tflat = target.view(n, -1) 11 | intersection = (iflat * tflat).sum(1) 12 | loss = 1 - ((2. * intersection + smooth) / 13 | (iflat.sum(1) + tflat.sum(1) + smooth)) 14 | return loss.mean() 15 | 16 | class CriterionDSN(nn.Module): 17 | def __init__(self, class_weight, aux_classifier, loss_weight=1.0, ignore_index=255, reduction='mean'): 18 | super(CriterionDSN, self).__init__() 19 | 20 | self.loss_weight = loss_weight 21 | self.ignore_index = ignore_index 22 | self.reduction = reduction 23 | self.aux_classifier = aux_classifier 24 | self.criterion1 = nn.CrossEntropyLoss(weight=class_weight, ignore_index=ignore_index, reduction=reduction) 25 | if self.aux_classifier: 26 | self.criterion2 = nn.CrossEntropyLoss(weight=None, ignore_index=ignore_index, reduction=reduction) 27 | 28 | def forward(self, preds, target): 29 | if self.aux_classifier: 30 | assert preds[0].shape[2:] == target.shape[1:], f"preds[0] shape must be equality to target." 31 | loss1 = self.criterion1(preds[0], target) 32 | assert preds[1].shape[2:] == target.shape[1:], f"preds[0] shape must be equality to target." 33 | loss2 = self.criterion2(preds[1], target) 34 | return loss1 + loss2 * self.loss_weight 35 | 36 | # without aux_classifier 37 | assert preds.shape[2:] == target.shape[1:], f"preds[0] shape must be equality to target." 38 | return self.criterion1(preds, target) 39 | 40 | class BoundaryLoss(nn.Module): 41 | def __init__(self, *args, **kwargs): 42 | super(BoundaryLoss, self).__init__() 43 | 44 | self.laplacian_kernel = torch.tensor( 45 | [-1, -1, -1, -1, 8, -1, -1, -1, -1], 46 | dtype=torch.float32).reshape(1, 1, 3, 3).requires_grad_(False).type(torch.cuda.FloatTensor) 47 | 48 | self.fuse_kernel = torch.nn.Parameter(torch.tensor([[6./10], [3./10], [1./10]], 49 | dtype=torch.float32).reshape(1, 3, 1, 1).type(torch.cuda.FloatTensor)) 50 | 51 | def forward(self, boundary_logits, gtmasks): 52 | 53 | boundary_targets = F.conv2d(gtmasks.unsqueeze(1).type(torch.cuda.FloatTensor), self.laplacian_kernel, padding=1) 54 | boundary_targets = boundary_targets.clamp(min=0) 55 | boundary_targets[boundary_targets > 0.1] = 1 56 | boundary_targets[boundary_targets <= 0.1] = 0 57 | 58 | boundary_targets_x2 = F.conv2d(gtmasks.unsqueeze(1).type(torch.cuda.FloatTensor), self.laplacian_kernel, stride=2, padding=1) 59 | boundary_targets_x2 = boundary_targets_x2.clamp(min=0) 60 | 61 | boundary_targets_x4 = F.conv2d(gtmasks.unsqueeze(1).type(torch.cuda.FloatTensor), self.laplacian_kernel, stride=4, padding=1) 62 | boundary_targets_x4 = boundary_targets_x4.clamp(min=0) 63 | 64 | boundary_targets_x8 = F.conv2d(gtmasks.unsqueeze(1).type(torch.cuda.FloatTensor), self.laplacian_kernel, stride=8, padding=1) 65 | boundary_targets_x8 = boundary_targets_x8.clamp(min=0) 66 | 67 | boundary_targets_x8_up = F.interpolate(boundary_targets_x8, boundary_targets.shape[2:], mode='nearest') 68 | boundary_targets_x4_up = F.interpolate(boundary_targets_x4, boundary_targets.shape[2:], mode='nearest') 69 | boundary_targets_x2_up = F.interpolate(boundary_targets_x2, boundary_targets.shape[2:], mode='nearest') 70 | 71 | boundary_targets_x2_up[boundary_targets_x2_up > 0.1] = 1 72 | boundary_targets_x2_up[boundary_targets_x2_up <= 0.1] = 0 73 | 74 | 75 | boundary_targets_x4_up[boundary_targets_x4_up > 0.1] = 1 76 | boundary_targets_x4_up[boundary_targets_x4_up <= 0.1] = 0 77 | 78 | 79 | boundary_targets_x8_up[boundary_targets_x8_up > 0.1] = 1 80 | boundary_targets_x8_up[boundary_targets_x8_up <= 0.1] = 0 81 | 82 | boudary_targets_pyramids = torch.stack((boundary_targets, boundary_targets_x2_up, boundary_targets_x4_up), dim=1) 83 | 84 | boudary_targets_pyramids = boudary_targets_pyramids.squeeze(2) 85 | boudary_targets_pyramid = F.conv2d(boudary_targets_pyramids, self.fuse_kernel) 86 | 87 | boudary_targets_pyramid[boudary_targets_pyramid > 0.1] = 1 88 | boudary_targets_pyramid[boudary_targets_pyramid <= 0.1] = 0 89 | 90 | if boundary_logits.shape[-1] != boundary_targets.shape[-1]: 91 | boundary_logits = F.interpolate( 92 | boundary_logits, boundary_targets.shape[2:], mode='bilinear', align_corners=True) 93 | 94 | bce_loss = F.binary_cross_entropy_with_logits(boundary_logits, boudary_targets_pyramid) 95 | dice_loss = dice_loss_func(torch.sigmoid(boundary_logits), boudary_targets_pyramid) 96 | return bce_loss, dice_loss -------------------------------------------------------------------------------- /libs/models/BESNet.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import torch 3 | import torch.nn as nn 4 | import torch.nn.functional as F 5 | from typing import ForwardRef 6 | from torch._C import _logging_set_logger 7 | from torch.nn.modules.container import Sequential 8 | 9 | class GlobalAvgPool2d(nn.Module): 10 | def __init__(self): 11 | super(GlobalAvgPool2d, self).__init__() 12 | logging.info("Global Average Pooling Initialized") 13 | 14 | def forward(self, inputs): 15 | in_size = inputs.size() 16 | return inputs.view((in_size[0], in_size[1], -1)).mean(dim=2) 17 | 18 | class ConvBnReLU(nn.Sequential): 19 | def __init__( 20 | self, in_ch, out_ch, kernel_size, stride, padding, dilation, relu=True 21 | ): 22 | super(ConvBnReLU, self).__init__() 23 | self.add_module( 24 | "conv", 25 | nn.Conv2d( 26 | in_ch, out_ch, kernel_size, stride, padding, dilation, bias=False 27 | ), 28 | ) 29 | self.add_module("bn", nn.BatchNorm2d(out_ch)) 30 | 31 | if relu: 32 | self.add_module("relu", nn.ReLU()) 33 | 34 | for m in self.modules(): 35 | if isinstance(m, nn.Conv2d): 36 | nn.init.kaiming_normal_(m.weight, mode='fan_out') 37 | if m.bias is not None: 38 | nn.init.constant_(m.bias, 0) 39 | elif isinstance(m, nn.BatchNorm2d): 40 | nn.init.constant_(m.weight, 1) 41 | nn.init.constant_(m.bias, 0) 42 | 43 | def Upsample(x, size): 44 | return nn.functional.interpolate(x, size=size, mode='bilinear', 45 | align_corners=True) 46 | 47 | def ASPPConv(in_channels, out_channels, atrous_rate): 48 | block = nn.Sequential( 49 | nn.Conv2d(in_channels, out_channels, 3, padding=atrous_rate, 50 | dilation=atrous_rate, bias=False), 51 | nn.BatchNorm2d(out_channels), 52 | nn.ReLU(True)) 53 | return block 54 | 55 | class AsppPooling(nn.Module): 56 | def __init__(self, in_channels, out_channels): 57 | super(AsppPooling, self).__init__() 58 | self.gap = nn.Sequential(nn.AdaptiveAvgPool2d(1), 59 | nn.Conv2d(in_channels, out_channels, 1, bias=False), 60 | nn.BatchNorm2d(out_channels), 61 | nn.ReLU(True)) 62 | 63 | def forward(self, x): 64 | _, _, h, w = x.size() 65 | pool = self.gap(x) 66 | return Upsample(pool, (h,w)) 67 | 68 | class ASPP_Module(nn.Module): 69 | def __init__(self, in_channels, atrous_rates): 70 | super(ASPP_Module, self).__init__() 71 | out_channels = in_channels // 8 72 | rate1, rate2, rate3 = tuple(atrous_rates) 73 | self.b0 = nn.Sequential( 74 | nn.Conv2d(in_channels, out_channels, 1, bias=False), 75 | nn.BatchNorm2d(out_channels), 76 | nn.ReLU(True)) 77 | self.b1 = ASPPConv(in_channels, out_channels, rate1) 78 | self.b2 = ASPPConv(in_channels, out_channels, rate2) 79 | self.b3 = ASPPConv(in_channels, out_channels, rate3) 80 | self.b4 = AsppPooling(in_channels, out_channels) 81 | 82 | def forward(self, x): 83 | feat0 = self.b0(x) 84 | feat1 = self.b1(x) 85 | feat2 = self.b2(x) 86 | feat3 = self.b3(x) 87 | feat4 = self.b4(x) 88 | y = torch.cat((feat0, feat1, feat2, feat3, feat4), 1) 89 | return y 90 | 91 | class BE_Module(nn.Module): 92 | def __init__(self, in_ch1, in_ch2, in_ch5, mid_ch, out_ch, n_class): 93 | super(BE_Module, self).__init__() 94 | 95 | self.convb_1 = ConvBnReLU(in_ch1, mid_ch, kernel_size=1, stride=1, padding=0, dilation=1) 96 | self.convb_2 = ConvBnReLU(in_ch2, mid_ch, kernel_size=1, stride=1, padding=0, dilation=1) 97 | self.convb_5 = ConvBnReLU(in_ch5, mid_ch, kernel_size=1, stride=1, padding=0, dilation=1) 98 | self.convbloss = nn.Conv2d(mid_ch, n_class, kernel_size=1, bias=False) 99 | boundary_ch = 3 * mid_ch 100 | self.boundaryconv = ConvBnReLU(boundary_ch, out_ch, kernel_size=3, stride=1, padding=1, dilation=1) 101 | 102 | for m in self.modules(): 103 | if isinstance(m, nn.Conv2d): 104 | nn.init.kaiming_normal_(m.weight, mode='fan_out') 105 | if m.bias is not None: 106 | nn.init.constant_(m.bias, 0) 107 | elif isinstance(m, nn.BatchNorm2d): 108 | nn.init.constant_(m.weight, 1) 109 | nn.init.constant_(m.bias, 0) 110 | 111 | def forward(self, l1, l2, l5): 112 | l1_b = self.convb_1(l1) 113 | l1_bl = self.convbloss(l1_b) 114 | 115 | l2_b = self.convb_2(l2) 116 | l2_bl = self.convbloss(l2_b) 117 | 118 | l5_b = self.convb_5(l5) 119 | l5_b = F.interpolate(l5_b, l1.size()[2:], mode='bilinear', align_corners=True) 120 | l5_bl = self.convbloss(l5_b) 121 | 122 | b = torch.cat((l1_b, l2_b, l5_b), dim=1) 123 | b = self.boundaryconv(b) 124 | 125 | c_boundaryloss = l1_bl + l2_bl + l5_bl 126 | 127 | return b, c_boundaryloss 128 | 129 | class MSF_Module(nn.Module): 130 | def __init__(self, in_ch, mid_ch1, cat_ch, mid_ch2, out_ch): 131 | super(MSF_Module,self).__init__() 132 | 133 | self.input1 = ConvBnReLU(in_ch[0], mid_ch1, kernel_size=1, stride=1, padding=0, dilation=1) 134 | self.input2 = ConvBnReLU(in_ch[1], mid_ch1, kernel_size=1, stride=1, padding=0, dilation=1) 135 | self.input3 = ConvBnReLU(in_ch[2], mid_ch1, kernel_size=1, stride=1, padding=0, dilation=1) 136 | 137 | self.fusion1 = nn.Sequential( 138 | ConvBnReLU(cat_ch, mid_ch2, kernel_size=1, stride=1, padding=0, dilation=1), 139 | nn.Conv2d(mid_ch2, mid_ch2, kernel_size=3, stride=1, padding=1, dilation=1), 140 | nn.Sigmoid(), 141 | GlobalAvgPool2d() 142 | ) 143 | 144 | self.fusion2 = nn.Sequential( 145 | ConvBnReLU(cat_ch, mid_ch2, kernel_size=1, stride=1, padding=0, dilation=1), 146 | nn.Conv2d(mid_ch2, out_ch, kernel_size=3, stride=1, padding=1, dilation=1), 147 | nn.Sigmoid(), 148 | GlobalAvgPool2d() 149 | ) 150 | 151 | for m in self.modules(): 152 | if isinstance(m, nn.Conv2d): 153 | nn.init.kaiming_normal_(m.weight, mode='fan_out') 154 | if m.bias is not None: 155 | nn.init.constant_(m.bias, 0) 156 | elif isinstance(m, nn.BatchNorm2d): 157 | nn.init.constant_(m.weight, 1) 158 | nn.init.constant_(m.bias, 0) 159 | 160 | def forward(self, l3, l4, l5): 161 | 162 | x1 = self.input1(l3) 163 | x2 = self.input2(l4) 164 | x3 = self.input3(l5) 165 | 166 | w1 = torch.cat((x2, x3), dim=1) 167 | w1 = self.fusion1(w1).unsqueeze(2).unsqueeze(3).expand_as(x3) 168 | m1 = (1-w1)*x2 + w1*x3 169 | 170 | w2 = torch.cat((m1, x1), dim=1) 171 | w2 = self.fusion2(w2).unsqueeze(2).unsqueeze(3).expand_as(x3) 172 | m2 = (1-w2)*x1 + w2*m1 173 | 174 | return m2 175 | 176 | class BES_Module(nn.Module): 177 | def __init__(self, f5_in, mul_ch): 178 | super(BES_Module, self).__init__() 179 | aspp_out = 5 * f5_in // 8 180 | self.aspp = ASPP_Module(f5_in, atrous_rates = [12, 24, 36]) 181 | self.f5_out = ConvBnReLU(aspp_out, mul_ch, kernel_size=3, stride=1, padding=1, dilation=1) 182 | 183 | for m in self.modules(): 184 | if isinstance(m, nn.Conv2d): 185 | nn.init.kaiming_normal_(m.weight, mode='fan_out') 186 | if m.bias is not None: 187 | nn.init.constant_(m.bias, 0) 188 | elif isinstance(m, nn.BatchNorm2d): 189 | nn.init.constant_(m.weight, 1) 190 | nn.init.constant_(m.bias, 0) 191 | 192 | def forward(self, f5, fb, ff): 193 | aspp = self.aspp(f5) 194 | f5 = self.f5_out(aspp) 195 | f5 = F.interpolate(f5, fb.size()[2:], mode='bilinear', align_corners=True) 196 | f5_guide = torch.mul(f5, fb) 197 | ff_guide = torch.mul(ff, fb) 198 | fe = ff + ff_guide + f5_guide 199 | 200 | return fe 201 | -------------------------------------------------------------------------------- /libs/models/FCN.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import math 3 | import torch 4 | from torch._C import _logging_set_logger 5 | import torch.nn as nn 6 | import numpy as np 7 | import torch.nn.functional as F 8 | from torch.nn.modules.container import Sequential 9 | from torch.nn.modules.conv import Conv2d 10 | import torch.utils.model_zoo as model_zoo 11 | 12 | from libs.models.BESNet import BE_Module, MSF_Module, BES_Module 13 | 14 | __all__ = ['FCN'] 15 | 16 | model_urls = { 17 | 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', 18 | 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', 19 | 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', 20 | 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 21 | 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', 22 | } 23 | 24 | def conv3x3(in_planes, out_planes, stride=1): 25 | "3x3 convolution with padding" 26 | return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, 27 | padding=1, bias=False) 28 | 29 | class UP_Conv(nn.Module): 30 | def __init__(self,ch_in,ch_out): 31 | super(UP_Conv,self).__init__() 32 | self.up = nn.Sequential( 33 | nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True), 34 | nn.Conv2d(ch_in,ch_out,kernel_size=3,stride=1,padding=1,bias=True), 35 | nn.BatchNorm2d(ch_out), 36 | nn.ReLU(inplace=True) 37 | ) 38 | 39 | for m in self.modules(): 40 | if isinstance(m, nn.Conv2d): 41 | nn.init.kaiming_normal_(m.weight, mode='fan_out') 42 | if m.bias is not None: 43 | nn.init.constant_(m.bias, 0) 44 | elif isinstance(m, nn.BatchNorm2d): 45 | nn.init.constant_(m.weight, 1) 46 | nn.init.constant_(m.bias, 0) 47 | 48 | def forward(self,x): 49 | x = self.up(x) 50 | return x 51 | 52 | class BasicBlock(nn.Module): 53 | expansion = 1 54 | 55 | def __init__(self, inplanes, planes, stride=1, downsample=None): 56 | super(BasicBlock, self).__init__() 57 | self.conv1 = conv3x3(inplanes, planes, stride) 58 | self.bn1 = nn.BatchNorm2d(planes) 59 | self.relu = nn.ReLU(inplace=True) 60 | self.conv2 = conv3x3(planes, planes) 61 | self.bn2 = nn.BatchNorm2d(planes) 62 | self.downsample = downsample 63 | self.stride = stride 64 | 65 | def forward(self, x): 66 | residual = x 67 | 68 | out = self.conv1(x) 69 | out = self.bn1(out) 70 | out = self.relu(out) 71 | 72 | out = self.conv2(out) 73 | out = self.bn2(out) 74 | 75 | if self.downsample is not None: 76 | residual = self.downsample(x) 77 | 78 | out += residual 79 | out = self.relu(out) 80 | 81 | return out 82 | 83 | class Bottleneck(nn.Module): 84 | expansion = 4 85 | 86 | def __init__(self, inplanes, planes, stride=1, downsample=None): 87 | super(Bottleneck, self).__init__() 88 | self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) 89 | self.bn1 = nn.BatchNorm2d(planes) 90 | self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, 91 | padding=1, bias=False) 92 | self.bn2 = nn.BatchNorm2d(planes) 93 | self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) 94 | self.bn3 = nn.BatchNorm2d(planes * 4) 95 | self.relu = nn.ReLU(inplace=True) 96 | self.downsample = downsample 97 | self.stride = stride 98 | 99 | def forward(self, x): 100 | residual = x 101 | 102 | out = self.conv1(x) 103 | out = self.bn1(out) 104 | out = self.relu(out) 105 | 106 | out = self.conv2(out) 107 | out = self.bn2(out) 108 | out = self.relu(out) 109 | 110 | out = self.conv3(out) 111 | out = self.bn3(out) 112 | 113 | if self.downsample is not None: 114 | residual = self.downsample(x) 115 | 116 | out += residual 117 | out = self.relu(out) 118 | 119 | return out 120 | 121 | class ResNet(nn.Module): 122 | 123 | def __init__(self, block, layers, num_classes=1000): 124 | self.inplanes = 64 125 | super(ResNet, self).__init__() 126 | self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, 127 | bias=False) 128 | self.bn1 = nn.BatchNorm2d(64) 129 | self.relu = nn.ReLU(inplace=True) 130 | self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) 131 | self.layer1 = self._make_layer(block, 64, layers[0]) 132 | self.layer2 = self._make_layer(block, 128, layers[1], stride=2) 133 | self.layer3 = self._make_layer(block, 256, layers[2], stride=1) 134 | self.layer4 = self._make_layer(block, 512, layers[3], stride=1) 135 | self.avgpool = nn.AvgPool2d(7, stride=1) 136 | self.fc = nn.Linear(512 * block.expansion, num_classes) 137 | 138 | for m in self.modules(): 139 | if isinstance(m, nn.Conv2d): 140 | n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels 141 | m.weight.data.normal_(0, math.sqrt(2. / n)) 142 | elif isinstance(m, nn.BatchNorm2d): 143 | m.weight.data.fill_(1) 144 | m.bias.data.zero_() 145 | 146 | def _make_layer(self, block, planes, blocks, stride=1): 147 | downsample = None 148 | if stride != 1 or self.inplanes != planes * block.expansion: 149 | downsample = nn.Sequential( 150 | nn.Conv2d(self.inplanes, planes * block.expansion, 151 | kernel_size=1, stride=stride, bias=False), 152 | nn.BatchNorm2d(planes * block.expansion), 153 | ) 154 | 155 | layers = [] 156 | layers.append(block(self.inplanes, planes, stride, downsample)) 157 | self.inplanes = planes * block.expansion 158 | for i in range(1, blocks): 159 | layers.append(block(self.inplanes, planes)) 160 | 161 | return nn.Sequential(*layers) 162 | 163 | def forward(self, x): 164 | x = self.conv1(x) 165 | x = self.bn1(x) 166 | x = self.relu(x) 167 | x = self.maxpool(x) 168 | 169 | x = self.layer1(x) 170 | x = self.layer2(x) 171 | x = self.layer3(x) 172 | x = self.layer4(x) 173 | 174 | x = self.avgpool(x) 175 | x = x.view(x.size(0), -1) 176 | x = self.fc(x) 177 | 178 | return x 179 | 180 | def resnet18(pretrained=False, **kwargs): 181 | """Constructs a ResNet-18 model. 182 | 183 | Args: 184 | pretrained (bool): If True, returns a model pre-trained on ImageNet 185 | """ 186 | model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs) 187 | if pretrained: 188 | model.load_state_dict(model_zoo.load_url(model_urls['resnet18'])) 189 | return model 190 | 191 | def resnet34(pretrained=False, **kwargs): 192 | """Constructs a ResNet-34 model. 193 | 194 | Args: 195 | pretrained (bool): If True, returns a model pre-trained on ImageNet 196 | """ 197 | model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs) 198 | if pretrained: 199 | model.load_state_dict(model_zoo.load_url(model_urls['resnet34'])) 200 | return model 201 | 202 | def resnet50(pretrained=False, **kwargs): 203 | """Constructs a ResNet-50 model. 204 | 205 | Args: 206 | pretrained (bool): If True, returns a model pre-trained on ImageNet 207 | """ 208 | model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs) 209 | if pretrained: 210 | model.load_state_dict(model_zoo.load_url(model_urls['resnet50'])) 211 | return model 212 | 213 | def resnet101(pretrained=False, **kwargs): 214 | """Constructs a ResNet-101 model. 215 | 216 | Args: 217 | pretrained (bool): If True, returns a model pre-trained on ImageNet 218 | """ 219 | model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs) 220 | if pretrained: 221 | model.load_state_dict(model_zoo.load_url(model_urls['resnet101'])) 222 | return model 223 | 224 | def resnet152(pretrained=False, **kwargs): 225 | """Constructs a ResNet-152 model. 226 | 227 | Args: 228 | pretrained (bool): If True, returns a model pre-trained on ImageNet 229 | """ 230 | model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs) 231 | if pretrained: 232 | model.load_state_dict(model_zoo.load_url(model_urls['resnet152'])) 233 | return model 234 | 235 | def Upsample(x, size): 236 | """ 237 | Wrapper Around the Upsample Call 238 | """ 239 | return nn.functional.interpolate(x, size=size, mode='bilinear', 240 | align_corners=True) 241 | 242 | class FCNHead(nn.Module): 243 | def __init__(self, in_channels, out_channels, norm_layer=nn.BatchNorm2d): 244 | super(FCNHead, self).__init__() 245 | inter_channels = in_channels // 4 246 | self.conv5 = nn.Sequential(nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False), 247 | norm_layer(inter_channels), 248 | nn.ReLU(), 249 | nn.Dropout(0.1, False), 250 | nn.Conv2d(inter_channels, out_channels, 1)) 251 | 252 | for m in self.modules(): 253 | if isinstance(m, nn.Conv2d): 254 | nn.init.kaiming_normal_(m.weight, mode='fan_out') 255 | if m.bias is not None: 256 | nn.init.constant_(m.bias, 0) 257 | elif isinstance(m, nn.BatchNorm2d): 258 | nn.init.constant_(m.weight, 1) 259 | nn.init.constant_(m.bias, 0) 260 | 261 | def forward(self, x): 262 | return self.conv5(x) 263 | 264 | class BESNet(nn.Module): 265 | def __init__(self, nclass, backbone='resnet18', aux=True, norm_layer=nn.BatchNorm2d, pretrained=True): 266 | super(BESNet, self).__init__() 267 | 268 | self.aux=aux 269 | resnet = eval(backbone)(pretrained=pretrained) 270 | 271 | self.layer0 = nn.Sequential( 272 | resnet.conv1, 273 | resnet.bn1, 274 | resnet.relu, 275 | resnet.maxpool) 276 | 277 | self.layer1 = resnet.layer1 278 | self.layer2 = resnet.layer2 279 | self.layer3 = resnet.layer3 280 | self.layer4 = resnet.layer4 281 | 282 | for n, m in self.layer3.named_modules(): 283 | if 'conv2' in n: 284 | m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1) 285 | elif 'downsample.0' in n: 286 | m.stride = (1, 1) 287 | for n, m in self.layer4.named_modules(): 288 | if 'conv2' in n: 289 | m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1) 290 | elif 'downsample.0' in n: 291 | m.stride = (1, 1) 292 | 293 | self.BoundaryExtraction = BE_Module(64, 64, 512, 64, 128, 1) 294 | 295 | self.Fusion = MSF_Module([128, 256, 512], 128, 256, 128, 128) 296 | 297 | self.up = UP_Conv(128, 128) 298 | 299 | self.Enhance = BES_Module(512, 128) 300 | 301 | self.head = FCNHead(128, nclass, norm_layer) 302 | if self.aux: 303 | self.auxlayer = FCNHead(256, nclass, norm_layer) 304 | 305 | def forward(self, x): 306 | 307 | imsize = x.size()[2:] 308 | 309 | c0 = x = self.layer0(x) 310 | c1 = x = self.layer1(x) 311 | c2 = x = self.layer2(x) 312 | c3 = x = self.layer3(x) 313 | c4 = x = self.layer4(x) 314 | 315 | b, c_boundaryloss = self.BoundaryExtraction(c0, c1, c4) 316 | 317 | f = self.Fusion(c2, c3, c4) 318 | f = self.up(f) 319 | 320 | x = self.Enhance(c4, b, f) 321 | 322 | x = self.head(x) 323 | x = Upsample(x, imsize) 324 | 325 | outputs = [x] 326 | if self.aux: 327 | auxout = self.auxlayer(c3) 328 | auxout = Upsample(auxout, imsize) 329 | outputs.append(auxout) 330 | 331 | if self.training and self.aux: 332 | outputs.append(c_boundaryloss) 333 | return tuple(outputs) 334 | return x, c_boundaryloss 335 | 336 | if __name__ == "__main__": 337 | 338 | inp = torch.randn(2,3,512,512) 339 | model = BESNet(nclass=6, aux=False, pretrained=True) 340 | 341 | def count_parameters(model): 342 | return sum(p.numel() for p in model.parameters() if p.requires_grad) 343 | print(f"model parameters: {count_parameters(model)/1e6}M") 344 | 345 | model.eval() 346 | with torch.no_grad(): 347 | out = model(inp) 348 | 349 | if isinstance(out, (tuple, list)): 350 | for it in out: 351 | print(f"multi output: {it.shape}") 352 | else: 353 | print(out.shape) 354 | -------------------------------------------------------------------------------- /Eval.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import math 4 | import torch 5 | import torch.nn as nn 6 | from torch.utils import data 7 | import torch.nn.functional as F 8 | from torchvision import transforms 9 | 10 | from libs.models import * 11 | 12 | from libs.utils.metrics import Evaluator 13 | from libs.datasets import TestFullSizeLoader 14 | from mmcv.cnn import get_model_complexity_info 15 | 16 | import argparse 17 | import numpy as np 18 | from tqdm import tqdm 19 | from PIL import Image 20 | import matplotlib.pyplot as plt 21 | 22 | import warnings 23 | warnings.filterwarnings("ignore") 24 | 25 | categories = [ 26 | 'Impervious surface', 27 | 'Building', 28 | 'Low vegetation', 29 | 'Tree', 30 | 'Car', 31 | 'Clutter/Background' 32 | ] 33 | 34 | def get_colormap(): 35 | return np.array( 36 | [ 37 | [255, 255, 255], 38 | [0, 0, 255], 39 | [0, 255, 255], 40 | [0, 255, 0 ], 41 | [255, 255, 0 ], 42 | [255, 0, 0 ], 43 | [0, 0, 0 ] 44 | ] 45 | ) 46 | 47 | def encode_segmap(mask,): 48 | """Encode segmentation label images as pascal classes 49 | Args: 50 | mask (np.ndarray): raw segmentation label image of dimension 51 | (M, N, 3), in which the Pascal classes are encoded as colours. 52 | Returns: 53 | (np.ndarray): class map with dimensions (M,N), where the value at 54 | a given location is the integer denoting the class index. 55 | """ 56 | mask = mask.astype(int) 57 | label_mask = np.zeros((mask.shape[0], mask.shape[1]), dtype=np.int16) 58 | for ii, label in enumerate(get_colormap()): 59 | if ii == 6: 60 | label_mask[np.where(np.all(mask == label, axis=-1))[:2]] = 255 61 | else: 62 | label_mask[np.where(np.all(mask == label, axis=-1))[:2]] = ii 63 | # label_mask[np.where(np.all(mask == label, axis=-1))[:2]] = ii 64 | label_mask = label_mask.astype(int) 65 | return label_mask 66 | 67 | def decode_segmap(label_mask, n_classes = 6): 68 | """Decode segmentation class labels into a color image 69 | Args: 70 | label_mask (np.ndarray): an (M,N) array of integer values denoting 71 | the class label at each spatial location. 72 | plot (bool, optional): whether to show the resulting color image 73 | in a figure. 74 | Returns: 75 | (np.ndarray, optional): the resulting decoded color image. 76 | """ 77 | label_colours = get_colormap() 78 | 79 | r = label_mask.copy() 80 | g = label_mask.copy() 81 | b = label_mask.copy() 82 | for ll in range(0, n_classes+1): 83 | if ll != n_classes: 84 | r[label_mask == ll] = label_colours[ll, 0] 85 | g[label_mask == ll] = label_colours[ll, 1] 86 | b[label_mask == ll] = label_colours[ll, 2] 87 | else: 88 | r[label_mask == 255] = label_colours[ll, 0] 89 | g[label_mask == 255] = label_colours[ll, 1] 90 | b[label_mask == 255] = label_colours[ll, 2] 91 | 92 | rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3), dtype=np.uint8) 93 | rgb[:, :, 0] = r 94 | rgb[:, :, 1] = g 95 | rgb[:, :, 2] = b 96 | return rgb 97 | 98 | def tta_inference(inp, model, num_classes=6, scales=[1.0], flip=True): 99 | b, _, h, w = inp.size() 100 | preds = inp.new().resize_(b, num_classes, h, w).zero_().to(inp.device) 101 | for scale in scales: 102 | size = (int(scale*h), int(scale*w)) 103 | resized_img = F.interpolate(inp, size=size, mode='bilinear', align_corners=True,) 104 | pred = model_inference(model, resized_img.to(inp.device), flip) 105 | pred = F.interpolate(pred, size=(h, w), mode='bilinear', align_corners=True,) 106 | preds += pred 107 | 108 | return preds/(len(scales)) 109 | 110 | def model_inference(model, image, flip=True): 111 | output = model(image)[0] 112 | if flip: 113 | fimg = image.flip(2) 114 | output += model(fimg)[0].flip(2) 115 | fimg = image.flip(3) 116 | output += model(fimg)[0].flip(3) 117 | return output/3 118 | 119 | 120 | return output 121 | 122 | def slide(model, scale_image, num_classes=6, crop_size=512, overlap=1/2, scales=[1.0], flip=True): 123 | 124 | N, C, H_, W_ = scale_image.shape 125 | print(f"Height: {H_} Width: {W_}") 126 | 127 | full_probs = torch.zeros((N, num_classes, H_, W_), device=scale_image.device) # 128 | count_predictions = torch.zeros((N, num_classes, H_, W_), device=scale_image.device) # 129 | 130 | h_overlap_length = int((1-overlap)*crop_size) 131 | w_overlap_length = int((1-overlap)*crop_size) 132 | 133 | h = 0 134 | slide_finish = False 135 | while not slide_finish: 136 | 137 | if h + crop_size <= H_: 138 | print(f"h: {h}") 139 | # set row flag 140 | slide_row = True 141 | # initial row start 142 | w = 0 143 | while slide_row: 144 | if w + crop_size <= W_: 145 | print(f" h={h} w={w} -> h'={h+crop_size} w'={w+crop_size}") 146 | patch_image = scale_image[:, :, h:h+crop_size, w:w+crop_size] 147 | # 148 | patch_pred_image = tta_inference(patch_image, model, num_classes=num_classes, scales=scales, flip=flip) 149 | count_predictions[:,:,h:h+crop_size, w:w+crop_size] += 1 150 | full_probs[:,:,h:h+crop_size, w:w+crop_size] += patch_pred_image 151 | 152 | else: 153 | print(f" h={h} w={W_-crop_size} -> h'={h+crop_size} w'={W_}") 154 | patch_image = scale_image[:, :, h:h+crop_size, W_-crop_size:W_] 155 | # 156 | patch_pred_image = tta_inference(patch_image, model, num_classes=num_classes, scales=scales, flip=flip) 157 | count_predictions[:,:,h:h+crop_size, W_-crop_size:W_] += 1 158 | full_probs[:,:,h:h+crop_size, W_-crop_size:W_] += patch_pred_image 159 | slide_row = False 160 | 161 | w += w_overlap_length 162 | 163 | else: 164 | print(f"h: {h}") 165 | # set last row flag 166 | slide_last_row = True 167 | # initial row start 168 | w = 0 169 | while slide_last_row: 170 | if w + crop_size <= W_: 171 | print(f"h={H_-crop_size} w={w} -> h'={H_} w'={w+crop_size}") 172 | patch_image = scale_image[:,:,H_-crop_size:H_, w:w+crop_size] 173 | # 174 | patch_pred_image = tta_inference(patch_image, model, num_classes=num_classes, scales=scales, flip=flip) 175 | count_predictions[:,:,H_-crop_size:H_, w:w+crop_size] += 1 176 | full_probs[:,:,H_-crop_size:H_, w:w+crop_size] += patch_pred_image 177 | 178 | else: 179 | print(f"h={H_-crop_size} w={W_-crop_size} -> h'={H_} w'={W_}") 180 | patch_image = scale_image[:,:,H_-crop_size:H_, W_-crop_size:W_] 181 | # 182 | patch_pred_image = tta_inference(patch_image, model, num_classes=num_classes, scales=scales, flip=flip) 183 | count_predictions[:,:,H_-crop_size:H_, W_-crop_size:W_] += 1 184 | full_probs[:,:,H_-crop_size:H_, W_-crop_size:W_] += patch_pred_image 185 | 186 | slide_last_row = False 187 | slide_finish = True 188 | 189 | w += w_overlap_length 190 | 191 | h += h_overlap_length 192 | 193 | full_probs /= count_predictions 194 | 195 | return full_probs 196 | 197 | def main(args): 198 | 199 | # loader 200 | data_transforms = { 201 | "potsdam": transforms.Compose([ 202 | transforms.ToTensor(), 203 | transforms.Normalize([0.4752, 0.3216, 0.3188], [0.2108, 0.1484, 0.1431]) 204 | ]), 205 | "vaihingen": transforms.Compose([ 206 | transforms.ToTensor(), 207 | transforms.Normalize([0.4752, 0.3216, 0.3188], [0.2108, 0.1484, 0.1431]) 208 | ]), 209 | } 210 | 211 | test_set = TestFullSizeLoader.TestFullSize(root=args.data_dir, data_list=args.test_list,\ 212 | transform=data_transforms[args.dataset],) 213 | 214 | test_loader = data.DataLoader( 215 | test_set, 216 | batch_size=args.batch_size, 217 | shuffle=False, 218 | num_workers=args.num_workers, 219 | pin_memory=True) 220 | 221 | # load model 222 | checkpoint = torch.load(args.checkpoint, map_location='cpu') 223 | 224 | ######################################################################################## 225 | model = eval(args.model)(nclass=args.num_classes, backbone=args.backbone, aux=True) 226 | ######################################################################################## 227 | 228 | flops, params = get_model_complexity_info( 229 | model, 230 | input_shape=(3,512,512), 231 | print_per_layer_stat=False, 232 | as_strings=True) 233 | 234 | print(f"Input shape:{(3,512,512)}\nflops:{flops}\nparams:{params}") 235 | 236 | model.load_state_dict(checkpoint) 237 | model = model.cuda() 238 | 239 | evaluator = Evaluator(args.num_classes) 240 | evaluator.reset() 241 | 242 | model.eval() 243 | with torch.no_grad(): 244 | tqdm_test_loader = tqdm(test_loader, total=len(test_loader)) 245 | for num_iter, (images, gts, name) in enumerate(tqdm_test_loader): 246 | 247 | # print(f"num_iter: {num_iter}") 248 | 249 | images = images.cuda() 250 | gts = gts.cuda() 251 | assert images.shape[2:] == gts.shape[1:], \ 252 | f"images shape: {images.shape} gts shape: {gts.shape}" 253 | 254 | preds = slide( 255 | model, 256 | images, 257 | num_classes=args.num_classes, 258 | crop_size=512, 259 | overlap=args.overlap, 260 | scales=args.scales, 261 | flip=args.flip) 262 | 263 | preds = preds.data.cpu().numpy() 264 | gts = gts.cpu().numpy() 265 | preds = np.argmax(preds, axis=1) 266 | 267 | if args.save_flag: 268 | if not os.path.exists(args.save_dir): 269 | os.makedirs(args.save_dir) 270 | pred_images = decode_segmap(preds.squeeze()) 271 | image_save_name = name[0] 272 | img_save_path = os.path.join(args.save_dir, image_save_name+'.png') 273 | arrimage = Image.fromarray(pred_images) 274 | arrimage.save(img_save_path) 275 | 276 | evaluator.add_batch(gts, preds) 277 | 278 | # Fast test during the training 279 | Acc = evaluator.Pixel_Accuracy() 280 | Acc_class = evaluator.Pixel_Accuracy_Class() 281 | mIoU = evaluator.Mean_Intersection_over_Union() 282 | F1, mF1 = evaluator.Calculate_F1_Score() 283 | 284 | print(f"Test model: {args.model}") 285 | print(f"Test checkpoint: {args.checkpoint}") 286 | print(f"Test overlap: {args.overlap}") 287 | print(f"Test scales: {args.scales}") 288 | print(f"Test flip: {args.flip}") 289 | 290 | print(f"\n\nEvaluation Metrics:\nAcc: {Acc:.5f} \nAcc_class: {Acc_class:.5f}" 291 | f"\nmIoU: {mIoU:.5f} \nmF1: {mF1:.5f} \nCategory F1: {dict(zip(categories, F1))}\n") 292 | 293 | 294 | if __name__ == "__main__": 295 | 296 | def str2bool(v): 297 | if v.lower() in ('yes', 'true', 't', 'y', '1'): 298 | return True 299 | elif v.lower() in ('no', 'false', 'f', 'n', '0'): 300 | return False 301 | else: 302 | raise argparse.ArgumentTypeError('Boolean value expected.') 303 | 304 | def get_arguments(): 305 | """ 306 | Parse all the arguments 307 | Returns: args 308 | A list of parsed arguments. 309 | """ 310 | parser = argparse.ArgumentParser(description="TGARSLetter Competition") 311 | parser.add_argument("--batch_size", type=int, default=1, 312 | help="Number of images sent to the network in one step.") 313 | parser.add_argument("--num_workers", type=int, default=1) 314 | parser.add_argument("--dataset", type=str, choices=['potsdam', 'vaihingen'], 315 | help="choose dataset. ['potsdam', 'vaihingen'].") 316 | parser.add_argument("--data_dir", type=str, default="./data", 317 | help="Path to the directory containing the Cityscapes dataset.") 318 | parser.add_argument("--save_dir", type=str, default="./data", 319 | help="Path to the directory containing the Cityscapes dataset.") 320 | parser.add_argument("--test_list", nargs='+', required=True, 321 | help="Path to the test set listing the images in the dataset.") 322 | parser.add_argument("--model", type=str, default='DeepLab_ResNet50', help="model architecture") 323 | parser.add_argument("--ignore_label", type=int, default=255, 324 | help="The index of the label to ignore during the training.") 325 | parser.add_argument("--num_classes", type=int, default=6, 326 | help="Number of classes to predict (including background).") 327 | parser.add_argument("--backbone", type=str, default='resnet50', 328 | help="Number of classes to predict (including background).") 329 | parser.add_argument("--checkpoint", type=str, default=None, 330 | help="Where restore models parameters from.") 331 | parser.add_argument("--overlap", type=float, default=1/3, 332 | help="Where restore models parameters from.") 333 | parser.add_argument("--scales", nargs='+', type=float, 334 | help="Where restore models parameters from.") 335 | parser.add_argument("--flip", type=str2bool, default=False, 336 | help="Where restore models parameters from.") 337 | parser.add_argument("--save_flag", type=str2bool, default=False, 338 | help="Where restore models parameters from.") 339 | 340 | args = parser.parse_args() 341 | return args 342 | 343 | args = get_arguments() 344 | 345 | start = time.time() 346 | main(args) 347 | runtime = time.time() - start 348 | print(f"Spend Time: {math.floor(runtime//3600):2d}h:" 349 | f"{math.floor(runtime%3600//60):2d}m:{math.floor(runtime%60):2d}s") --------------------------------------------------------------------------------