├── .gitignore
├── dynamic_unet
│   ├── __init__.py
│   ├── base.py
│   ├── module_encode.py
│   └── unet_example.py
├── input
│   └── link.txt
├── notebook
│   ├── CamVid_dataset_with_fastai.ipynb
│   ├── different_network.ipynb
│   └── pytorch_lightning_resnet.ipynb
├── readme.md
├── readme_en.md
├── requirements.txt
└── setup.py

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Output
build
dist
docs/_book
src/version.js
junit.xml
coverage/
.docz/

# YALC (for Erik)
.yalc
yalc.lock

# Logging, System files, misc.
.idea/
.vscode
.npm
npm-debug.log
package-lock.json
yarn-error.log
.DS_Store
.env

# Common Example Data Directories
sampledata/
bin/deps/
docker/dcm4che/dcm4che-arc

# Cypress test results
videos/

# Locize settings
.locize
input
output
*.gz
*.out
*.log

.ipynb_checkpoints
notebook/.ipynb_checkpoints

__*__
deploy
*.pth

--------------------------------------------------------------------------------
/dynamic_unet/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Flyfoxs/dynamic_unet/4d5e183599072ef574fe3a899951a400a3c4eda7/dynamic_unet/__init__.py
--------------------------------------------------------------------------------
/dynamic_unet/base.py:
--------------------------------------------------------------------------------
import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from file_cache import *
from fastai.vision import *
from fastai.callbacks.hooks import *
from fvcore.common.registry import Registry
from torchvision import models
from enum import Enum
from fastai.vision.models.unet import UnetBlock


def dummy_eval(m: nn.Module, size: tuple = (64, 64)):
    "Pass a dummy batch through `m` in evaluation mode, with spatial size `size`."
    dummy = torch.rand((1, 3, *size)).requires_grad_(False)
    return m.eval()(dummy)


def in_channels(m: nn.Module) -> int:
    "Return the number of input channels of the first weight layer in `m`."
    for l in m.modules():
        if hasattr(l, 'weight'): return l.weight.shape[1]
    raise Exception(f'No weight layer: {type(m)}')


def flatten_moduleList(module: nn.Module) -> List[nn.Module]:
    "Recursively flatten any `nn.ModuleList` found among the children of `module`, since a forward hook cannot be registered on a ModuleList itself."
    res_list = []
    for item in module.children():
        if isinstance(item, nn.ModuleList):
            res_list.extend(flatten_moduleList(item))
        else:
            res_list.append(item)
    return res_list

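# A minimal sketch of what the helpers above return; the values shown are for a
# stock torchvision ResNet-34 body and are illustrative only, not part of this file:
#
#   body = nn.Sequential(*list(models.resnet34().children())[:-3])
#   in_channels(body)                       # 3, input channels of the first conv
#   dummy_eval(body, size=(64, 64)).shape   # torch.Size([1, 256, 4, 4])
#   len(flatten_moduleList(body))           # 7 top-level modules that can be hooked
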
def get_unet_config(model, img_size=(512, 512)):
    "Cut the network into blocks at the layers where the width and height of the feature map are halved, keeping only feature maps with W and H >= 7."
    x = torch.rand(1, in_channels(model), *img_size)
    hooks = []
    count = 0
    layer_meta = []
    layers = []

    def hook(module, input, output):
        "Record the metadata (channels and spatial size) of every 4-D layer output."
        nonlocal count
        if len(output.shape) == 4:
            b, c, w, h = output.shape
            layer_meta.append((count, type(module).__name__, c, w, h, output.shape))
            layers.append(module)
            count += 1

    for module in flatten_moduleList(model):
        hooks.append(module.register_forward_hook(hook))

    # Make a forward pass to trigger the hooks, then remove them.
    model(x)
    for h in hooks:
        h.remove()

    layer_meta = pd.DataFrame(layer_meta, columns=['sn', 'layer', 'c', 'w', 'h', 'size'])
    # Candidate feature-map sizes: the input size halved repeatedly, as long as it stays >= 7.
    feature_sizes = [x.shape[-1] // (2 ** i) for i in range(8)]
    feature_sizes = [size for size in feature_sizes if size >= 7]
    # For each candidate size, keep the last layer that produces it.
    layer_meta: pd.DataFrame = layer_meta.loc[layer_meta.h.isin(feature_sizes)].drop_duplicates(['h'], keep='last')
    layer_meta = layer_meta.head(5)
    print(layer_meta)
    # assert len(layer_meta) == 5, f'Only cut {len(layer_meta)} layers from the pretrained model'

    layer_size = list(layer_meta['size'])
    layers = [layers[i] for i in layer_meta.sn]
    return layer_size, layers


class DynamicUnet(SequentialEx):
    "Create a U-Net from a given encoder architecture."

    def __init__(self, encoder: nn.Module, n_classes: int, img_size: Tuple[int, int] = (256, 256),
                 blur: bool = False,
                 blur_final=True, self_attention: bool = False,
                 y_range: Optional[Tuple[float, float]] = None,
                 last_cross: bool = True, bottle: bool = False, **kwargs):

        imsize = tuple(img_size)
        sfs_szs, select_layer = get_unet_config(encoder, img_size)
        ni = sfs_szs[-1][1]
        sfs_szs = list(reversed(sfs_szs[:-1]))
        select_layer = list(reversed(select_layer[:-1]))
        self.sfs = hook_outputs(select_layer, detach=False)
        x = dummy_eval(encoder, imsize).detach()

        middle_conv = nn.Sequential(conv_layer(ni, ni * 2, **kwargs),
                                    conv_layer(ni * 2, ni, **kwargs)).eval()
        x = middle_conv(x)
        layers = [encoder, batchnorm_2d(ni), nn.ReLU(), middle_conv]

        for i, x_size in enumerate(sfs_szs):
            not_final = i != len(sfs_szs) - 1
            up_in_c, x_in_c = int(x.shape[1]), int(x_size[1])
            do_blur = blur and (not_final or blur_final)
            sa = self_attention and (i == len(sfs_szs) - 3)
            unet_block = UnetBlock(up_in_c, x_in_c, self.sfs[i], final_div=not_final, blur=do_blur, self_attention=sa,
                                   **kwargs).eval()
            layers.append(unet_block)
            x = unet_block(x)

        ni = x.shape[1]
        if imsize != sfs_szs[0][-2:]: layers.append(PixelShuffle_ICNR(ni, **kwargs))
        x = PixelShuffle_ICNR(ni)(x)
        if imsize != x.shape[-2:]: layers.append(Lambda(lambda x: F.interpolate(x, imsize, mode='nearest')))
        if last_cross:
            layers.append(MergeLayer(dense=True))
            ni += in_channels(encoder)
            layers.append(res_block(ni, bottle=bottle, **kwargs))
        layers += [conv_layer(ni, n_classes, ks=1, use_activ=False, **kwargs)]
        if y_range is not None: layers.append(SigmoidRange(*y_range))
        super().__init__(*layers)

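# Example (the same call used in the readme and notebooks): wrap a headless
# ResNet-34 and check that the predicted mask matches the input resolution.
#
#   encoder = nn.Sequential(*list(models.resnet34().children())[:-3])
#   unet = DynamicUnet(encoder, n_classes=5, img_size=(224, 224), blur=False,
#                      blur_final=False, self_attention=False, y_range=None,
#                      norm_type=NormType, last_cross=True, bottle=False)
#   unet(torch.rand(1, 3, 224, 224)).shape  # torch.Size([1, 5, 224, 224])
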
def efficient_unet(name='5'):
    from efficientnet_pytorch import EfficientNet

    class EfficientNet_(EfficientNet):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)

        def forward(self, inputs):
            # Drop the classification head: return only the final feature map.
            x = self.extract_features(inputs)
            return x

    return EfficientNet_.from_pretrained(f'efficientnet-b{name}', in_channels=3)


if __name__ == '__main__':
    encoder = efficient_unet()
    unet = to_device(
        DynamicUnet(encoder, n_classes=5, img_size=(224, 224), blur=False, blur_final=False,
                    self_attention=False, y_range=None, norm_type=NormType,
                    last_cross=True,
                    bottle=False), 'cuda')
--------------------------------------------------------------------------------
/dynamic_unet/module_encode.py:
--------------------------------------------------------------------------------
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from fastai.vision import *
from fastai.vision.models import WideResNet
from fvcore.common.registry import Registry

UNET_ENCODE = Registry("UNET_ENCODE")

@UNET_ENCODE.register()
def resnet18():
    return nn.Sequential(*list(models.resnet18(pretrained=True).children())[:-2])

@UNET_ENCODE.register()
def densenet121():
    return nn.Sequential(*list(models.densenet121(pretrained=True).children())[0])

@UNET_ENCODE.register()
def densenet169():
    return nn.Sequential(*list(models.densenet169(pretrained=True).children())[0])

@UNET_ENCODE.register()
def densenet201():
    return nn.Sequential(*list(models.densenet201(pretrained=True).children())[0])

@UNET_ENCODE.register()
def efficientnet(name='5'):
    from efficientnet_pytorch import EfficientNet

    class EfficientNet_(EfficientNet):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)

        def forward(self, inputs):
            x = self.extract_features(inputs)
            return x

    return EfficientNet_.from_pretrained(f'efficientnet-b{name}')

@UNET_ENCODE.register()
def wrn_22():
    def _wrn_22():
        "Wide ResNet with 22 layers."
        return WideResNet(num_groups=3, N=3, num_classes=10, k=6, drop_p=0.2)

    return nn.Sequential(*list(_wrn_22().children())[0])


# Also register the named variants efficientnet-b1 ... efficientnet-b7.
for i in range(1, 8):
    UNET_ENCODE._do_register(f'efficientnet-b{i}', partial(efficientnet, name=i))

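# Any zero-argument callable that returns a fully-convolutional nn.Module can be
# plugged in the same way. A hypothetical registration for the VGG backbone used
# in notebook/different_network.ipynb (not part of the original registry) would be:
#
# @UNET_ENCODE.register()
# def vgg11_bn():
#     return nn.Sequential(*list(models.vgg11_bn(pretrained=True).children())[0])
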
if __name__ == '__main__':

    encode = UNET_ENCODE.get('wrn_22')
    print(encode())

    encode = UNET_ENCODE.get('densenet121')
    print(encode())

    encode = UNET_ENCODE.get('efficientnet')
    print(encode(4))

    encode = UNET_ENCODE.get('efficientnet-b2')
    print(encode())
--------------------------------------------------------------------------------
/dynamic_unet/unet_example.py:
--------------------------------------------------------------------------------
from .base import *
import torch
from torch import nn
from fastai.vision import models
from efficientnet_pytorch import EfficientNet

# Take EfficientNet as the encoder: build a U-Net on top of efficientnet-b0..b2.
for i in range(3):
    encoder = efficient_unet(i)
    unet = DynamicUnet(encoder, n_classes=5, img_size=(224, 224), blur=False, blur_final=False,
                       self_attention=False, y_range=None, norm_type=NormType,
                       last_cross=True,
                       bottle=False)

    print(unet(torch.rand(1, 3, 224, 224)).shape)
--------------------------------------------------------------------------------
/input/link.txt:
--------------------------------------------------------------------------------
https://s3.amazonaws.com/fast-ai-imagelocal/camvid

ln -s /home/felix/.fastai/data/camvid camvid
--------------------------------------------------------------------------------
/notebook/different_network.ipynb:
--------------------------------------------------------------------------------
  1 | {
  2 |  "cells": [
  3 |   {
  4 |    "cell_type": "markdown",
  5 |    "metadata": {},
  6 |    "source": [
  7 |     "# Take different backbones as the encoder of Unet\n",
  8 |     "- VGG\n",
  9 |     "- Resnet\n",
 10 |     "- Densenet\n",
 11 |     "- Efficientnet"
 12 |    ]
 13 |   },
 14 |   {
 15 |    "cell_type": "code",
 16 |    "execution_count": 1,
 17 |    "metadata": {},
 18 |    "outputs": [
 19 |     {
 20 |      "name": "stderr",
 21 |      "output_type": "stream",
 22 |      "text": [
 23 |       "2020-06-12 10:35:49,436 util_log.py[153] INFO   Start the program at:amax7, 127.0.1.1, with:Load module\n"
 24 |      ]
 25 |     },
 26 |     {
 27 |      "name": "stdout",
 28 |      "output_type": "stream",
 29 |      "text": [
 30 |       "File_cache: Adjust notebook work fold to:/share/felix/pj/dynamic_unet/\n"
 31 |      ]
 32 |     }
 33 |    ],
 34 |    "source": [
 35 |     "from file_cache import *\n",
 36 |     "from dynamic_unet.base import *\n",
 37 |     "import torch\n",
 38 |     "from torch import nn\n",
 39 |     "from fastai.vision import models\n",
 40 |     "from efficientnet_pytorch import EfficientNet\n"
 41 |    ]
 42 |   },
 43 |   {
 44 |    "cell_type": "markdown",
 45 |    "metadata": {},
 46 |    "source": [
 47 |     "# Take VGG as encoder"
 48 |    ]
 49 |   },
 50 |   {
 51 |    "cell_type": "code",
 52 |    "execution_count": 2,
 53 |    "metadata": {},
 54 |    "outputs": [
 55 |     {
 56 |      "name": "stdout",
 57 |      "output_type": "stream",
 58 |      "text": [
 59 |       "    sn      layer    c    w    h               size\n",
 60 |       "2    2       ReLU   64  224  224  (1, 64, 224, 224)\n",
 61 |       "6    6       ReLU  128  112  112  (1, 128, 112, 112)\n",
 62 |       "13  13       ReLU  256   56   56    (1, 256, 56, 56)\n",
 63 |       "20  20       ReLU  512   28   28    (1, 512, 28, 28)\n",
 64 |       "27  27       ReLU  512   14   14    (1, 512, 14, 14)\n",
 65 |       "28  28  MaxPool2d  512    7    7      (1, 512, 7, 7)\n",
 66 |       "torch.Size([1, 5, 224, 224])\n"
 67 |      ]
 68 |     },
 69 |     {
 70 |      "data": {
 71 |       "text/plain": [
 72 |        "DynamicUnet(\n",
 73 |        "  (layers): ModuleList(\n",
 74 |        "    (0): Sequential(\n",
 75 |        "      (0): 
Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 76 | " (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 77 | " (2): ReLU(inplace=True)\n", 78 | " (3): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n", 79 | " (4): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 80 | " (5): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 81 | " (6): ReLU(inplace=True)\n", 82 | " (7): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n", 83 | " (8): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 84 | " (9): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 85 | " (10): ReLU(inplace=True)\n", 86 | " (11): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 87 | " (12): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 88 | " (13): ReLU(inplace=True)\n", 89 | " (14): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n", 90 | " (15): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 91 | " (16): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 92 | " (17): ReLU(inplace=True)\n", 93 | " (18): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 94 | " (19): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 95 | " (20): ReLU(inplace=True)\n", 96 | " (21): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n", 97 | " (22): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 98 | " (23): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 99 | " (24): ReLU(inplace=True)\n", 100 | " (25): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 101 | " (26): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 102 | " (27): ReLU(inplace=True)\n", 103 | " (28): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n", 104 | " )\n", 105 | " (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 106 | " (2): ReLU()\n", 107 | " (3): Sequential(\n", 108 | " (0): Sequential(\n", 109 | " (0): Conv2d(512, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 110 | " (1): ReLU(inplace=True)\n", 111 | " )\n", 112 | " (1): Sequential(\n", 113 | " (0): Conv2d(1024, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 114 | " (1): ReLU(inplace=True)\n", 115 | " )\n", 116 | " )\n", 117 | " (4): UnetBlock(\n", 118 | " (shuf): PixelShuffle_ICNR(\n", 119 | " (conv): Sequential(\n", 120 | " (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1))\n", 121 | " )\n", 122 | " (shuf): PixelShuffle(upscale_factor=2)\n", 123 | " (pad): ReplicationPad2d((1, 0, 1, 0))\n", 124 | " (blur): AvgPool2d(kernel_size=2, stride=1, padding=0)\n", 125 | " (relu): ReLU(inplace=True)\n", 126 | " )\n", 127 | " (bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 128 | " (conv1): Sequential(\n", 129 | " (0): Conv2d(768, 768, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 130 | " (1): ReLU(inplace=True)\n", 131 | " )\n", 132 | " (conv2): Sequential(\n", 133 | " (0): Conv2d(768, 768, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 134 | " (1): ReLU(inplace=True)\n", 135 | " )\n", 136 | " (relu): 
ReLU()\n", 137 | " )\n", 138 | " (5): UnetBlock(\n", 139 | " (shuf): PixelShuffle_ICNR(\n", 140 | " (conv): Sequential(\n", 141 | " (0): Conv2d(768, 1536, kernel_size=(1, 1), stride=(1, 1))\n", 142 | " )\n", 143 | " (shuf): PixelShuffle(upscale_factor=2)\n", 144 | " (pad): ReplicationPad2d((1, 0, 1, 0))\n", 145 | " (blur): AvgPool2d(kernel_size=2, stride=1, padding=0)\n", 146 | " (relu): ReLU(inplace=True)\n", 147 | " )\n", 148 | " (bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 149 | " (conv1): Sequential(\n", 150 | " (0): Conv2d(896, 896, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 151 | " (1): ReLU(inplace=True)\n", 152 | " )\n", 153 | " (conv2): Sequential(\n", 154 | " (0): Conv2d(896, 896, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 155 | " (1): ReLU(inplace=True)\n", 156 | " )\n", 157 | " (relu): ReLU()\n", 158 | " )\n", 159 | " (6): UnetBlock(\n", 160 | " (shuf): PixelShuffle_ICNR(\n", 161 | " (conv): Sequential(\n", 162 | " (0): Conv2d(896, 1792, kernel_size=(1, 1), stride=(1, 1))\n", 163 | " )\n", 164 | " (shuf): PixelShuffle(upscale_factor=2)\n", 165 | " (pad): ReplicationPad2d((1, 0, 1, 0))\n", 166 | " (blur): AvgPool2d(kernel_size=2, stride=1, padding=0)\n", 167 | " (relu): ReLU(inplace=True)\n", 168 | " )\n", 169 | " (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 170 | " (conv1): Sequential(\n", 171 | " (0): Conv2d(704, 704, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 172 | " (1): ReLU(inplace=True)\n", 173 | " )\n", 174 | " (conv2): Sequential(\n", 175 | " (0): Conv2d(704, 704, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 176 | " (1): ReLU(inplace=True)\n", 177 | " )\n", 178 | " (relu): ReLU()\n", 179 | " )\n", 180 | " (7): UnetBlock(\n", 181 | " (shuf): PixelShuffle_ICNR(\n", 182 | " (conv): Sequential(\n", 183 | " (0): Conv2d(704, 1408, kernel_size=(1, 1), stride=(1, 1))\n", 184 | " )\n", 185 | " (shuf): PixelShuffle(upscale_factor=2)\n", 186 | " (pad): ReplicationPad2d((1, 0, 1, 0))\n", 187 | " (blur): AvgPool2d(kernel_size=2, stride=1, padding=0)\n", 188 | " (relu): ReLU(inplace=True)\n", 189 | " )\n", 190 | " (bn): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 191 | " (conv1): Sequential(\n", 192 | " (0): Conv2d(480, 480, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 193 | " (1): ReLU(inplace=True)\n", 194 | " )\n", 195 | " (conv2): Sequential(\n", 196 | " (0): Conv2d(480, 480, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 197 | " (1): ReLU(inplace=True)\n", 198 | " )\n", 199 | " (relu): ReLU()\n", 200 | " )\n", 201 | " (8): UnetBlock(\n", 202 | " (shuf): PixelShuffle_ICNR(\n", 203 | " (conv): Sequential(\n", 204 | " (0): Conv2d(480, 960, kernel_size=(1, 1), stride=(1, 1))\n", 205 | " )\n", 206 | " (shuf): PixelShuffle(upscale_factor=2)\n", 207 | " (pad): ReplicationPad2d((1, 0, 1, 0))\n", 208 | " (blur): AvgPool2d(kernel_size=2, stride=1, padding=0)\n", 209 | " (relu): ReLU(inplace=True)\n", 210 | " )\n", 211 | " (bn): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 212 | " (conv1): Sequential(\n", 213 | " (0): Conv2d(304, 152, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 214 | " (1): ReLU(inplace=True)\n", 215 | " )\n", 216 | " (conv2): Sequential(\n", 217 | " (0): Conv2d(152, 152, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 218 | " (1): ReLU(inplace=True)\n", 219 | " )\n", 220 | " (relu): ReLU()\n", 221 | " )\n", 222 | " (9): 
PixelShuffle_ICNR(\n", 223 | " (conv): Sequential(\n", 224 | " (0): Conv2d(152, 608, kernel_size=(1, 1), stride=(1, 1))\n", 225 | " )\n", 226 | " (shuf): PixelShuffle(upscale_factor=2)\n", 227 | " (pad): ReplicationPad2d((1, 0, 1, 0))\n", 228 | " (blur): AvgPool2d(kernel_size=2, stride=1, padding=0)\n", 229 | " (relu): ReLU(inplace=True)\n", 230 | " )\n", 231 | " (10): Lambda()\n", 232 | " (11): MergeLayer()\n", 233 | " (12): SequentialEx(\n", 234 | " (layers): ModuleList(\n", 235 | " (0): Sequential(\n", 236 | " (0): Conv2d(155, 155, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 237 | " (1): ReLU(inplace=True)\n", 238 | " )\n", 239 | " (1): Sequential(\n", 240 | " (0): Conv2d(155, 155, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 241 | " (1): ReLU(inplace=True)\n", 242 | " )\n", 243 | " (2): MergeLayer()\n", 244 | " )\n", 245 | " )\n", 246 | " (13): Sequential(\n", 247 | " (0): Conv2d(155, 5, kernel_size=(1, 1), stride=(1, 1))\n", 248 | " )\n", 249 | " )\n", 250 | ")" 251 | ] 252 | }, 253 | "execution_count": 2, 254 | "metadata": {}, 255 | "output_type": "execute_result" 256 | } 257 | ], 258 | "source": [ 259 | "encoder = nn.Sequential(*list(models.vgg11_bn().children())[0])\n", 260 | "unet = DynamicUnet(encoder, n_classes=5, img_size=(224, 224), blur=False, blur_final=False,\n", 261 | " self_attention=False, y_range=None, norm_type=NormType,\n", 262 | " last_cross=True,\n", 263 | " bottle=False)\n", 264 | "print(unet(torch.rand(1,3,224,224)).shape)\n", 265 | "unet" 266 | ] 267 | }, 268 | { 269 | "cell_type": "markdown", 270 | "metadata": {}, 271 | "source": [ 272 | "# Take resnet as encoder" 273 | ] 274 | }, 275 | { 276 | "cell_type": "code", 277 | "execution_count": 3, 278 | "metadata": {}, 279 | "outputs": [ 280 | { 281 | "name": "stdout", 282 | "output_type": "stream", 283 | "text": [ 284 | " sn layer c w h size\n", 285 | "2 2 ReLU 64 112 112 (1, 64, 112, 112)\n", 286 | "4 4 Sequential 64 56 56 (1, 64, 56, 56)\n", 287 | "5 5 Sequential 128 28 28 (1, 128, 28, 28)\n", 288 | "6 6 Sequential 256 14 14 (1, 256, 14, 14)\n", 289 | "torch.Size([1, 5, 224, 224])\n" 290 | ] 291 | } 292 | ], 293 | "source": [ 294 | "encoder = nn.Sequential(*list(models.resnet34().children())[:-3])\n", 295 | "\n", 296 | "unet = DynamicUnet(encoder, n_classes=5, img_size=(224, 224), blur=False, blur_final=False,\n", 297 | " self_attention=False, y_range=None, norm_type=NormType,\n", 298 | " last_cross=True,\n", 299 | " bottle=False)\n", 300 | "print(unet(torch.rand(1,3,224,224)).shape)\n", 301 | " " 302 | ] 303 | }, 304 | { 305 | "cell_type": "markdown", 306 | "metadata": {}, 307 | "source": [ 308 | "\n", 309 | "# Take Densenet as encode" 310 | ] 311 | }, 312 | { 313 | "cell_type": "code", 314 | "execution_count": 4, 315 | "metadata": {}, 316 | "outputs": [ 317 | { 318 | "name": "stdout", 319 | "output_type": "stream", 320 | "text": [ 321 | " sn layer c w h size\n", 322 | "2 2 ReLU 64 112 112 (1, 64, 112, 112)\n", 323 | "4 4 _DenseBlock 256 56 56 (1, 256, 56, 56)\n", 324 | "6 6 _DenseBlock 512 28 28 (1, 512, 28, 28)\n", 325 | "8 8 _DenseBlock 1024 14 14 (1, 1024, 14, 14)\n", 326 | "11 11 BatchNorm2d 1024 7 7 (1, 1024, 7, 7)\n", 327 | "torch.Size([1, 5, 224, 224])\n" 328 | ] 329 | } 330 | ], 331 | "source": [ 332 | "encoder = nn.Sequential(*list(models.densenet121().children())[0])\n", 333 | "unet = DynamicUnet(encoder, n_classes=5, img_size=(224, 224), blur=False, blur_final=False,\n", 334 | " self_attention=False, y_range=None, norm_type=NormType,\n", 335 | " last_cross=True,\n", 336 | " 
bottle=False)\n", 337 | "print(unet(torch.rand(1,3,224,224)).shape)\n", 338 | " " 339 | ] 340 | }, 341 | { 342 | "cell_type": "markdown", 343 | "metadata": {}, 344 | "source": [ 345 | "\n", 346 | "# Take EfficientNet as encode" 347 | ] 348 | }, 349 | { 350 | "cell_type": "code", 351 | "execution_count": 5, 352 | "metadata": {}, 353 | "outputs": [ 354 | { 355 | "name": "stdout", 356 | "output_type": "stream", 357 | "text": [ 358 | "Loaded pretrained weights for efficientnet-b0\n", 359 | " sn layer c w h size\n", 360 | "3 3 MBConvBlock 16 112 112 (1, 16, 112, 112)\n", 361 | "5 5 MBConvBlock 24 56 56 (1, 24, 56, 56)\n", 362 | "7 7 MBConvBlock 40 28 28 (1, 40, 28, 28)\n", 363 | "13 13 MBConvBlock 112 14 14 (1, 112, 14, 14)\n", 364 | "21 21 MemoryEfficientSwish 1280 7 7 (1, 1280, 7, 7)\n", 365 | "torch.Size([1, 5, 224, 224])\n" 366 | ] 367 | } 368 | ], 369 | "source": [ 370 | "\n", 371 | "for i in range(1):\n", 372 | " encoder = efficient_unet(i)\n", 373 | " unet = DynamicUnet(encoder, n_classes=5, img_size=(224, 224), blur=False, blur_final=False,\n", 374 | " self_attention=False, y_range=None, norm_type=NormType,\n", 375 | " last_cross=True,\n", 376 | " bottle=False)\n", 377 | "\n", 378 | " print(unet(torch.rand(1,3,224,224)).shape)\n", 379 | " \n" 380 | ] 381 | } 382 | ], 383 | "metadata": { 384 | "kernelspec": { 385 | "display_name": "Python 3", 386 | "language": "python", 387 | "name": "python3" 388 | }, 389 | "language_info": { 390 | "codemirror_mode": { 391 | "name": "ipython", 392 | "version": 3 393 | }, 394 | "file_extension": ".py", 395 | "mimetype": "text/x-python", 396 | "name": "python", 397 | "nbconvert_exporter": "python", 398 | "pygments_lexer": "ipython3", 399 | "version": "3.7.4" 400 | } 401 | }, 402 | "nbformat": 4, 403 | "nbformat_minor": 2 404 | } 405 | -------------------------------------------------------------------------------- /notebook/pytorch_lightning_resnet.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [ 8 | { 9 | "name": "stderr", 10 | "output_type": "stream", 11 | "text": [ 12 | "2020-06-14 22:53:39,623 util_log.py[153] INFO Start the program at:amax7, 127.0.1.1, with:Load module\n" 13 | ] 14 | }, 15 | { 16 | "name": "stdout", 17 | "output_type": "stream", 18 | "text": [ 19 | "File_cache: Adjust notebook work fold to:/share/felix/pj/dynamic_unet/\n" 20 | ] 21 | } 22 | ], 23 | "source": [ 24 | "\n", 25 | "import os\n", 26 | "os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"2\"\n", 27 | "\n", 28 | "import torch\n", 29 | "from torch.nn import functional as F\n", 30 | "from torch.utils.data import DataLoader, Dataset\n", 31 | "from torchvision.datasets import MNIST\n", 32 | "from torchvision import transforms\n", 33 | "import pytorch_lightning as pl\n", 34 | "from file_cache import *\n", 35 | "\n", 36 | "\n", 37 | "from albumentations import (\n", 38 | " PadIfNeeded,\n", 39 | " HorizontalFlip,\n", 40 | " VerticalFlip,\n", 41 | " CenterCrop,\n", 42 | " Crop,\n", 43 | " Compose,\n", 44 | " Transpose,\n", 45 | " RandomRotate90,\n", 46 | " ElasticTransform,\n", 47 | " GridDistortion,\n", 48 | " OpticalDistortion,\n", 49 | " RandomSizedCrop,\n", 50 | " OneOf,\n", 51 | " CLAHE,\n", 52 | " RandomBrightnessContrast,\n", 53 | " RandomGamma,\n", 54 | " Resize,\n", 55 | " Rotate,\n", 56 | " Normalize,\n", 57 | ")\n", 58 | "\n" 59 | ] 60 | }, 61 | { 62 | "cell_type": "code", 63 | "execution_count": 2, 64 | "metadata": {}, 65 | 
"outputs": [ 66 | { 67 | "name": "stdout", 68 | "output_type": "stream", 69 | "text": [ 70 | "==================================================\n", 71 | "False 600\n", 72 | "True 101\n", 73 | "Name: valid, dtype: int64\n", 74 | "img size torch.Size([3, 720, 960])\n", 75 | "==================================================\n", 76 | "False 600\n", 77 | "True 101\n", 78 | "Name: valid, dtype: int64\n" 79 | ] 80 | }, 81 | { 82 | "data": { 83 | "text/plain": [ 84 | "2" 85 | ] 86 | }, 87 | "execution_count": 2, 88 | "metadata": {}, 89 | "output_type": "execute_result" 90 | } 91 | ], 92 | "source": [ 93 | "\n", 94 | "img_size = height, width = 360*2, 480*2\n", 95 | "class CamvidDS(Dataset):\n", 96 | " def __init__(self, ds_type='train'):\n", 97 | " train_file_list = glob('./input/camvid/images/*.png')\n", 98 | " df = pd.DataFrame({'img_file':train_file_list})\n", 99 | " df['img_name'] = df.img_file.apply(lambda val: os.path.basename(val))\n", 100 | " df['label_path'] = df.img_file.apply(lambda val: val.replace('.png', '_P.png').replace('images', 'labels'))\n", 101 | " valid_list = pd.read_csv('./input/camvid/valid.txt', header=None).iloc[:,0] \n", 102 | " df['valid'] = df.img_name.isin(valid_list)\n", 103 | " \n", 104 | " \n", 105 | " self.ds_type = ds_type\n", 106 | " \n", 107 | " if ds_type=='train':\n", 108 | " print(df.valid.value_counts())\n", 109 | " self.df = df.loc[df.valid==False]\n", 110 | " else:\n", 111 | " self.df = df.loc[df.valid==True]\n", 112 | "\n", 113 | " original_height, original_width = height, width\n", 114 | " size = 224\n", 115 | " crop_size = np.random.uniform(0.9, 1)\n", 116 | " self.aug_train = Compose([\n", 117 | "# OneOf([RandomSizedCrop(min_max_height=(50, 101), height=original_height, width=original_width, p=0.5),\n", 118 | "# PadIfNeeded(min_height=original_height, min_width=original_width, p=0.5)], p=1),\n", 119 | " #CenterCrop(int(size*crop_size), int(size*crop_size)),\n", 120 | " #VerticalFlip(p=0.5),\n", 121 | " #Rotate(limit=(-50, 50)),\n", 122 | " Resize(height=height, width=width),\n", 123 | " Normalize()\n", 124 | " ])\n", 125 | "\n", 126 | " self.aug_val = Compose([\n", 127 | " Resize(height=height, width=width),\n", 128 | " Normalize()\n", 129 | " ])\n", 130 | " \n", 131 | " def __len__(self):\n", 132 | " return len(self.df)\n", 133 | " \n", 134 | " def __getitem__(self, index):\n", 135 | " from PIL import Image\n", 136 | " image = self.df.img_file.iloc[index]\n", 137 | " #print(image)\n", 138 | " image = np.array(Image.open(image).convert('RGB'))\n", 139 | "\n", 140 | " mask = self.df.label_path.iloc[index]\n", 141 | " #print(mask)\n", 142 | " mask = np.array(Image.open(mask))\n", 143 | " \n", 144 | " \n", 145 | "\n", 146 | " if self.ds_type=='train':\n", 147 | " augmented = self.aug_train(image=image, mask=mask)\n", 148 | " else:\n", 149 | " augmented = self.aug_val(image=image, mask=mask)\n", 150 | "\n", 151 | " image, mask = augmented['image'], augmented['mask']\n", 152 | "\n", 153 | " return torch.FloatTensor(image).permute([2, 0, 1]), torch.LongTensor(mask)#.cuda()\n", 154 | "\n", 155 | "print('====='*10)\n", 156 | "print('img size', CamvidDS()[0][0].shape)\n", 157 | "print('====='*10)\n", 158 | "len(CamvidDS()[0] )" 159 | ] 160 | }, 161 | { 162 | "cell_type": "code", 163 | "execution_count": 3, 164 | "metadata": {}, 165 | "outputs": [], 166 | "source": [ 167 | "\n", 168 | "\n", 169 | "#Image.open('./input/camvid/images/0016E5_07979.png')" 170 | ] 171 | }, 172 | { 173 | "cell_type": "code", 174 | "execution_count": 4, 175 | "metadata": { 176 | 
"scrolled": false 177 | }, 178 | "outputs": [], 179 | "source": [ 180 | "from efficientnet_pytorch import EfficientNet\n", 181 | "from dynamic_unet.base import *\n", 182 | "\n", 183 | "# import ipdb\n", 184 | "# ipdb.set_trace()\n", 185 | "# def efficient_unet(name='5', in_channels=3):\n", 186 | "# from efficientnet_pytorch import EfficientNet\n", 187 | "# class EfficientNet_(EfficientNet):\n", 188 | "# def __init__(self, *args, **kwargs):\n", 189 | "# super().__init__(*args, **kwargs)\n", 190 | "\n", 191 | "# def forward(self, inputs):\n", 192 | "# x = self.extract_features(inputs)\n", 193 | "# return x\n", 194 | "# return EfficientNet_.from_pretrained(f'efficientnet-b{name}', in_channels=in_channels)\n", 195 | "\n", 196 | "def unet_resnet34( pretrained: bool = True, blur_final: bool = True,\n", 197 | " norm_type: Optional[NormType] = NormType, split_on: Optional[SplitFuncOrIdxList] = None,\n", 198 | " blur: bool = False,\n", 199 | " self_attention: bool = False, y_range: Optional[Tuple[float, float]] = None, last_cross: bool = True,\n", 200 | " bottle: bool = False, cut: Union[int, Callable] = None,\n", 201 | " n_classes=32, img_size=img_size, in_channels=1,\n", 202 | " **learn_kwargs: Any) -> Learner:\n", 203 | " \"Build Unet learner from `data` and `arch`.\"\n", 204 | " \"blur: do maxpolling or not\"\n", 205 | " from fastai.vision import models\n", 206 | " arch: Callable = models.resnet34\n", 207 | " body = create_body(arch, pretrained, cut)\n", 208 | " from fastai.vision import models\n", 209 | " from dynamic_unet.base import DynamicUnet\n", 210 | " \n", 211 | "# moduleList = flatten_moduleList(body)\n", 212 | "# print(len(moduleList))\n", 213 | "# for child in moduleList[:-2]:\n", 214 | "# for param in child.parameters():\n", 215 | "# param.requires_grad = False\n", 216 | " print('img_size', img_size)\n", 217 | " model = to_device(\n", 218 | " DynamicUnet(body, n_classes=n_classes, img_size=img_size, blur=blur, blur_final=blur_final,\n", 219 | " self_attention=self_attention, y_range=y_range, norm_type=norm_type,\n", 220 | " last_cross=last_cross,\n", 221 | " bottle=bottle), 'cuda')\n", 222 | " return model\n", 223 | "\n", 224 | "class Dynamic_Model(pl.LightningModule):\n", 225 | "\n", 226 | " def __init__(self, n_classes=32, in_channels=3):\n", 227 | " super(Dynamic_Model, self).__init__()\n", 228 | " #encoder = efficient_unet(7, in_channels)\n", 229 | " \n", 230 | " self.unet = unet_resnet34()\n", 231 | "\n", 232 | "\n", 233 | " def forward(self, x):\n", 234 | " # called with self(x)\n", 235 | " return self.unet(x)\n", 236 | "\n", 237 | " def training_step(self, batch, batch_nb):\n", 238 | " # REQUIRED\n", 239 | " x, y = batch\n", 240 | " #print(x.shape, y.shape)\n", 241 | " y_hat = self(x)\n", 242 | " #print(y_hat.shape, y.shape, y.max())\n", 243 | " loss = F.cross_entropy(y_hat, y)\n", 244 | " tensorboard_logs = {'train_loss': loss}\n", 245 | " return {'loss': loss, 'log': tensorboard_logs}\n", 246 | "\n", 247 | " def validation_step(self, batch, batch_nb):\n", 248 | " # OPTIONAL\n", 249 | " x, y = batch\n", 250 | " y_hat = self(x)\n", 251 | " return {'val_loss': F.cross_entropy(y_hat, y)}\n", 252 | "\n", 253 | " def validation_epoch_end(self, outputs):\n", 254 | " # OPTIONAL\n", 255 | " avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()\n", 256 | " tensorboard_logs = {'val_loss': avg_loss}\n", 257 | " print(self.current_epoch, tensorboard_logs)\n", 258 | " return {'val_loss': avg_loss, 'log': tensorboard_logs}\n", 259 | "\n", 260 | " \n", 261 | " def 
configure_optimizers(self):\n", 262 | " opt = torch.optim.Adam(self.parameters(), lr=3e-4, betas=(0.9, 0.99))\n", 263 | " # scheduler = optim.lr_scheduler.OneCycleLR(opt, max_lr=self.hparams.lr,\n", 264 | " # steps_per_epoch=len(self.train_dataloader()),\n", 265 | " # epochs=self.hparams.epochs)\n", 266 | "\n", 267 | " scheduler = optim.lr_scheduler.StepLR(opt, gamma=0.8, step_size=10)\n", 268 | "\n", 269 | " print('schedule', scheduler)\n", 270 | " return [opt], [scheduler]\n", 271 | "\n", 272 | " def train_dataloader(self):\n", 273 | " # REQUIRED\n", 274 | " return DataLoader(CamvidDS('train'), batch_size=2)\n", 275 | "\n", 276 | " def val_dataloader(self):\n", 277 | " # OPTIONAL\n", 278 | " return DataLoader(CamvidDS('valid'), batch_size=2)\n", 279 | "\n", 280 | " \n" 281 | ] 282 | }, 283 | { 284 | "cell_type": "code", 285 | "execution_count": 5, 286 | "metadata": {}, 287 | "outputs": [ 288 | { 289 | "name": "stdout", 290 | "output_type": "stream", 291 | "text": [ 292 | "img_size (720, 960)\n", 293 | " sn layer c w h size\n", 294 | "2 2 ReLU 64 360 480 (1, 64, 360, 480)\n", 295 | "4 4 Sequential 64 180 240 (1, 64, 180, 240)\n", 296 | "5 5 Sequential 128 90 120 (1, 128, 90, 120)\n", 297 | "6 6 Sequential 256 45 60 (1, 256, 45, 60)\n", 298 | "7 7 Sequential 512 23 30 (1, 512, 23, 30)\n" 299 | ] 300 | }, 301 | { 302 | "name": "stderr", 303 | "output_type": "stream", 304 | "text": [ 305 | "GPU available: True, used: True\n", 306 | "2020-06-14 22:53:53,874 distrib_data_parallel.py[251] INFO GPU available: True, used: True\n", 307 | "No environment variable for node rank defined. Set as 0.\n", 308 | "2020-06-14 22:53:53,879 distrib_data_parallel.py[297] WARNING No environment variable for node rank defined. Set as 0.\n", 309 | "CUDA_VISIBLE_DEVICES: [0]\n", 310 | "2020-06-14 22:53:53,881 distrib_data_parallel.py[323] INFO CUDA_VISIBLE_DEVICES: [0]\n" 311 | ] 312 | }, 313 | { 314 | "name": "stdout", 315 | "output_type": "stream", 316 | "text": [ 317 | "schedule \n", 318 | "0 {'val_loss': tensor(4.7905, device='cuda:0')}\n", 319 | "False 600\n", 320 | "True 101\n", 321 | "Name: valid, dtype: int64\n" 322 | ] 323 | }, 324 | { 325 | "data": { 326 | "application/vnd.jupyter.widget-view+json": { 327 | "model_id": "05d7b3d431b3413cb5aa125d3c3ad1c1", 328 | "version_major": 2, 329 | "version_minor": 0 330 | }, 331 | "text/plain": [ 332 | "HBox(children=(FloatProgress(value=0.0, description='Finding best initial lr', style=ProgressStyle(description…" 333 | ] 334 | }, 335 | "metadata": {}, 336 | "output_type": "display_data" 337 | }, 338 | { 339 | "name": "stderr", 340 | "output_type": "stream", 341 | "text": [ 342 | "LR finder stopped early due to diverging loss.\n", 343 | "2020-06-14 22:54:37,452 lr_finder.py[174] INFO LR finder stopped early due to diverging loss.\n" 344 | ] 345 | }, 346 | { 347 | "name": "stdout", 348 | "output_type": "stream", 349 | "text": [ 350 | "\n" 351 | ] 352 | }, 353 | { 354 | "data": { 355 | "text/plain": [ 356 | "0.0002089296130854041" 357 | ] 358 | }, 359 | "execution_count": 5, 360 | "metadata": {}, 361 | "output_type": "execute_result" 362 | }, 363 | { 364 | "data": { 365 | "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAYgAAAEKCAYAAAAIO8L1AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8GearUAAAgAElEQVR4nO3deZRcZ3nn8e9TXdX7Jqlb+y7kDWOb0PGCgWMgFraHBEI8BA8wZlVgYBISkhNnOTFJTkLm5NjOgJMYAw4mgOPEwbEJXlA8JGazsWxkW953JLWk7lar96W2Z/64t1vVrdutbrlvLd2/zzn31F3eW/VUqboeve977/uauyMiIjJdotQBiIhIeVKCEBGRSEoQIiISSQlCREQiKUGIiEgkJQgREYmULHUAC6mtrc03b95c6jBERCrGww8/3OPu7VHHFlWC2Lx5M7t37y51GCIiFcPMXpnpWGxNTGa2wcy+b2ZPmdkTZvZb4f7lZrbLzJ4LH5fNcP6VYZnnzOzKuOIUEZFocfZBZIHPuvvpwPnAp8zsDOAq4D533w7cF25PYWbLgauB84BzgatnSiQiIhKP2BKEux9090fC9UHgKWAd8C7g5rDYzcC7I05/B7DL3Xvd/SiwC7gkrlhFROR4RbmKycw2A68HHgRWuftBCJIIsDLilHXAvoLt/eE+EREpktgThJk1Av8KfMbdB+Z6WsS+yFEFzWynme02s93d3d0nG6aIiEwTa4IwsxRBcvimu3873H3YzNaEx9cAXRGn7gc2FGyvBzqjXsPdb3T3DnfvaG+PvFJLREROQpxXMRnwVeApd7+24NCdwMRVSVcCd0Scfi+ww8yWhZ3TO8J9IiJS4InOfv7r2XhaT+KsQVwIfBB4m5ntCZfLgL8CLjaz54CLw23MrMPMvgLg7r3AnwMPhcufhftERKTANx74OZ/950djee7YbpRz9x8S3ZcA8PaI8ruBjxVs3wTcFE90IiKLQzqbpyYZz//1NRaTiEgFy+TyVCtBiIjIdOlsnlTVTI01r44ShIhIBUurBiEiIlEyuTypKiUIERGZZjybp1oJQkREpktn1cQkIiIRMjnVIEREJIJqECIiEkn3QYiISKTgPgglCBERmUb3QYiISKS0LnMVEZEoqkGIiEgk1SBEROQ4ubyTd9RJLSIiU6WzeQA1MYmIyFTpnBKEiIhEmKxBaD4IEREppBqEiIhEyoQ1CHVSi4jIFHHXIJKxPCtgZjcB7wS63P3McN+twKlhkVagz93PiTj3ZWAQyAFZd++IK04RkUp1rA+iwhIE8DXgeuDrEzvc/dcn1s3sGqB/lvPf6u49sUUnIlLhJmoQqUqrQbj7/Wa2OeqYmRnwXuBtcb2+iMhiN1GDqFlkfRBvBg67+3MzHHfge2b2sJntnO2JzGynme02s93d3d0LHqiISLnKLNKrmK4Abpnl+IXu/gvApcCnzOwtMxV09xvdvcPdO9rb2xc6ThGRspVebFcxmVkSeA9w60xl3L0zfOwCbgfOLU50IiKVYzEOtfFLwNPuvj/qoJk1mFnTxDqwA9hbxPhERCrCZCd1pdUgzOwW4CfAqWa238w+Gh56H9Oal8xsrZndFW6uAn5oZo8CPwW+6+73xBWniEilmuykrsCrmK6YYf+HIvZ1ApeF6y8CZ8cVl4jIYpHJObC4mphERGQBpLM5oAKbmEREJF4arE9ERCJNNDGlNNy3iIgUGo95LCYlCBGRCpXJ5amuShCMXrTwlCBERCpUOpuPrXkJlCBERCpWOpuPrYMalCBERCpWJqcEISIiEYImJiUIERGZZlw1CBERiZLJ5mO7xBWUIEREKlZaNQgREYkycR9EXJQgREQqlDqpRUQkku6DEBGRSOmcK0GIiMjx0tmc+iBEROR4GdUgREQkigbrExGRSLoPQkREIgV3UlfF9vyxJQgzu8nMusxsb8G+z5nZATPbEy6XzXDuJWb2jJk9b2ZXxRWjiEglG8/lSSUrs4npa8AlEfuvc/dzwuWu6QfNrAr4W+BS4AzgCjM7I8Y4RUQqjruTzuapqcSrmNz9fqD3JE49F3je3V909zTwT8C7FjQ4EZEKl807wKLrg/i0mT0WNkEtizi+DthXsL0/3BfJzHaa2W4z293d3b3QsYqIlKV0Ng+wqIba+HtgG3AOcBC4JqJMVIOaz/SE7n6ju3e4e0d7e/vCRCkiUuYyuSBBLJoahLsfdvecu+eBLxM0J023H9hQsL0e6CxGfCIilWLR1SDMbE3B5q8CeyOKPQRsN7MtZlYNvA+4sxjxiYhUivFs/DWIZFxPbGa3ABcBbWa2H7gauMjMziFoMnoZ+I2w7FrgK+5+mbtnzezTwL1AFXCTuz8RV5wiIpVooompphIThLtfEbH7qzOU7QQuK9i+CzjuElgREQmkc4usiUlERBbGRB+ERnMVEZEpJpqYUovlKiYREVkY46pBiIhIlExucd5JLSIir5L6IEREJFK6CPdBKEGIiFSgRTfUhoiILIxjQ21U5nwQIiISk7RqECIiEkWd1CIiEkk1CBERiZRRDUJERKKkc3nMoCqhTmoRESmQzuaprkpgpgQhIiIF0rl8rP0PoAQhIlKRJmoQcVKCEBGpQBnVIEREJEo6m491NjlQghARqUjqgxARkUjprKsPQkREjpfO5WOdbhRiTBBmdpOZdZnZ3oJ9f21mT5vZY2Z2u5m1znDuy2b2uJntMbPdccUoIlKpMtk8NRVcg/gacMm0fbuAM939LOBZ4A9mOf+t7n6Ou3fEFJ+ISMUKahDx3SQHMSYId78f6J2273vung03HwDWx/X6IiKL2WK/D+IjwN0zHHPge2b2sJntnO1JzGynme02s93d3d0LHqSISDlatPdBmNkfAVngmzMUudDdfwG4FPiUmb1lpudy9xvdvcPdO9rb22OIVkSk/CzK+yDM7ErgncD73d2jyrh7Z/jYBdwOnFu8CEVEyt94dpHVIMzsEuD3gV9x95EZyjSYWdPEOrAD2BtVVkRkqcrk8tRUaoIws1uAnwCnmtl+M/socD3QBOwKL2G9ISy71szuCk9dBfzQzB4Ffgp8193viStOEZFKlM7F38SUnEuh8H/yo+6eN7NTgNOAu909M9M57n5FxO6vzlC2E7gsXH8ROHsucYmILFWZMrqK6X6g1szWAfcBHya4z0FEREqgnO6ktrDP4D3AF939V4Ez4gtLRERmks87mVz5jMVkZnYB8H7gu+G+OTVPiYjIwsrk8wBlcxXTZwiGxbjd3Z8ws63A9+MLS0REZpLOhgmiHDqp3f2/gP8CMLME0OPuvxlnYCIiEi2TC24hK4sahJl9y8yaw6uZngSeMbPfizUyERGJNFmDKIcEAZzh7gPAu4G7gI3AB2OLSkREZjSRIMplqI2UmaUIEsQd4f0PkcNkiIhIvNK58qpBfAl4GWgA7jezTcBAXEGJiMjMjnVSxzsfxFw7qb8AfKFg1ytm9tZ4QhIRkdmUVQ3CzFrM7NqJeRfM7BqC2oSIiBRZZiJBVFXF+jpzTT83AY
PAe8NlAPiHuIISEZGZHeukLoMmJmCbu/9awfafmtmeOAISEZHZlVUTEzBqZm+a2DCzC4HReEISEZHZFOs+iLnWID4BfN3MWsLto8CV8YQkIiKzKbehNh4Fzjaz5nB7wMw+AzwWZ3AiInK8TJk1MQFBYgjvqAb4nRjiERGREyi3O6mjxNt9LiIikcqtkzqKhtoQESmBsuikNrNBohOBAXWxRCQiIrOarEGUsonJ3ZvcvTliaXL3E3Zwm9lNZtZlZnsL9i03s11m9lz4uGyGc68MyzxnZrpiSkQklMkG/28v5z6IufgacMm0fVcB97n7duC+cHsKM1sOXA2cB5wLXD1TIhERWWrSuRxVCaMqEW9XcKwJwt3vB3qn7X4XcHO4fjPBEOLTvQPY5e697n4U2MXxiUZEZElKZ/OxNy9B/DWIKKvc/SBA+Lgyosw6YF/B9v5wn4jIkpfJeewd1FCaBDEXUfWmyKumzGznxCiz3d3dMYclIlJ649l87P0PUJoEcdjM1gCEj10RZfYDGwq21wOdUU/m7je6e4e7d7S3ty94sCIi5SaTy1OzSGsQd3JsHKcrgTsiytwL7DCzZWHn9I5wn4jIkpfO5iu/icnMbgF+ApxqZvvN7KPAXwEXm9lzwMXhNmbWYWZfAXD3XuDPgYfC5c/CfSIiS146m499LgiY+2iuJ8Xdr5jh0Nsjyu4GPlawfRPBREUiIlIgk1sENQgREVl46dzi7aQWEZFXYXwR3wchIiKvgpqYREQk0mK+k1pERF4F1SBERCTSorgPQkREFl56EQ+1ISIir0J6iQ/WJyIiM0hnc+qkFhGR4y314b5FRGQG6ZwucxURkWlyeSeXd3VSi4jIVJlcHkBNTCIiMtV4VglCREQipCcSRBHmg1CCEBGpIGpiEhGRSBM1CHVSi4jIFKpBiIhIpMlOatUgRESkUDqsQaRUgxARkUKZsAZRsxhrEGZ2qpntKVgGzOwz08pcZGb9BWX+pNhxioiUixe6h9izr4+jw+mi1iCSsb/CNO7+DHAOgJlVAQeA2yOK/sDd31nM2EREys2DLx7hii8/QN6D7YnO6WL0QRQ9QUzzduAFd3+lxHGIiJSdvpE0n7l1D5tWNHDVpaexr3eEn/eOMJrOcerqpthfv9QJ4n3ALTMcu8DMHgU6gd919yeiCpnZTmAnwMaNG2MJUkSk2Nydq/71cXqGxvn2Jy/kdetbih5DyTqpzawa+BXgXyIOPwJscvezgS8C/zbT87j7je7e4e4d7e3t8QQrIlJktz60j3ueOMTv7ji1JMkBSnsV06XAI+5+ePoBdx9w96Fw/S4gZWZtxQ5QRKQUnu8a4k+/8yQXvmYFH3/z1pLFUcoEcQUzNC+Z2Wozs3D9XII4jxQxNhGRkvnre5+mJpXg2veeQyIR/6B8MylJH4SZ1QMXA79RsO8TAO5+A3A58EkzywKjwPvc3UsRq4hIsb1yZISOTctZ1Vxb0jhKkiDcfQRYMW3fDQXr1wPXFzsuEZFy0DM0zus3Lit1GLqTWkSknGRzeY4Mp2lvqil1KEoQIiLlpHc4jTtKECIiMlXX4DgA7Y1KECIiUqB7KEwQqkGIiEihbtUgREQkSk9Yg2hrqi5xJEoQIiJlpXtwnMaaJPXVpR4qTwlCRKSsdA+Ol0X/AyhBiIiUle7B8bLofwAlCBGRstI9pBqEiIhE6Bkcp62x9B3UoAQhIlI2xjI5BsayqkGIiMhUPWV0kxwoQYiIlI3Jm+SUIEREpNCxu6hLOw/EBCUIEZEyUU7jMIEShIhI2egZTAOwQlcxiYhIoe6hMZbVp0hVlcdPc3lEISIiZTXMBpRoTupyk887iYSdsFwu7zz8ylHM4Kz1LdQkq4oQnYgsFUoQZajjL/6DmmSCNS21rGmtY21LLWta6ljbGjwOjWe56/GD3PvEIXqGgjbC2lSCX9i4jPO3ruD8rSs4e4MShoi8Ot1D47xh47JShzGpZAnCzF4GBoEckHX3jmnHDfi/wGXACPAhd39koePI5533n7eRzr4xDvaP8mTnAP/x5GHGs/kp5epSVbzt9JVcduYaklXGAy8e4cEXe7nuP57FHWqSCd6wKUgY521ZztkbWqlNKWGIyNy4Oz2DadUgCrzV3XtmOHYpsD1czgP+PnxcUImE8dkdp07Z5+4cHcnQ2TfKwf4xDLjwNW3UVR/7wX/Ha1cD0D+S4cGXjvDAi7088OKRyYRRnUxwzoZW1i+rY3AsS/9ohsGxLI01VaxsrmVVUy0rGqvJ5Z2xTI7xbB53aK1P0VqfoqUuRX11klSVUV2VIBl2WuXyjruTrEpM1nCqwuaxbC7P/qOjvNI7wmg6h7uTd8i5M5bOMZrJMZLOMZ7NkffgfebyTm2qavI1m+tS1KeqqKuuojZVRV2qiqbaJA01ybLpOBNZjIbDv1EliLl5F/B1d3fgATNrNbM17n4w7hc2M5Y3VLO8oZoz17XMWralPsWO165mR5gw+kbSPPTyUX760hEefKmXB144QnNd8OO7rrWWofEsT3UO8P2BLkbSOQCSCaM2VYW7Mxzum6tUlbG2tY6EGft6R8jmfc7nJgwSZnM+pzaVoKk2RVNNkqbaJI21waQmdakq6qurqEkmyOSdTDZPJpcnm3fMbPJ1JhJW3h13SIbJL5VMUJusYll9imXh595Yk6Q6maAmmQgfg+evSQXrjTXJycQoshhM3CTXViZDfUNpE4QD3zMzB77k7jdOO74O2FewvT/cNyVBmNlOYCfAxo0b44t2jlrrq7n4jFVcfMaqE5Ydy+RIJmyydgCQyeXpH83QN5JhLJMjncuHP7iOhT+0CYPxbJ4DfaP8vHeEfb0juMNlr1vNphUNbF7RQGNNkkRiorxRVx3UBupSVVQnEyQsSIQA6WyegbHMlNcNljzD6SzD41mGxrIMjmcZHAtqQsGS4chQmrHJmkl+ssaTSiaoMsMJayruk7GYgRHUhtLZPOlcUIsaGs/O67OuS1XRECarptokzbUpmmqTtNZXsyJMNCsaq2lvqmFVcy0rm2porElOvm+RclJuw2xAaRPEhe7eaWYrgV1m9rS7319wPOqv+Lj/6oaJ5UaAjo6Ouf/3uQxE9VGkqhK0NdYU9X8R1cniv2aUdDZP30iaI8NphsezpLN5xnN5xjN50rk842FT3Fgmx/B4juF0kKiGwsQ1MJrh0MAYfSNpeofTRFWMapKJyaa05toky+qrWRkmj5XNNaxtqWNta3CBQlNtqvgfgixZShAF3L0zfOwys9uBc4HCBLEf2FCwvR7oLF6EUmzVyUTwY9386sehyeed/tEMPUPjdA+O0zU4zuGBMY4MpxkYzUzWmDr7x3h0f9/k1WmFmmqTbFxez6YV9WxYXs/G5fVsWBasr22t1VVrsqC6B8cAymY2OShRgjCzBiDh7oPh+g7gz6YVuxP4tJn9E0HndH8x+h9kcUgkjGUN1SxrqGb7qqYTls/k8nQPjnOwf4zOvlE6+0bZf3SUfUdHePrgILuePEwmd6xKYgarm2snE8aG5XWsaw2XZXWsblECkfnpG
UpTlTCW1ZfHMBtQuhrEKuD2sC04CXzL3e8xs08AuPsNwF0El7g+T3CZ64dLFKssAamqRNi0VMcbNh1/HXou7xweGGNf78hk4tjXO8q+3hF+9HwPhwbGppQ3g7UtdWxuq2fTiga2tjWwbWUj21c2sralbk43ZsrS0h3OJFdO342SJAh3fxE4O2L/DQXrDnyqmHGJzKQqYZMJJOpa67FMjkNh7eNAWPt45cgwLx8Z4a7HD9I3kpksW5eqYnNbA1va6tnSFlxUEFxcUE97U4060Zeo7qHxkvcDTlfOl7mKVIza8Ed/c1tD5PHe4TTPdw3xXNcgz3cN8XLPME8dHOTeJw6TK+hNr0tVsWlFPdvaG9nW3sDW9kZOW9PEa9obp1ztJotPuQ2zAUoQIkWxvKGac7cs59wty6fsz+TyHAhvbnzlyDCvHBnhpZ5hnujs5+69ByevxKpJJjh9TTNnrmtm+8omtobJY01zbVk1ScjJ6x4c57TVJ+4vKyYlCJESSlUlCmoe7VOOjWdzvHJkhCc7B9h7oJ/HD/Rzx886GSy4X6QuVcW2lQ1sX9nE9lWNnLa6iTPXtizIlWBSPPm80zOkGoSIzFFNsopTVjVxyqom3v36dUBw02H34DgvdA/zYs8Qz3cFywMvHuH2nx2YPLe9qYbXrm3mrPWtnLOhhbPXt7Jivu3bL7wA11wD3/gGDA1BYyN84APw2c/Ctm0L+VaXvP7RDNm8K0GIyMkzs8l7RS7YtmLKsYGxDM8cGmTvgX72HghqHfc/+9xkM9X6ZXWcubaFM9Y289q1zZy2pnnmJqq774bLL4dMJlgABgfhK1+Bm2+G226DSy+N+d0uHeU21egEJQiRRaK5NsUvbl7OL24+1s8xPJ5l74F+9uzr47H9/Tx5cIB7njg0ebwuVcWW8BLc09c0cfb6Vs4a76Hp8sthZOT4F5lIGJdfDo89pprEApm8i1pXMYlIsTTUJDlv6wrO23qstjE0nuWpgwM8e3iQF7qCpqqf/fwo33k0GKjgz7/3d7xvbJxZBxrJZOC66+D66+N9A0tEV3gXdVuZ1SAsuN1gcejo6PDdu3eXOgyRitQ3kuax/f2c//otVI8Mn7C8Nzdj/f1FiKwy9Y2k2bOvj7bGGlY21bCisea4EYgf29/Htx78OXc+2kk6m2fP1TtorCnu/9vN7OHp8/FMUA1CRIBgJOK3nNIOoxFNSxF8cJBf+7sfccqqJlY117I8HNqkvbGG7asay+6mr2L7w9sf567HjzXnJQyaalM0VAejEGfzzks9w9Slqvjls9fwPy/YXPTkcCLlFY2IlF5jY9AhfQLpugZSVQl2PXmYI8PHD3bY1ljNqaubOG11M6eubuL01c1sX9W4JGZaPNA3yj17D/HejvW87bRVdA+O0TU4zsBohuF0jpF0MFrxhy/czLtfv47mMh05WAlCRKb6wAeCq5UymZnLpFLUfvhKbv2NC4BgNsO+0QxHh9McGhjjmUODPH1okGcODfKNB16ZnMI3YbClrYEz1rZwxprmySuqFltt4xsPvALAb759O+uX1Zc4mpOnPggRmeqFF+Css6KvYppQXz/nq5hyeeeVI8M8HSaNpw4O8GTnAAf6RifLrGqu4cy1LZy+ppnXrGzkNSsb2dLWQEOZNbnMxVgmx/mfv4/ztiznSx+MbNovK+qDEJG527YtuM9h+n0QAKlUsNx225wvca1KGFvbG9na3shlr1szub9vJM2TYbLYe6CfJzoH+P4zXVMmelrTUsvmFQ1saQ9GxN2+qonTVjexsowHNbxzTyd9Ixk+9MYtpQ7lVVOCEJHjXXppUEO47jr4x388dif1Bz8Iv/3bC3L/Q2t9NW/c1sYbt7VN7psYXuSF8A7xl3qGeenI8HEj4rbWpzhlZRMbV0xM4hTMxbGquZbVLbUl6+dwd/7hxy9z6qomzt+6/MQnlDklCBGJtm1bcJ9DEe91KBxeZLre4TTPHh6c7N94vmuQHz53/FwcAC11Kdoag3nJl9UHS2NtkoaaJA3VVTTXpVhWH8xZPlGmuTb5qkfMfejlozx1cIDPv+d1ZVvDmQ8lCBGpCMsbqjl/6wrO3zp1iJGxTI79R0c52D/Kof4xDg+McWhgjKPDGXqH0/y8d4Q9+/oYHs8ynM7N+hpNtUla61OsaAjmaG9vqmFFQzVNtUmaalOTx1c2BfOYt9anpiSCr/34JVrqUrz7nHWxfAbFpgQhIhWtNlU12bF9Ivm8M5LJMTAaJI+JpW8kTd9ohr6RDH0jaY4Mp9l/dIQ9+47SO5ye0i9SKFVlrGioYUVjNSsaa/jR8z187E1bqKteHJfyKkGIyJKRSBiNNUkaa5Ksba2b0znuznA6x9BYlsGxILF0D43TNTBO1+A4vcPjHBlK0zOc5rTVTVz5xs3xvokiUoIQEZmF2bGksrplac2zoTkMRUQkUtEThJltMLPvm9lTZvaEmf1WRJmLzKzfzPaEy58UO04RkaWuFE1MWeCz7v6ImTUBD5vZLnd/clq5H7j7O0sQn4iIUIIahLsfdPdHwvVB4ClgcVwTJiKyiJS0D8LMNgOvBx6MOHyBmT1qZneb2WuLGpiIiJTuKiYzawT+FfiMuw9MO/wIsMndh8zsMuDfgO0zPM9OYCfAxo0bY4xYRGRpKUkNwsxSBMnhm+7+7enH3X3A3YfC9buAlJm1TS8XHr/R3TvcvaO9vT3WuEVElpJSXMVkwFeBp9z92hnKrA7LYWbnEsR5pHhRiohI0eeDMLM3AT8AHgfy4e4/BDYCuPsNZvZp4JMEVzyNAr/j7j+ew3N3A31A4US5LQXbE+tR+9qAnpN4S4XPNZ/j0/fPtj3beyh13HOJtXC9cF+pY5/LZ67vyonjnqnMfL4rUfEW7tNnPrfjJ/P3ucndo5tf3H1RLcCNM21PrM+wb/dCvN5cj88W50wxRr2HUsc9l1hn+ezL/jPXd+XEcc/le3GiWPWZxxP3XGKdbVmMd1J/Z5bt78yyb6Feb67HZ4tz+vaJ3sPJWKi4p++LO+65PMdCfub6rszt/BN9L6L2zfQ+9JkX/+8z0qKacvTVMLPdPsO0e+WsUuOGyo1dcRdfpcZeqXFPWIw1iJN1Y6kDOEmVGjdUbuyKu/gqNfZKjRtQDUJERGagGoSIiERSghARkUhKECIiEkkJ4gTMbKOZ3WlmN5nZVaWOZz7M7M1mdoOZfcXMTnijYbkws4SZ/YWZfdHMrix1PPMRzmXyg/Bzv6jU8cyHmTWY2cNmVjHD7JvZ6eFnfZuZfbLU8cyHmb3bzL5sZneY2Y5SxxNlUSeI8Ee9y8z2Ttt/iZk9Y2bPz+FH/xTgu+7+EeCM2IKdZiFid/cfuPsngH8Hbo4z3oL4FuIzfxfBEPAZYH9csU63QLE7MATUUqTYFyhugN8H/jmeKI+3QN/xp8Lv+HuBol1OukCx/5u7fxz4EPDrMYZ70hb1VUxm9haCP9avu/uZ4b4q4FngYoI/4IeAK4Aq4PPTnuIjQA64jeAP/x/d/R8qJXZ37wrP+2fgY378qLllGXe4HHX3L5nZbe5+
edxxL2DsPe6eN7NVwLXu/v4KifssgmEhasP38O+VELe7d5nZrwBXAde7+7fijnshYw/Pu4Zg4NJHihH7vJzMbeCVtACbgb0F2xcA9xZs/wHwB7Oc/7vAW8L12yop9rDMRuDLlRQ38AHgveH6rZUUe0G56mJ+XxbgM/8L4G+A7wF3AIlKiHvac323kr4rgAH/B/ilYsY9n6Vk80GU0DpgX8H2fuC8WcrfA3zOzP4H8HKMcc3FfGMH+ChQlFrPLOYb97eBL5rZm4H74wxsDuYVu5m9B3gH0ApcH29os5pX3O7+RwBm9iHCWlCs0c1svp/3RcB7gBrgrlgjO7H5fs//N/BLQIuZvcbdb4gzuJOxFBOEReybsZ3N3fcCRWnimIN5xQ7g7lfHFMt8zPczHyFIbOVgvrF/myDBldq8vysA7v61hQ9lXub7ef8n8J9xBTNP8439C6chZWYAAAN0SURBVMAX4gvn1VvUndQz2A9sKNheD3SWKJb5qtTYKzVuqNzYFXfxVXLskZZigngI2G5mW8ysGngfcGeJY5qrSo29UuOGyo1dcRdfJccerdSdIDF3It0CHOTY5ZIfDfdfRnC1wQvAH5U6zsUUe6XGXcmxK27FHteyqC9zFRGRk7cUm5hERGQOlCBERCSSEoSIiERSghARkUhKECIiEkkJQkREIilByKJnZkNFfr2izr1hZq1m9r+K+ZqyNChBiMyTmc06hpm7v7HIr9kKKEHIgluKg/WJYGbbgL8F2oER4OPu/rSZ/TLwxwTDdR8B3u/uh83sc8BagiGee8zsWYKh1LeGj3/jweBrmNmQuzeGI41+DugBzgQeBj7g7m5mlwHXhsceAba6+5SZ3MKRVf8bwRwNDeG8B3cAy4AU8MfufgfwV8A2M9sD7HL33zOz3yOYRKcGuN3LY9BGqTSlvpVbi5a4F2AoYt99wPZw/Tzg/4Xryzg2kdbHgGvC9c8R/MDXFWz/mOAHuI0gmaQKXw+4COgnGLQtAfwEeBPBD/4+YEtY7hbg3yNi/BDBMA7Lw+0k0ByutwHPE4wgupmp8xLsAG4MjyUIZhR8S6n/HbRU3qIahCw5ZtYIvBH4F7PJEZprwsf1wK1mtoagFvFSwal3uvtowfZ33X0cGDezLmAVx08z+lN33x++7h6CH/Mh4EV3n3juW4CdM4S7y917J0IH/jKczSxPMP/AqohzdoTLz8LtRmA7pZ9bQyqMEoQsRQmgz93PiTj2RYKpQu8saCKaMDyt7HjBeo7ov6eoMlHzBsyk8DXfT9Ak9gZ3z5jZywS1kekM+Ly7f2keryNyHHVSy5LjwdzcL5nZfwewwNnh4RbgQLh+ZUwhPA1sNbPN4fZcJ6xvAbrC5PBWYFO4fxBoKih3L/CRsKaEma0zs5WvOmpZclSDkKWg3swKm36uJfjf+N+b2R8TdPj+E/AoQY3hX8zsAPAAsGWhg3H30fCy1HvMrAf46RxP/SbwHTPbDewhSDS4+xEz+5GZ7QXu9qCT+nTgJ2ET2hDBPN9dC/1eZHHTcN8iJWBmje4+ZMEv+N8Cz7n7daWOS6SQmphESuPjYaf1EwRNR+ovkLKjGoSIiERSDUJERCIpQYiISCQlCBERiaQEISIikZQgREQkkhKEiIhE+v8kzzS4dZPI2QAAAABJRU5ErkJggg==\n", 366 | "text/plain": [ 367 | "
" 368 | ] 369 | }, 370 | "metadata": { 371 | "needs_background": "light" 372 | }, 373 | "output_type": "display_data" 374 | } 375 | ], 376 | "source": [ 377 | "dynamic_model = Dynamic_Model()\n", 378 | "trainer = pl.Trainer(gpus=1, weights_summary=None, ) \n", 379 | "lr_finder = trainer.lr_find(dynamic_model)\n", 380 | "\n", 381 | "\n", 382 | "fig = lr_finder.plot(suggest=True); \n", 383 | "\n", 384 | "fig.show()\n", 385 | "lr_finder.suggestion()" 386 | ] 387 | }, 388 | { 389 | "cell_type": "code", 390 | "execution_count": 6, 391 | "metadata": {}, 392 | "outputs": [ 393 | { 394 | "name": "stdout", 395 | "output_type": "stream", 396 | "text": [ 397 | "img_size (720, 960)\n", 398 | " sn layer c w h size\n", 399 | "2 2 ReLU 64 360 480 (1, 64, 360, 480)\n", 400 | "4 4 Sequential 64 180 240 (1, 64, 180, 240)\n", 401 | "5 5 Sequential 128 90 120 (1, 128, 90, 120)\n", 402 | "6 6 Sequential 256 45 60 (1, 256, 45, 60)\n", 403 | "7 7 Sequential 512 23 30 (1, 512, 23, 30)\n" 404 | ] 405 | }, 406 | { 407 | "name": "stderr", 408 | "output_type": "stream", 409 | "text": [ 410 | "GPU available: True, used: True\n", 411 | "2020-06-14 22:54:43,058 distrib_data_parallel.py[251] INFO GPU available: True, used: True\n", 412 | "No environment variable for node rank defined. Set as 0.\n", 413 | "2020-06-14 22:54:43,060 distrib_data_parallel.py[297] WARNING No environment variable for node rank defined. Set as 0.\n", 414 | "CUDA_VISIBLE_DEVICES: [0]\n", 415 | "2020-06-14 22:54:43,060 distrib_data_parallel.py[323] INFO CUDA_VISIBLE_DEVICES: [0]\n" 416 | ] 417 | }, 418 | { 419 | "name": "stdout", 420 | "output_type": "stream", 421 | "text": [ 422 | "schedule \n", 423 | "False 600\n", 424 | "True 101\n", 425 | "Name: valid, dtype: int64\n" 426 | ] 427 | }, 428 | { 429 | "data": { 430 | "application/vnd.jupyter.widget-view+json": { 431 | "model_id": "0f1cc9981b8d4ad2b3689eef199c8f6a", 432 | "version_major": 2, 433 | "version_minor": 0 434 | }, 435 | "text/plain": [ 436 | "HBox(children=(FloatProgress(value=1.0, bar_style='info', description='Training', layout=Layout(flex='2'), max…" 437 | ] 438 | }, 439 | "metadata": {}, 440 | "output_type": "display_data" 441 | }, 442 | { 443 | "data": { 444 | "application/vnd.jupyter.widget-view+json": { 445 | "model_id": "", 446 | "version_major": 2, 447 | "version_minor": 0 448 | }, 449 | "text/plain": [ 450 | "HBox(children=(FloatProgress(value=1.0, bar_style='info', description='Validating', layout=Layout(flex='2'), m…" 451 | ] 452 | }, 453 | "metadata": {}, 454 | "output_type": "display_data" 455 | }, 456 | { 457 | "name": "stdout", 458 | "output_type": "stream", 459 | "text": [ 460 | "0 {'val_loss': tensor(0.8661, device='cuda:0')}\n" 461 | ] 462 | }, 463 | { 464 | "data": { 465 | "application/vnd.jupyter.widget-view+json": { 466 | "model_id": "", 467 | "version_major": 2, 468 | "version_minor": 0 469 | }, 470 | "text/plain": [ 471 | "HBox(children=(FloatProgress(value=1.0, bar_style='info', description='Validating', layout=Layout(flex='2'), m…" 472 | ] 473 | }, 474 | "metadata": {}, 475 | "output_type": "display_data" 476 | }, 477 | { 478 | "name": "stdout", 479 | "output_type": "stream", 480 | "text": [ 481 | "1 {'val_loss': tensor(0.5682, device='cuda:0')}\n" 482 | ] 483 | }, 484 | { 485 | "data": { 486 | "application/vnd.jupyter.widget-view+json": { 487 | "model_id": "", 488 | "version_major": 2, 489 | "version_minor": 0 490 | }, 491 | "text/plain": [ 492 | "HBox(children=(FloatProgress(value=1.0, bar_style='info', description='Validating', layout=Layout(flex='2'), m…" 
493 |     ]
494 |    },
495 |    "metadata": {},
496 |    "output_type": "display_data"
497 |   },
[repeated "Validating" progress-bar widget outputs (application/vnd.jupyter.widget-view+json HBox/FloatProgress blocks) trimmed; the per-epoch validation losses they interleaved are consolidated into the single stream output below]
498 |   {
499 |    "name": "stdout",
500 |    "output_type": "stream",
501 |    "text": [
502 |     "2 {'val_loss': tensor(0.5035, device='cuda:0')}\n",
503 |     "3 {'val_loss': tensor(0.4900, device='cuda:0')}\n",
504 |     "4 {'val_loss': tensor(0.3823, device='cuda:0')}\n",
505 |     "5 {'val_loss': tensor(0.3732, device='cuda:0')}\n",
506 |     "6 {'val_loss': tensor(0.3496, device='cuda:0')}\n",
507 |     "7 {'val_loss': tensor(0.2884, device='cuda:0')}\n",
508 |     "8 {'val_loss': tensor(0.3093, device='cuda:0')}\n",
509 |     "9 {'val_loss': tensor(0.2922, device='cuda:0')}\n",
510 |     "10 {'val_loss': tensor(0.2817, device='cuda:0')}\n",
511 |     "11 {'val_loss': tensor(0.2926, device='cuda:0')}\n",
512 |     "12 {'val_loss': tensor(0.3395, device='cuda:0')}\n",
513 |     "13 {'val_loss': tensor(0.3500, device='cuda:0')}\n",
514 |     "14 {'val_loss': tensor(0.3520, device='cuda:0')}\n",
515 |     "15 {'val_loss': tensor(0.3131, device='cuda:0')}\n",
516 |     "16 {'val_loss': tensor(0.3478, device='cuda:0')}\n",
517 |     "17 {'val_loss': tensor(0.3302, device='cuda:0')}\n",
518 |     "18 {'val_loss': tensor(0.3950, device='cuda:0')}\n",
519 |     "19 {'val_loss': tensor(0.3765, device='cuda:0')}\n",
520 |     "\n"
521 |    ]
522 |   },
523 |   {
524 |    "data": {
525 |     "text/plain": [
526 |      "1"
527 |     ]
528 |    },
529 |    "execution_count": 6,
530 |    "metadata": {},
531 |    "output_type": "execute_result"
532 |   }
533 |  ],
534 |  "source": [
535 |   "\n",
536 |   "dynamic_model = Dynamic_Model()\n",
537 |   "\n",
538 |   "# most basic trainer, uses good defaults (1 gpu)\n",
539 |   "trainer = pl.Trainer(gpus=1,\n",
540 |   "                     max_epochs=20,\n",
541 |   "                     num_sanity_val_steps=0,\n",
542 |   "                     weights_summary=None)\n",
543 |   "\n",
544 |   "trainer.fit(dynamic_model)\n"
545 |  ]
546 | },
547 | {
548 |  "cell_type": "code",
549 |  "execution_count": null,
550 |  "metadata": {},
551 |  "outputs": [],
552 |  "source": []
553 | }
554 | ],
555 | "metadata": {
556 |  "kernelspec": {
557 |   "display_name": "Python 3",
558 |   "language": "python",
559 |   "name": "python3"
560 |  },
561 |  "language_info": {
562 |   "codemirror_mode": {
563 |    "name": "ipython",
564 |    "version": 3
565 |   },
566 |   "file_extension": ".py",
567 |   "mimetype": "text/x-python",
568 |   "name": "python",
569 |   "nbconvert_exporter": "python",
570 |   "pygments_lexer": "ipython3",
571 |   "version": "3.7.4"
572 |  }
573 | },
574 | "nbformat": 4,
575 | "nbformat_minor": 2
576 | }
"nbformat_minor": 2 919 | } 920 | -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | [English version](https://github.com/Flyfoxs/dynamic_unet/blob/master/readme_en.md) 2 | 3 | # 将 Unet与预训练网络结合, 在多项器官分割中取得优异成绩 4 | - 测试过的预训练网络包括(如有其他需求可提交Issue) 5 | - VGG 6 | - Resnet 7 | - Densenet 8 | - Efficientnet 9 | 10 | - 核心改动已经提交到 [fastai](https://github.com/fastai/fastai) 11 | 12 | 13 | # Install 14 | ```shell script 15 | pip install git+https://github.com/Flyfoxs/dynamic_unet@master 16 | ``` 17 | 18 | 19 | # EfficientNet 20 | ```python 21 | encoder = efficient_unet(0) 22 | unet = DynamicUnet(encoder, n_classes=5, img_size=(224, 224), blur=False, blur_final=False, 23 | self_attention=False, y_range=None, norm_type=NormType, 24 | last_cross=True, 25 | bottle=False) 26 | 27 | print(unet(torch.rand(1,3,224,224)).shape) 28 | ``` 29 | 30 | # Densenet 31 | ```python 32 | encoder = nn.Sequential(*list(models.densenet121().children())[0]) 33 | unet = DynamicUnet(encoder, n_classes=5, img_size=(224, 224), blur=False, blur_final=False, 34 | self_attention=False, y_range=None, norm_type=NormType, 35 | last_cross=True, 36 | bottle=False) 37 | print(unet(torch.rand(1,3,224,224)).shape) 38 | ``` 39 | 40 | # Resnet 41 | ```python 42 | encoder = nn.Sequential(*list(models.resnet34().children())[:-3]) 43 | 44 | unet = DynamicUnet(encoder, n_classes=5, img_size=(224, 224), blur=False, blur_final=False, 45 | self_attention=False, y_range=None, norm_type=NormType, 46 | last_cross=True, 47 | bottle=False) 48 | print(unet(torch.rand(1,3,224,224)).shape) 49 | ``` 50 | 51 | 更多其他网络参考: [notebook](https://github.com/Flyfoxs/dynamic_unet/blob/master/notebook/different_network.ipynb) 52 | -------------------------------------------------------------------------------- /readme_en.md: -------------------------------------------------------------------------------- 1 | [Chinese version](https://github.com/Flyfoxs/dynamic_unet/blob/master/readme.md) 2 | # Take pretrained model as encoder of UNET, get better score in many organ segmentation task 3 | - Already test on these models 4 | - VGG 5 | - Resnet 6 | - Densenet 7 | - Efficientnet 8 | 9 | 10 | # Install 11 | ```shell script 12 | pip install git+https://github.com/Flyfoxs/dynamic_unet@master 13 | ``` 14 | 15 | - The core code submit to fastai [fastai](https://github.com/fastai/fastai) 16 | 17 | # EfficientNet 18 | ```python 19 | encoder = efficient_unet(0) 20 | unet = DynamicUnet(encoder, n_classes=5, img_size=(224, 224), blur=False, blur_final=False, 21 | self_attention=False, y_range=None, norm_type=NormType, 22 | last_cross=True, 23 | bottle=False) 24 | 25 | print(unet(torch.rand(1,3,224,224)).shape) 26 | ``` 27 | 28 | # Densenet 29 | ```python 30 | encoder = nn.Sequential(*list(models.densenet121().children())[0]) 31 | unet = DynamicUnet(encoder, n_classes=5, img_size=(224, 224), blur=False, blur_final=False, 32 | self_attention=False, y_range=None, norm_type=NormType, 33 | last_cross=True, 34 | bottle=False) 35 | print(unet(torch.rand(1,3,224,224)).shape) 36 | ``` 37 | 38 | # Resnet 39 | ```python 40 | encoder = nn.Sequential(*list(models.resnet34().children())[:-3]) 41 | 42 | unet = DynamicUnet(encoder, n_classes=5, img_size=(224, 224), blur=False, blur_final=False, 43 | self_attention=False, y_range=None, norm_type=NormType, 44 | last_cross=True, 45 | bottle=False) 46 | print(unet(torch.rand(1,3,224,224)).shape) 47 | ``` 48 | 49 | You can get more 
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | efficientnet_pytorch
2 | filecache
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | from setuptools import setup
3 | import setuptools
4 |
5 | try:
6 |     with open('readme.md') as file:
7 |         long_description = file.read()
8 | except OSError:
9 |     print('Readme read failed')
10 |     long_description = 'dynamic_unet: https://github.com/Flyfoxs/dynamic_unet'
11 |
12 | setup(
13 |     name='dynamic_unet',
14 |     version='0.1.2',
15 |     description='dynamic_unet',
16 |     long_description=long_description,
17 |     long_description_content_type='text/markdown',
18 |     url='https://github.com/Flyfoxs/dynamic_unet',
19 |     author='Felix Li',
20 |     author_email='lilao@163.com',
21 |     license='MIT',
22 |     classifiers=[
23 |         "Programming Language :: Python :: 3",
24 |         "License :: OSI Approved :: MIT License",
25 |         "Operating System :: OS Independent",
26 |     ],
27 |     install_requires=[
28 |         "termcolor>=1.1",
29 |         "Pillow==6.2.2",  # torchvision currently does not work with Pillow 7
30 |         "yacs>=0.1.6",
31 |         "tabulate",
32 |         "easydict",
33 |         "nibabel",
34 |         "pydicom",
35 |         "cloudpickle",
36 |         "matplotlib",
37 |         "tqdm>4.29.0",
38 |         "tensorboard",
39 |         "fvcore",
40 |         "future",  # used by caffe2
41 |         "pydot",  # used to save caffe2 SVGs
42 |         "SimpleITK",
43 |         "plotly",
44 |     ],
45 |     keywords='unet fastai',
46 |     packages=setuptools.find_packages(),
47 | )
--------------------------------------------------------------------------------
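For local development, the setup.py above also supports a standard editable install (a minimal sketch, assuming a local clone):

```shell script
# clone the repository and install it in editable mode
git clone https://github.com/Flyfoxs/dynamic_unet
cd dynamic_unet
pip install -e .
```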