├── .link ├── Readme.md ├── .tools │ ├── __init__.py │ ├── KerasTF_Import.py │ ├── update.sh │ ├── npy_exporter.py │ ├── pth_exporter.py │ ├── npz_exporter.py │ ├── Caffe_exporter.py │ ├── gluoncv_zoo.py │ ├── ckpt_exporter.py │ ├── TorchONNX_Import.py │ ├── pkl_exporter.py │ └── Caffe2_exporter.py └── .experimental │ ├── .eslintrc │ └── exporter.py ├── ImageEnhancement ├── Readme.md ├── Mixed │ ├── Readme.md │ ├── MoePhoto on CustomAnime │ │ ├── Readme.md │ │ └── exporter.py │ └── CBDNet on CommonNoise │ │ ├── README.md │ │ ├── CBDNet trained on CommonNoise.m │ │ └── CBDNet-JPEG trained on CommonNoise.m ├── AntiJPEG │ └── Readme.md ├── Debluring │ ├── Readme.md │ ├── DeblurGAN on GOPRO │ │ ├── README.md │ │ └── DeblurGAN trained on GOPRO.m │ ├── SHC Text Deblur on Custom │ │ ├── README.md │ │ └── SHC Text Deblur trained on Custom.m │ └── SRN Deblur on GOPRO │ │ ├── README.md │ │ ├── exporter.py │ │ └── SRN Deblur trained on GOPRO.m ├── Dehazing │ └── Readme.md ├── Denoising │ └── Readme.md └── SuperResolution │ ├── Readme.md │ ├── SRCNN on Set14 │ ├── Readme.md │ ├── 1-download.m │ └── 2-convert.m │ ├── SRGAN trained on VOC │ ├── README.md │ ├── SRGAN2x trained on VOC.m │ ├── SRGAN4x trained on VOC.m │ └── SRGAN8x trained on VOC.m │ ├── EDSR on DIV2K │ ├── Readme.md │ ├── EDSR2x trained on DIV2K.m │ ├── EDSR3x trained on DIV2K.m │ └── EDSR4x trained on DIV2K.m │ ├── SRResNet on CommonSR │ ├── README.md │ ├── SRResNet2x trained on CommonSR.m │ ├── SRResNet3x trained on CommonSR.m │ ├── SRResNet4x trained on CommonSR.m │ └── SRResNet8x trained on CommonSR.m │ ├── PESR on DIV2K │ ├── Readme.md │ └── PESR4x trained on DIV2K.m │ ├── VDSR on SR291 │ ├── 1-download.m │ ├── Readme.md │ └── 2-convert.m │ ├── ProSR trained on Flickr2K │ └── README.md │ └── D-DBPN on NTIRE2018 │ └── D-DBPN8x trained on NTIRE2018.m ├── ImageGeneration ├── Inpainting │ ├── Readme.md │ ├── EdgeConnect on CelebA │ │ ├── README.md │ │ ├── exporter.py │ │ └── debugger.py │ └── PCU on CustomAnime │ │ └── Readme.md ├── Human │ ├── Readme.md │ └── PGGAN on CelebA │ │ ├── Readme.md │ │ └── 2-exporter.py ├── Anime │ ├── PGGAN-128 trained on Anime │ │ ├── 1-download.md │ │ ├── PixelNorm.m │ │ ├── Readme.md │ │ └── 2-exporter.py │ ├── PGGAN-256 trained on HoloFaces │ │ ├── 1-download.md │ │ ├── PixelNorm.m │ │ ├── Readme.md │ │ └── 2-exporter.py │ ├── CartoonGANs trained on Multi-Style │ │ ├── 2-forward.m │ │ ├── Readme.md │ │ ├── 1-download.m │ │ ├── CartoonGAN trained on Hosoda Style.m │ │ ├── CartoonGAN trained on Paprika Style.m │ │ ├── CartoonGAN trained on Shinkai Style.m │ │ └── CartoonGAN trained on Hayao Style.m │ └── ComixGANs Photo2Comic Style Transfer │ │ ├── Readme.md │ │ ├── 1-download.m │ │ ├── ComixGAN Comic Style Transfer Alpha.m │ │ ├── ComixGAN Comic Style Transfer Beta.m │ │ ├── ComixGAN Comic Style Transfer Delta.m │ │ └── ComixGAN Comic Style Transfer Gamma.m └── Readme.md ├── ImageRecognition ├── Detection │ ├── Readme.md │ ├── SSD on VOC │ │ └── exporter.py │ ├── FCN on ADE │ │ └── exporter.py │ ├── FCN on VOC │ │ └── exporter.py │ └── YOLO3 on COCO │ │ └── coco_yolo3_darknet53.m ├── Classifation │ ├── Readme.md │ ├── RAN on CIFAR10 │ │ ├── Readme.md │ │ └── exporter.py │ ├── ResNet on ImageNet │ │ ├── Readme.md │ │ ├── exporter.py │ │ ├── Resnet18-V1b trained on ImageNet.m │ │ └── Resnet34-V1b trained on ImageNet.m │ ├── ResNet on CIFAR10 │ │ ├── Readme.md │ │ ├── exporter.py │ │ ├── ResnetV2-110 tested on CIFAR10 TestSet.mt │ │ ├── ResnetV2-20 tested on CIFAR10 TestSet.mt │ │ ├── ResnetV2-56 tested on 
CIFAR10 TestSet.mt │ │ ├── ResnetV2-20 tested on CIFAR10 TestSet.md │ │ ├── ResnetV2-56 tested on CIFAR10 TestSet.md │ │ └── ResnetV2-110 tested on CIFAR10 TestSet.md │ ├── DPN on ImageNet │ │ └── Readme.md │ ├── DenseNet on ImageNet │ │ ├── Readme.md │ │ ├── export.py │ │ ├── DenseNet161 trained on ImageNet.m │ │ ├── DenseNet169 trained on ImageNet.m │ │ └── DenseNet201 trained on ImageNet.m │ ├── PolyNet on ImageNet │ │ ├── Readme.md │ │ └── exporter.py │ ├── Inception on ImageNet21K │ │ └── Readme.md │ ├── ResNet on ImageNet11K │ │ └── Readme.md │ ├── SENet on ImageNet │ │ └── export.py │ ├── AlexNet on ImageNet │ │ ├── export.py │ │ ├── Readme.md │ │ ├── debugger.py │ │ └── AlexNet trained on ImageNet.m │ ├── SqueezeNet on ImageNet │ │ ├── exporter.py │ │ ├── Readme.md │ │ ├── SqueezeNet1.0 trained on ImageNet.m │ │ └── SqueezeNet1.1 trained on ImageNet.m │ ├── ResNeXt on CIFAR10 │ │ └── export.py │ ├── Wide-ResNet on CIFAR10 │ │ ├── export.py │ │ ├── WRN16-10 tested on CIFAR10 TestSet.mt │ │ ├── WRN28-10 tested on CIFAR10 TestSet.mt │ │ ├── WRN40-8 tested on CIFAR10 TestSet.mt │ │ ├── WRN28-10 tested on CIFAR10 TestSet.md │ │ ├── WRN16-10 tested on CIFAR10 TestSet.md │ │ └── WRN40-8 tested on CIFAR10 TestSet.md │ ├── ResNeXt on ImageNet │ │ └── export.py │ ├── VGG on ImageNet │ │ ├── export.py │ │ ├── imagenet_vgg13.m │ │ ├── imagenet_vgg16.m │ │ ├── imagenet_vgg19.m │ │ └── imagenet_vgg11.m │ ├── VGG-BN on ImageNet │ │ ├── export.py │ │ ├── imagenet_vgg11_bn.m │ │ ├── imagenet_vgg13_bn.m │ │ ├── imagenet_vgg16_bn.m │ │ └── imagenet_vgg19_bn.m │ ├── LeNet on MNIST │ │ ├── Readme.md │ │ ├── LeNet trained on MNIST.m │ │ ├── LeNet trained on MNIST.mt │ │ └── LeNet tested on MNIST TestSet.md │ └── Xception on ImageNet │ │ └── exporter.py ├── Anime │ ├── Illustration2Vec Alternative on Safebooru │ │ └── Readme.md │ ├── Illustration2Vec Sketch on Custom │ │ └── Readme.md │ ├── Illustration2Vec on Danbooru │ │ └── Readme.md │ ├── VGGs trained on Danbooru2017 │ │ ├── Readme.md │ │ ├── exporter.py │ │ ├── Illustration2Vec trained on Danbooru.m │ │ └── Illustration2Vec Mega trained on Danbooru.m │ └── ResNets trained on Danbooru2018 │ │ ├── 2-export.py │ │ ├── Readme.md │ │ ├── 1-download.m │ │ └── ResNet-20 trained on Danbooru2018.m └── HumanFace │ └── InsightFace Gender&Age on Custom │ └── Readme.md ├── .gitignore └── Readme.md /.link/Readme.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.link/.tools/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /ImageEnhancement/Readme.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /ImageEnhancement/Mixed/Readme.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /ImageEnhancement/AntiJPEG/Readme.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /ImageEnhancement/Debluring/Readme.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- 
/ImageEnhancement/Dehazing/Readme.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /ImageEnhancement/Denoising/Readme.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /ImageGeneration/Inpainting/Readme.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /ImageRecognition/Detection/Readme.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /ImageEnhancement/SuperResolution/Readme.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/Readme.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /ImageEnhancement/SuperResolution/SRCNN on Set14/Readme.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /ImageGeneration/Inpainting/EdgeConnect on CelebA/README.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/RAN on CIFAR10/Readme.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/ResNet on ImageNet/Readme.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.link/.tools/KerasTF_Import.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding=utf-8 3 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/ResNet on CIFAR10/Readme.md: -------------------------------------------------------------------------------- 1 | TODO: Same Test 2 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/DPN on ImageNet/Readme.md: -------------------------------------------------------------------------------- 1 | https://github.com/cypw/DPNs -------------------------------------------------------------------------------- /ImageRecognition/Classifation/DenseNet on ImageNet/Readme.md: -------------------------------------------------------------------------------- 1 | TODO: Same Tests 2 | -------------------------------------------------------------------------------- /ImageGeneration/Human/Readme.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | ## Datasets 5 | 6 | 7 | ## Subtask 8 | 9 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/PolyNet on ImageNet/Readme.md: -------------------------------------------------------------------------------- 1 | 
https://github.com/open-mmlab/polynet -------------------------------------------------------------------------------- /ImageEnhancement/Mixed/MoePhoto on CustomAnime/Readme.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | add `__init__.py` in `./python` -------------------------------------------------------------------------------- /ImageEnhancement/Mixed/CBDNet on CommonNoise/README.md: -------------------------------------------------------------------------------- 1 | https://github.com/WolframRhodium/Super-Resolution-Zoo/tree/master/CBDNet -------------------------------------------------------------------------------- /.link/.tools/update.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | pip install gluoncv --pre --upgrade 3 | echo " " 4 | pip show gluoncv 5 | echo " " 6 | sleep 60 7 | -------------------------------------------------------------------------------- /ImageRecognition/Anime/Illustration2Vec Alternative on Safebooru/Readme.md: -------------------------------------------------------------------------------- 1 | ## Remark 2 | 3 | ### TODO 4 | 5 | 6 | 7 | ## Source 8 | 9 | - **Cite:** https://github.com/GINK03/alt-i2v 10 | - **Download:** not found -------------------------------------------------------------------------------- /ImageRecognition/Classifation/Inception on ImageNet21K/Readme.md: -------------------------------------------------------------------------------- 1 | ### Remark 2 | 3 | ### Source 4 | 5 | https://github.com/dmlc/mxnet-model-gallery/blob/master/imagenet-21k-inception.md 6 | 7 | ### Files 8 | 9 | - Inception-0009.params 10 | - synset.txt -------------------------------------------------------------------------------- /ImageGeneration/Inpainting/PCU on CustomAnime/Readme.md: -------------------------------------------------------------------------------- 1 | ## Remark 2 | 3 | The partial convolutions behave differently from what I expected, and the Keras implementation is very strange. 4 | 5 | I can't understand what happened; I need to do a step-by-step comparison. -------------------------------------------------------------------------------- /ImageRecognition/Anime/Illustration2Vec Sketch on Custom/Readme.md: -------------------------------------------------------------------------------- 1 | ## Remark 2 | 3 | ### TODO 4 | 5 | unable to build SE-Net 6 | 7 | ## Source 8 | 9 | - **Cite:** https://github.com/MerHS/sketch-i2v 10 | - **Download:** https://github.com/MerHS/sketch-i2v/releases/tag/1.0 -------------------------------------------------------------------------------- /ImageGeneration/Anime/PGGAN-128 trained on Anime/1-download.md: -------------------------------------------------------------------------------- 1 | 1. `git clone https://github.com/tkarras/progressive_growing_of_gans.git` 2 | 2. https://mega.nz/#!ZRUDjQiS!yMMBkq1CH7ohkU2kmL8a-jc-xJZCyKbkz_oAsE5hobw 3 | 3. find `network-snapshot-057891.pkl` and download it 4 | 4. copy and run `2-exporter.py` as the next step -------------------------------------------------------------------------------- /ImageGeneration/Anime/PGGAN-256 trained on HoloFaces/1-download.md: -------------------------------------------------------------------------------- 1 | 1. `git clone https://github.com/tkarras/progressive_growing_of_gans.git` 2 | 2. https://www.dropbox.com/s/yfv9ahlwlquj06z 3 | 3. find `2018-10-05-gwern-holofaces-progan-model2053-006353.pkl` and download it 4 | 4. copy and run `2-exporter.py` as the next step (see the sketch below) 5 |
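For orientation, `2-exporter.py` builds on unpickling the snapshot with the cloned repo on `sys.path`. A rough sketch of that first step only (assumptions: a TensorFlow session must already be initialized, e.g. via the repo's `tfutil`, and snapshots store a `(G, D, Gs)` network triple as in the repo's import example):

```python
import pickle
import sys

sys.path.append('./progressive_growing_of_gans')  # the pickle references classes defined in the cloned repo

# assumption: a TF session must be live before unpickling (tfutil.init_tf() in the repo)
with open('2018-10-05-gwern-holofaces-progan-model2053-006353.pkl', 'rb') as f:
    G, D, Gs = pickle.load(f)  # assumption: (G, D, Gs) triple, per the repo's import example

print(Gs)  # inspect the averaged generator before exporting its weights
```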
-------------------------------------------------------------------------------- /ImageEnhancement/Debluring/DeblurGAN on GOPRO/README.md: -------------------------------------------------------------------------------- 1 | ### Remark 2 | 3 | I double-checked my code and did nothing wrong. 4 | 5 | But the model does not seem to work. 6 | 7 | Waiting for an update. (2018-11-08) 8 | 9 | ### Source 10 | https://github.com/RaphaelMeudec/deblur-gan 11 | 12 | ### Files 13 | 14 | - generator.h5 15 | 16 | -------------------------------------------------------------------------------- /ImageRecognition/HumanFace/InsightFace Gender&Age on Custom/Readme.md: -------------------------------------------------------------------------------- 1 | ### Remark 2 | 3 | > @GalAster: 4 | > **TODO:** Use ArgMaxLayer for ResNet50-Age trained on AsianFace 5 | > **FIX:** a bug in Gender 6 | > **Warning:** Untested 7 | 8 | ### Source 9 | 10 | https://github.com/deepinsight/insightface/wiki/Model-Zoo#41-genderage 11 | -------------------------------------------------------------------------------- /.link/.experimental/.eslintrc: -------------------------------------------------------------------------------- 1 | { 2 | "plugins": ["json"], 3 | "rules": { 4 | "indent": ["error", 4], 5 | "brace-style": "error", 6 | "array-bracket-newline": [ 7 | "error", { 8 | "multiline": true, 9 | "minItems": 2 10 | } 11 | ] 12 | 13 | } 14 | } -------------------------------------------------------------------------------- /ImageGeneration/Readme.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | ### Attention 6 | 7 | 8 | 9 | w x + b 10 | 11 | 12 | 13 | 14 | ### Fine-tuning tricks 15 | 16 | #### Eliminating Tanh 17 | 18 | ```mathematica 19 | (Tanh[x] + 1)/2 == LogisticSigmoid[2x] 20 | ``` 21 | 22 | #### Eliminating the coefficient 23 | 24 | The leftover factor of $2$ is still ugly; if the last layer is a convolution, it can be folded away: 25 | 26 | $k (w x + b) + s = (k w) x + (k b + s)$, i.e. rescale the weights to $k w$ and the bias to $k b + s$. 27 | -------------------------------------------------------------------------------- /ImageRecognition/Anime/Illustration2Vec on Danbooru/Readme.md: -------------------------------------------------------------------------------- 1 | ## Remark 2 | 3 | ### TODO 4 | 5 | - trans to net app 6 | - threshold=0.25 7 | - softmax on rating 8 | 9 | ## Source 10 | 11 | - **Cite:** https://github.com/rezoo/illustration2vec 12 | - **Download:** https://github.com/rezoo/illustration2vec/releases/tag/v2.0.0 13 | - No plugin, No git -------------------------------------------------------------------------------- /ImageRecognition/Classifation/ResNet on ImageNet11K/Readme.md: -------------------------------------------------------------------------------- 1 | ### Remark 2 | 3 | Replaced the DataBatchNormLayer with the Encoder, because I can't find the model's shift parameters. 4 | 5 | The model still seems to work.
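For reference, a quick way to check which normalization arrays the checkpoint actually ships (a minimal sketch; the filename comes from the Files list below):

```python
import mxnet as mx

# list every batch-norm-related array stored in the checkpoint,
# to see whether the missing shifts exist under another name
params = mx.nd.load('resnet-152-0000.params')
for key in sorted(params):
    if 'bn' in key:
        print(key, params[key].shape)
```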
6 | 7 | ### Source 8 | 9 | http://data.dmlc.ml/mxnet/models/imagenet-11k/ 10 | 11 | ### Files 12 | 13 | - resnet-152-0000.params 14 | - synset.txt -------------------------------------------------------------------------------- /ImageRecognition/Classifation/SENet on ImageNet/export.py: -------------------------------------------------------------------------------- 1 | import gluoncv.model_zoo as gz 2 | from gluoncv.utils import export_block 3 | 4 | 5 | def zoo_import(name, head=''): 6 | """Download from Gluoncv Zoo""" 7 | net = gz.get_model(name, pretrained=True) 8 | export_block(head + name, net, preprocess=True) 9 | 10 | 11 | zoo_import('senet_154') 12 | -------------------------------------------------------------------------------- /ImageRecognition/Detection/SSD on VOC/exporter.py: -------------------------------------------------------------------------------- 1 | import gluoncv.model_zoo as gz 2 | from gluoncv.utils import export_block 3 | 4 | 5 | def zoo_import(name, head=''): 6 | """Download from Gluoncv Zoo""" 7 | net = gz.get_model(name, pretrained=True) 8 | export_block(head + name, net, preprocess=True) 9 | 10 | 11 | zoo_import('ssd_512_vgg16_atrous_voc') 12 | -------------------------------------------------------------------------------- /ImageEnhancement/Debluring/SHC Text Deblur on Custom/README.md: -------------------------------------------------------------------------------- 1 | ### Remark 2 | 3 | I didn't do the test because I don't know how to generate the test set. 4 | 5 | I can't afford matlab now. 6 | 7 | ### Source 8 | https://github.com/meijianhan/DeepDeblur/blob/master/ModelSave/DeblurSHC19ConvLayers.hdf5 9 | 10 | ### Files 11 | 12 | - DeblurSHC19ConvLayers.hdf5 13 | 14 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/AlexNet on ImageNet/export.py: -------------------------------------------------------------------------------- 1 | import gluoncv.model_zoo as gz 2 | from gluoncv.utils import export_block 3 | 4 | 5 | def zoo_import(name, head=''): 6 | """Download from Gluoncv Zoo""" 7 | net = gz.get_model(name, pretrained=True) 8 | export_block(head + name, net, preprocess=True) 9 | 10 | 11 | zoo_import('alexnet', 'imagenet_') 12 | -------------------------------------------------------------------------------- /.link/.tools/npy_exporter.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding=utf-8 3 | import numpy as npy 4 | import wolframclient.serializers as wxf 5 | 6 | 7 | def npy2wxf(path): 8 | data = npy.load(path) 9 | wxf.export(data, path + '.wxf', target_format='wxf') 10 | 11 | 12 | '''test 13 | npy.save('4d_array.npy', random.rand(1, 3, 32, 32)) 14 | npy2wxf('4d_array.npy') 15 | ''' 16 | -------------------------------------------------------------------------------- /ImageRecognition/Detection/FCN on ADE/exporter.py: -------------------------------------------------------------------------------- 1 | import gluoncv.model_zoo as gz 2 | from gluoncv.utils import export_block 3 | 4 | 5 | def zoo_import(name, head=''): 6 | """Download from Gluoncv Zoo""" 7 | net = gz.get_model(name, pretrained=True) 8 | export_block(head + name, net, preprocess=True) 9 | 10 | 11 | zoo_import('fcn_resnet50_ade') 12 | zoo_import('fcn_resnet101_ade') 13 | -------------------------------------------------------------------------------- /ImageRecognition/Detection/FCN on VOC/exporter.py: 
-------------------------------------------------------------------------------- 1 | import gluoncv.model_zoo as gz 2 | from gluoncv.utils import export_block 3 | 4 | 5 | def zoo_import(name, head=''): 6 | """Download from Gluoncv Zoo""" 7 | net = gz.get_model(name, pretrained=True) 8 | export_block(head + name, net, preprocess=True) 9 | 10 | 11 | zoo_import('fcn_resnet50_voc') 12 | zoo_import('fcn_resnet101_voc') 13 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/SqueezeNet on ImageNet/exporter.py: -------------------------------------------------------------------------------- 1 | import gluoncv.model_zoo as gz 2 | from gluoncv.utils import export_block 3 | 4 | 5 | def zoo_import(name, head=''): 6 | """Download from Gluoncv Zoo""" 7 | net = gz.get_model(name, pretrained=True) 8 | export_block(head + name, net, preprocess=True) 9 | 10 | 11 | zoo_import('squeezenet1.0') 12 | zoo_import('squeezenet1.1') -------------------------------------------------------------------------------- /.link/.experimental/exporter.py: -------------------------------------------------------------------------------- 1 | import gluoncv.model_zoo as gz 2 | from gluoncv.utils import export_block 3 | 4 | 5 | def zoo_import(name, head=''): 6 | """Download from Gluoncv Zoo""" 7 | net = gz.get_model(name, pretrained=True) 8 | export_block(head + name, net, preprocess=True) 9 | 10 | 11 | zoo_import('fcn_resnet101_voc') 12 | zoo_import('psp_resnet101_voc') 13 | zoo_import('deeplab_resnet101_voc') 14 | -------------------------------------------------------------------------------- /.link/.tools/pth_exporter.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding=utf-8 3 | import torch 4 | import wolframclient.serializers as wxf 5 | 6 | 7 | def pth2wxf(path): 8 | pth = torch.load(path, map_location=torch.device('cpu')) 9 | npy = {key: value.numpy() for key, value in pth.items()} 10 | wxf.export(npy, path + '.wxf', target_format='wxf') 11 | 12 | 13 | ''' test 14 | pth2wxf('checkpoint.pth') 15 | ''' 16 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/ResNeXt on CIFAR10/export.py: -------------------------------------------------------------------------------- 1 | import gluoncv.model_zoo as gz 2 | from gluoncv.utils import export_block 3 | 4 | 5 | def zoo_import(name, head=''): 6 | """Download from Gluoncv Zoo""" 7 | net = gz.get_model(name, pretrained=True) 8 | export_block(head + name, net, preprocess=True) 9 | 10 | 11 | # can not download 12 | # zoo_import('cifar_resnext29_32x4d') 13 | zoo_import('cifar_resnext29_16x64d') 14 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/ResNet on CIFAR10/exporter.py: -------------------------------------------------------------------------------- 1 | import gluoncv.model_zoo as gz 2 | from gluoncv.utils import export_block 3 | 4 | 5 | def zoo_import(name, head=''): 6 | """Download from Gluoncv Zoo""" 7 | net = gz.get_model(name, pretrained=True) 8 | export_block(head + name, net, preprocess=True) 9 | 10 | 11 | zoo_import('cifar_resnet20_v2') 12 | zoo_import('cifar_resnet56_v2') 13 | zoo_import('cifar_resnet110_v2') 14 | -------------------------------------------------------------------------------- /ImageRecognition/Anime/VGGs trained on Danbooru2017/Readme.md: 
-------------------------------------------------------------------------------- 1 | # VGGs trained on Danbooru2017 2 | 3 | #### Input 4 | 5 | ``` 6 | 7 | ``` 8 | 9 | - No plugin, No git 10 | 11 | #### Output 12 | 13 | ``` 14 | 15 | ``` 16 | 17 | ## Reference 18 | 19 | - Link: [rezoo/illustration2vec](https://github.com/rezoo/illustration2vec) 20 | 21 | ## Remark 22 | 23 | ### TODO 24 | 25 | - trans to net app 26 | - threshold=0.25 27 | - softmax on rating 28 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/Wide-ResNet on CIFAR10/export.py: -------------------------------------------------------------------------------- 1 | import gluoncv.model_zoo as gz 2 | from gluoncv.utils import export_block 3 | 4 | 5 | def zoo_import(name, head=''): 6 | """Download from Gluoncv Zoo""" 7 | net = gz.get_model(name, pretrained=True) 8 | export_block(head + name, net, preprocess=True) 9 | 10 | 11 | zoo_import('cifar_wideresnet16_10') 12 | zoo_import('cifar_wideresnet28_10') 13 | zoo_import('cifar_wideresnet40_8') 14 | -------------------------------------------------------------------------------- /.link/.tools/npz_exporter.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding=utf-8 3 | import numpy as npz 4 | import wolframclient.serializers as wxf 5 | 6 | 7 | def npz2wxf(path): 8 | data = dict(npz.load(path))  # materialize the NpzFile mapping so it can be serialized 9 | wxf.export(data, path + '.wxf', target_format='wxf') 10 | 11 | 12 | '''test 13 | a = npz.array([[1, 2, 3], [4, 5, 6]]) 14 | b = npz.arange(0, 1.0, 0.1) 15 | c = npz.sin(b) 16 | npz.savez("result.npz", a, b, sin_arr=c) 17 | npz2wxf("result.npz") 18 | ''' -------------------------------------------------------------------------------- /ImageRecognition/Classifation/ResNeXt on ImageNet/export.py: -------------------------------------------------------------------------------- 1 | import gluoncv.model_zoo as gz 2 | from gluoncv.utils import export_block 3 | 4 | 5 | def zoo_import(name, head=''): 6 | """Download from Gluoncv Zoo""" 7 | net = gz.get_model(name, pretrained=True) 8 | export_block(head + name, net, preprocess=True) 9 | 10 | # cannot download 11 | # zoo_import('resnext50_32x4d') 12 | # zoo_import('resnext101_32x4d') 13 | # zoo_import('resnext101_64x4d') 14 | -------------------------------------------------------------------------------- /ImageGeneration/Anime/CartoonGANs trained on Multi-Style/2-forward.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | (* pick one style to load; the second assignment overwrites the first *) 5 | mainNet = Import@"CartoonGan trained on Hayao Style.WLNet" 6 | mainNet = Import@"CartoonGan trained on Hosoda Style.WLNet" 7 | 8 | img = ImageResize[ExampleData[{"TestImage", "Mandrill"}], 256] 9 | newNet = NetReplacePart[mainNet, "Input" -> NetEncoder[{"Image", ImageDimensions@img}]]; 10 | 11 | 12 | newNet[img, TargetDevice -> "GPU"] 13 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/RAN on CIFAR10/exporter.py: -------------------------------------------------------------------------------- 1 | import gluoncv.model_zoo as gz 2 | from gluoncv.utils import export_block 3 | 4 | 5 | def zoo_import(name, head=''): 6 | """Download from Gluoncv Zoo""" 7 | net = gz.get_model(name, pretrained=True) 8 | export_block(head + name, net, preprocess=True) 9 | 10 | 11 | zoo_import('cifar_residualattentionnet56') 12 |
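# Note: each zoo_import call writes an MXNet symbol/params pair next to the script,
# e.g. cifar_residualattentionnet56-symbol.json and cifar_residualattentionnet56-0000.params,
# which the conversion notebooks can then import.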
zoo_import('cifar_residualattentionnet92') 13 | zoo_import('cifar_residualattentionnet452') -------------------------------------------------------------------------------- /ImageRecognition/Classifation/VGG on ImageNet/export.py: -------------------------------------------------------------------------------- 1 | import gluoncv.model_zoo as gz 2 | from gluoncv.utils import export_block 3 | 4 | 5 | def zoo_import(name, head=''): 6 | """Download from Gluoncv Zoo""" 7 | net = gz.get_model(name, pretrained=True) 8 | export_block(head + name, net, preprocess=True) 9 | 10 | 11 | zoo_import('vgg11', 'imagenet_') 12 | zoo_import('vgg13', 'imagenet_') 13 | zoo_import('vgg16', 'imagenet_') 14 | zoo_import('vgg19', 'imagenet_') 15 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/AlexNet on ImageNet/Readme.md: -------------------------------------------------------------------------------- 1 | ### Remark 2 | 3 | ### Source 4 | 5 | https://github.com/dmlc/gluon-cv 6 | 7 | ### Reference 8 | ```TeX 9 | @ImageClassifation{krizhevsky2012imagenet, 10 | title={Imagenet classification with deep convolutional neural networks}, 11 | author={Krizhevsky, Alex and Sutskever, Ilya and Hinton, Geoffrey E}, 12 | booktitle={Advances in neural information processing systems}, 13 | pages={1097--1105}, 14 | year={2012} 15 | } 16 | ``` -------------------------------------------------------------------------------- /ImageRecognition/Classifation/VGG-BN on ImageNet/export.py: -------------------------------------------------------------------------------- 1 | import gluoncv.model_zoo as gz 2 | from gluoncv.utils import export_block 3 | 4 | 5 | def zoo_import(name, head=''): 6 | """Download from Gluoncv Zoo""" 7 | net = gz.get_model(name, pretrained=True) 8 | export_block(head + name, net, preprocess=True) 9 | 10 | 11 | zoo_import('vgg11_bn', 'imagenet_') 12 | zoo_import('vgg13_bn', 'imagenet_') 13 | zoo_import('vgg16_bn', 'imagenet_') 14 | zoo_import('vgg19_bn', 'imagenet_') 15 | -------------------------------------------------------------------------------- /.link/.tools/Caffe_exporter.py: -------------------------------------------------------------------------------- 1 | import cv2.dnn as opencv 2 | import wolframclient.serializers as wxf 3 | 4 | 5 | def caffe2wxf(path): 6 | model = opencv.readNetFromCaffe( 7 | path + ".prototxt", 8 | path + ".caffemodel" 9 | ) 10 | layers = model.getLayerNames() 11 | npy = {} 12 | for i in layers: 13 | try: 14 | npy[i] = model.getParam(i) 15 | except Exception: 16 | pass 17 | wxf.export(npy, path + '.wxf', target_format='wxf') 18 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/DenseNet on ImageNet/export.py: -------------------------------------------------------------------------------- 1 | import gluoncv.model_zoo as gz 2 | from gluoncv.utils import export_block 3 | 4 | 5 | def zoo_import(name, head=''): 6 | """Download from Gluoncv Zoo""" 7 | net = gz.get_model(name, pretrained=True) 8 | export_block(head + name, net, preprocess=True) 9 | 10 | 11 | zoo_import('densenet121', 'imagenet_') 12 | zoo_import('densenet161', 'imagenet_') 13 | zoo_import('densenet169', 'imagenet_') 14 | zoo_import('densenet201', 'imagenet_') 15 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/SqueezeNet on ImageNet/Readme.md: -------------------------------------------------------------------------------- 1 | ### 
Remark 2 | 3 | ### Source 4 | 5 | https://github.com/dmlc/gluon-cv 6 | 7 | ### Reference 8 | ```TeX 9 | @ImageClassifation{iandola2016squeezenet, 10 | title={Squeezenet: Alexnet-level accuracy with 50x fewer parameters and <0.5MB model size}, 11 | author={Iandola, Forrest N and Han, Song and Moskewicz, Matthew W and Ashraf, Khalid and Dally, William J and Keutzer, Kurt}, 12 | journal={arXiv preprint arXiv:1602.07360}, 13 | year={2016} 14 | } 15 | ``` -------------------------------------------------------------------------------- /ImageEnhancement/SuperResolution/SRGAN trained on VOC/README.md: -------------------------------------------------------------------------------- 1 | ### Remark 2 | 3 | TODO: use single prelu 4 | 5 | ### Source 6 | 7 | https://github.com/WolframRhodium/Super-Resolution-Zoo/tree/master/SRGAN/SRGAN%40leftthomas 8 | 9 | --- 10 | 11 | ### Reference 12 | ```TeX 13 | @ImageSuperResolution{ledig2017photo, 14 | title={Photo-realistic single image super-resolution using a generative adversarial network}, 15 | author={Ledig, Christian and Theis, Lucas and Husz{\'a}r, Ferenc and Caballero, Jose and Cunningham, Andrew and Acosta, Alejandro and Aitken, Andrew and Tejani, Alykhan and Totz, Johannes and Wang, Zehan and others}, 16 | booktitle={The IEEE conference on computer vision and pattern recognition (CVPR)}, 17 | year={2017} 18 | } 19 | ``` -------------------------------------------------------------------------------- /ImageRecognition/Anime/ResNets trained on Danbooru2018/2-export.py: -------------------------------------------------------------------------------- 1 | import re 2 | import torch 3 | import wolframclient.serializers as wxf 4 | 5 | 6 | def pth2wxf(path): 7 | pth = torch.load(path, map_location=torch.device('cpu')) 8 | npy = { 9 | key: value.numpy() 10 | for key, value 11 | in pth.items() 12 | if not re.match('.*_tracked$', key) 13 | } 14 | wxf.export(npy, path + '.wxf', target_format='wxf') 15 | 16 | 17 | pth2wxf('resnet18.pth') 18 | pth2wxf('resnet34.pth') 19 | pth2wxf('resnet50.pth') 20 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/ResNet on ImageNet/exporter.py: -------------------------------------------------------------------------------- 1 | import gluoncv.model_zoo as gz 2 | from gluoncv.utils import export_block 3 | 4 | 5 | def zoo_import(name, head=''): 6 | """Download from Gluoncv Zoo""" 7 | net = gz.get_model(name, pretrained=True) 8 | export_block(head + name, net, preprocess=True) 9 | 10 | 11 | zoo_import('resnet18_v1b', "imagenet_") 12 | zoo_import('resnet34_v1b', "imagenet_") 13 | zoo_import('resnet50_v1s', "imagenet_") 14 | zoo_import('resnet101_v1s', "imagenet_") 15 | zoo_import('resnet152_v1s', "imagenet_") 16 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # idea config 2 | *.iml 3 | .idea 4 | 5 | # Cache 6 | *.cpy 7 | *dump 8 | report.m 9 | /未分类/ 10 | 11 | # Compress 12 | *.tar 13 | *.gz 14 | 15 | # Binary 16 | *.npy 17 | *.npz 18 | *.pkl 19 | *.zip 20 | 21 | 22 | # models 23 | *.WLNet 24 | *.WMLF 25 | *.WXF 26 | 27 | *.params 28 | *-symbol.json 29 | *.h5 30 | *.hdf5 31 | *.pth 32 | *.pb 33 | *.pbtxt 34 | *.ckpt* 35 | *.txt 36 | *.app 37 | *.prototxt 38 | *.caffemodel 39 | *.MX 40 | *.MXNet 41 | *.mat 42 | 43 | # Notebook 44 | *.nb 45 | *.jupyter 46 | 47 | # Picture 48 | *.png 49 | *.jpg 50 | *.jpeg -------------------------------------------------------------------------------- /ImageRecognition/Classifation/LeNet on MNIST/Readme.md: -------------------------------------------------------------------------------- 1 | ### Remark 2 | 3 | ### 
Source 4 | 5 | https://resources.wolframcloud.com/NeuralNetRepository/resources/LeNet-Trained-on-MNIST-Data 6 | 7 | ### Reference 8 | ```TeX 9 | @ImageClassifation{lecun1998gradient, 10 | title={Gradient-based learning applied to document recognition}, 11 | author={LeCun, Yann and Bottou, L{\'e}on and Bengio, Yoshua and Haffner, Patrick}, 12 | journal={Proceedings of the IEEE}, 13 | volume={86}, 14 | number={11}, 15 | pages={2278--2324}, 16 | year={1998}, 17 | publisher={IEEE} 18 | } 19 | ``` -------------------------------------------------------------------------------- /ImageEnhancement/SuperResolution/EDSR on DIV2K/Readme.md: -------------------------------------------------------------------------------- 1 | ### Remark 2 | 3 | ### Source 4 | 5 | https://github.com/WolframRhodium/Super-Resolution-Zoo/tree/master/EDSR 6 | 7 | ### Reference 8 | ```TeX 9 | @ImageSuperResolution{lim2017enhanced, 10 | title={Enhanced deep residual networks for single image super-resolution}, 11 | author={Lim, Bee and Son, Sanghyun and Kim, Heewon and Nah, Seungjun and Lee, Kyoung Mu}, 12 | booktitle={The IEEE conference on computer vision and pattern recognition (CVPR) workshops}, 13 | volume={1}, 14 | number={2}, 15 | pages={4}, 16 | year={2017} 17 | } 18 | ``` -------------------------------------------------------------------------------- /ImageEnhancement/Debluring/SRN Deblur on GOPRO/README.md: -------------------------------------------------------------------------------- 1 | ## Remark 2 | 3 | > **@GalAster:** 4 | > I didn't understand how the LSTM version works; the dimensions do not match. 5 | > By convention I don't convert grayscale models, even though the authors say the grayscale model beats the color model in many situations. 6 | 7 | ## Example 8 | 9 | 10 | 11 | ## Source 12 | https://github.com/jiangsutx/SRN-Deblur 13 | 14 | ## Files 15 | 16 | - Git: `git clone https://github.com/jiangsutx/SRN-Deblur.git` 17 | - Exporter: `./exporter.py` 18 | - Models: `./checkpoints/download_model.sh` 19 | 20 | ## Reference 21 | 22 | 23 | -------------------------------------------------------------------------------- /ImageEnhancement/SuperResolution/SRCNN on Set14/1-download.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | CheckDownload[link_, path_] := If[ 5 | FileExistsQ@path, 6 | Return[], 7 | ResourceFunction["MonitoredDownload"][ 8 | link, path, 9 | "IncludePlot" -> True, 10 | OverwriteTarget -> False 11 | ]; 12 | ]; 13 | 14 | 15 | CheckDownload[ 16 | "https://github.com/MarkPrecursor/SRCNN-keras/raw/master/3051crop_weight_200.h5", 17 | "3051crop_weight_200.h5" 18 | ]; 19 | CheckDownload[ 20 | "https://github.com/MarkPrecursor/SRCNN-keras/raw/master/m_model_adam_new30.h5", 21 | "m_model_adam_new30.h5" 22 | ]; -------------------------------------------------------------------------------- /ImageRecognition/Anime/ResNets trained on Danbooru2018/Readme.md: -------------------------------------------------------------------------------- 1 | # ResNets trained on Danbooru2018 2 | 3 | #### Input 4 | 5 | ``` 6 | - class_names_100.ckpt.json 7 | - class_names_500.ckpt.json 8 | - class_names_6000.ckpt.json 9 | - resnet18.pth 10 | - resnet34.pth 11 | - resnet50.pth 12 | - Test.jpg 13 | ``` 14 | 15 | - No plugin, No git 16 | 17 | #### Output 18 | 19 | ``` 20 | - ResNet-20 trained on Danbooru2018.MAT 21 | - ResNet-36 trained on Danbooru2018.MAT 22 | - ResNet-53 trained on Danbooru2018.MAT 23 | ``` 24 | 25 | ## Reference 26 | 27 | - Link: [RF5/danbooru-pretrained](https://github.com/RF5/danbooru-pretrained) 28 |
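The `class_names_*.ckpt.json` files map output indices to tag names. A minimal sketch of pairing them with the network output (assuming each file holds a flat JSON list of tag strings, index-aligned with the output vector):

```python
import json

# assumption: a flat JSON list of tag strings, index-aligned with the output vector
with open('class_names_6000.ckpt.json') as f:
    tags = json.load(f)
print(len(tags), tags[:5])
```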
-------------------------------------------------------------------------------- /.link/.tools/gluoncv_zoo.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding=utf-8 3 | """https://github.com/dmlc/gluon-cv""" 4 | 5 | import gluoncv.model_zoo as gz 6 | from gluoncv.utils import export_block 7 | 8 | 9 | def zoo_import(name, head=''): 10 | """Download from Gluoncv Zoo""" 11 | net = gz.get_model(name, pretrained=True) 12 | export_block(head + name, net, preprocess=True) 13 | 14 | 15 | '''test 16 | zoo_import('cifar_resnet20_v2') 17 | zoo_import('cifar_resnet56_v2') 18 | zoo_import('cifar_resnet110_v2') 19 | 20 | zoo_import('cifar_wideresnet16_10') 21 | zoo_import('cifar_wideresnet28_10') 22 | zoo_import('cifar_wideresnet40_8') 23 | ''' 24 | -------------------------------------------------------------------------------- /ImageEnhancement/SuperResolution/SRResNet on CommonSR/README.md: -------------------------------------------------------------------------------- 1 | ### Remark 2 | 3 | There is also a grayscale network in the original repo, but grayscale reduces neither the computation nor the model size, so only the RGB network is converted. 4 | 5 | ### Source 6 | 7 | https://github.com/WolframRhodium/Super-Resolution-Zoo/tree/master/SRGAN/BasicSR%40xinntao 8 | 9 | ### Reference 10 | ```TeX 11 | @ImageSuperResolution{ledig2017photo, 12 | title={Photo-realistic single image super-resolution using a generative adversarial network}, 13 | author={Ledig, Christian and Theis, Lucas and Husz{\'a}r, Ferenc and Caballero, Jose and Cunningham, Andrew and Acosta, Alejandro and Aitken, Andrew and Tejani, Alykhan and Totz, Johannes and Wang, Zehan and others}, 14 | booktitle={The IEEE conference on computer vision and pattern recognition (CVPR)}, 15 | year={2017} 16 | } 17 | ``` -------------------------------------------------------------------------------- /.link/.tools/ckpt_exporter.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding=utf-8 3 | import tensorflow as tf 4 | import wolframclient.serializers as wxf 5 | 6 | 7 | def ckpt2wxf(path, name): 8 | ckpt = tf.train.get_checkpoint_state(path) 9 | reader = tf.train.NewCheckpointReader(ckpt.model_checkpoint_path) 10 | all_variables = list(reader.get_variable_to_shape_map().keys()) 11 | npy = dict(zip(all_variables, map(reader.get_tensor, all_variables))) 12 | wxf.export(npy, name + '.wxf', target_format='wxf') 13 | 14 | 15 | ''' 16 | ckpt2wxf('./checkpoint/Fuji/', 'see fuji in the dark') 17 | ckpt2wxf('./checkpoint/Sony/', 'see sony in the dark') 18 | ''' 19 | -------------------------------------------------------------------------------- /.link/.tools/TorchONNX_Import.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding=utf-8 3 | import torch.onnx 4 | import torch.utils.model_zoo as zoo 5 | import torchvision 6 | from torch.autograd import Variable 7 | 8 | # NOTE: this download is an unused demo; the AlexNet export below loads its own pretrained weights 9 | state_dict = zoo.load_url('https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth') 10 | 11 | dummy_input = Variable(torch.randn(10, 3, 224, 224)).cuda() 12 | model = torchvision.models.alexnet(pretrained=True).cuda() 13 | 14 | input_names = ["actual_input_1"] + ["learned_%d" % i for i in range(16)] 15 | output_names = ["output1"] 16 | 17 | torch.onnx.export(model, dummy_input, "alexnet.onnx", verbose=True, input_names=input_names, output_names=output_names) 18 | -------------------------------------------------------------------------------- /ImageEnhancement/SuperResolution/PESR on DIV2K/Readme.md: 
-------------------------------------------------------------------------------- 1 | ### Remark 2 | 3 | > **@GalAster:** 4 | > There are two models in the official repo: 5 | > a PSNR-optimized model and a perception-optimized model. 6 | > I checked the **PSNR-optimized model** and think it is almost the same as **EDSR4x**, 7 | > so I only converted the perception-optimized one. 8 | > In fact, the script also works for the other one. 9 | 10 | ### Source 11 | https://github.com/WolframRhodium/Super-Resolution-Zoo/tree/master/PESR 12 | 13 | ### Reference 14 | **[1] Perception-Enhanced Single Image Super-Resolution via Relativistic Generative Networks. [[Github]](https://github.com/thangvubk/PESR/tree/master)** 15 | -------------------------------------------------------------------------------- /ImageEnhancement/SuperResolution/VDSR on SR291/1-download.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | CheckDownload[link_, path_] := If[ 5 | FileExistsQ@path, 6 | Return[], 7 | ResourceFunction["MonitoredDownload"][ 8 | link, path, 9 | "IncludePlot" -> True, 10 | OverwriteTarget -> False 11 | ]; 12 | ]; 13 | 14 | 15 | CheckDownload[ 16 | "https://github.com/WolframRhodium/Super-Resolution-Zoo/raw/master/VDSR/caffe-vdsr%40huangzehao/VDSR-0000.params", 17 | "VDSR-0000.params" 18 | ]; 19 | CheckDownload[ 20 | "https://github.com/WolframRhodium/Super-Resolution-Zoo/raw/master/VDSR/caffe-vdsr%40huangzehao/VDSR-symbol.json", 21 | "VDSR-symbol.json" 22 | ]; -------------------------------------------------------------------------------- /ImageEnhancement/SuperResolution/VDSR on SR291/Readme.md: -------------------------------------------------------------------------------- 1 | # Very Deep Convolutional Networks Super-Resolution 2 | 3 | 1. VDSR is **not** an RGB model; it only works on the Gray or Y channel. 4 | 2. 
VDSR is **not** an upscaling model; the input must be upscaled with cubic interpolation first. 5 | 6 | ### Input 7 | 8 | - No git, no download 9 | 10 | ``` 11 | - VDSR-0000.params 12 | - VDSR-symbol.json 13 | ``` 14 | 15 | ### Output 16 | 17 | ``` 18 | - VDSR trained on SR291.MAT 19 | ``` 20 | 21 | ## Reference 22 | 23 | - Link: [WolframRhodium/Super-Resolution-Zoo](https://github.com/WolframRhodium/Super-Resolution-Zoo/tree/master/VDSR/caffe-vdsr%40huangzehao) 24 | - Source: [huangzehao/caffe-vdsr](https://github.com/huangzehao/caffe-vdsr/tree/master) 25 | -------------------------------------------------------------------------------- /ImageEnhancement/SuperResolution/ProSR trained on Flickr2K/README.md: -------------------------------------------------------------------------------- 1 | ### Remark 2 | 3 | The 4x and 8x models are completely unusable: Mathematica does not have a bicubic-interpolation upscaling algorithm. 4 | 5 | ### Source 6 | 7 | https://github.com/WolframRhodium/Super-Resolution-Zoo/tree/master/ProSR 8 | 9 | --- 10 | 11 | ### Reference 12 | ```TeX 13 | @ImageSuperResolution{wang2018fully, 14 | title={A Fully Progressive Approach to Single-Image Super-Resolution}, 15 | author={Wang, Yifan and Perazzi, Federico and McWilliams, Brian and Sorkine-Hornung, Alexander and Sorkine-Hornung, Olga and Schroers, Christopher}, 16 | journal={arXiv preprint arXiv:1804.02900}, 17 | year={2018} 18 | } 19 | ``` -------------------------------------------------------------------------------- /ImageGeneration/Anime/CartoonGANs trained on Multi-Style/Readme.md: -------------------------------------------------------------------------------- 1 | # CartoonGANs trained on Multi-Style 2 | 3 | #### Input 4 | 5 | - Shinkai: Makoto Shinkai's *Your Name.* (《君の名は。》), 6 | - Paprika: Satoshi Kon's *Paprika* (《パプリカ》), 7 | - Hayao: Hayao Miyazaki's *The Wind Rises* (《風立ちぬ》). 8 | 9 | ``` 10 | - Hayao.h5 11 | - Hosoda.h5 12 | - Paprika.h5 13 | - Shinkai.h5 14 | - Test.png 15 | ``` 16 | 17 | - No plugin, No git 18 | 19 | #### Output 20 | 21 | ``` 22 | - CartoonGAN trained on Hayao Style.MAT 23 | - CartoonGAN trained on Hosoda Style.MAT 24 | - CartoonGAN trained on Paprika Style.MAT 25 | - CartoonGAN trained on Shinkai Style.MAT 26 | ``` 27 | 28 | ## Reference 29 | 30 | - Link: [penny4860/Keras-CartoonGan](https://github.com/penny4860/Keras-CartoonGan) 31 | 32 | 33 | -------------------------------------------------------------------------------- /ImageGeneration/Anime/PGGAN-128 trained on Anime/PixelNorm.m: -------------------------------------------------------------------------------- 1 | Input: ChannelT[$$Channels, TensorT[$$InputDimensions]] 2 | Output: ChannelT[$$Channels, TensorT[$$InputDimensions]] 3 | 4 | Parameters: 5 | $Epsilon: Defaulting[ScalarT, 10^-8] 6 | $$Channels: SizeT 7 | $$InputDimensions: SizeListT[SizeT] 8 | 9 | (*ATTENTION: there's no 'RSQRT' in ONNX*) 10 | Writer: Function[ 11 | input = GetInput["Input", "Batchwise"]; 12 | path = SowNode["mean", SowSquare@input, "axis" -> 1, "keepdims" -> True]; 13 | path = SowRSqrt@SowNode["_PlusScalar", path, "scalar" -> #Epsilon]; 14 | output = SowNode["broadcast_mul", {input, path}]; 15 | SetOutput["Output", output] 16 | ] 17 | 18 | Suffix: "alizationLayer" -------------------------------------------------------------------------------- /ImageGeneration/Anime/PGGAN-256 trained on HoloFaces/PixelNorm.m: -------------------------------------------------------------------------------- 1 | Input: ChannelT[$$Channels, TensorT[$$InputDimensions]] 2 | Output: ChannelT[$$Channels, TensorT[$$InputDimensions]] 3 | 4 | Parameters: 5 | $Epsilon: Defaulting[ScalarT, 10^-8] 6 | $$Channels: 
SizeT 7 | $$InputDimensions: SizeListT[SizeT] 8 | 9 | (*ATTENTION: there's no 'RSQRT' in ONNX*) 10 | Writer: Function[ 11 | input = GetInput["Input", "Batchwise"]; 12 | path = SowNode["mean", SowSquare@input, "axis" -> 1, "keepdims" -> True]; 13 | path = SowRSqrt@SowNode["_PlusScalar", path, "scalar" -> #Epsilon]; 14 | output = SowNode["broadcast_mul", {input, path}]; 15 | SetOutput["Output", output] 16 | ] 17 | 18 | Suffix: "alizationLayer" -------------------------------------------------------------------------------- /ImageRecognition/Classifation/Xception on ImageNet/exporter.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding=utf-8 3 | import re 4 | 5 | import torch 6 | import wolframclient.serializers as wxf 7 | from pretrainedmodels import xception 8 | 9 | # manually fix this first 10 | model = xception(num_classes=1000, pretrained=False).cpu() 11 | net = list(model.modules()) 12 | params = model.state_dict() 13 | 14 | # remove `bn.num_batches_tracked` because it can break the model 15 | npy = { 16 | key: value.numpy() 17 | for key, value 18 | in params.items() 19 | if not re.match('.*_tracked$', key) 20 | } 21 | 22 | wxf.export(npy, 'imagenet_xception.wxf', target_format='wxf') 23 | # torch.save(model, 'imagenet_xception.pth') -------------------------------------------------------------------------------- /ImageGeneration/Anime/PGGAN-256 trained on HoloFaces/Readme.md: -------------------------------------------------------------------------------- 1 | ## PGGAN-256 trained on HoloFaces 2 | 3 | - This model suffers from fairly severe mode collapse 4 | 5 | ## Source 6 | 7 | ### Input 8 | 9 | - Need git, need manual download 10 | 11 | ``` 12 | - /progressive_growing_of_gans/ 13 | - 2018-10-05-gwern-holofaces-progan-model2053-006353.pkl 14 | - 2-exporter.py 15 | ``` 16 | 17 | ### Output 18 | 19 | ``` 20 | - PGGAN-256 trained on HoloFaces.MAT 21 | - preview.jpg 22 | ``` 23 | 24 | ### Preview 25 | 26 | ![preview](https://user-images.githubusercontent.com/17541209/67645096-a1034200-f961-11e9-9f25-12258e8dc28b.jpg) 27 | 28 | 29 | ## Reference 30 | 31 | - Link: [r/SpiceandWolf](https://www.reddit.com/r/SpiceandWolf/comments/a1oa89/experiments_in_generating_holo_faces_with_neural/) 32 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/PolyNet on ImageNet/exporter.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding=utf-8 3 | import re 4 | 5 | import torch 6 | import wolframclient.serializers as wxf 7 | from pretrainedmodels import polynet 8 | 9 | # manually fix this first 10 | model = polynet(num_classes=1000, pretrained='imagenet') 11 | net = list(model.modules()) 12 | params = model.state_dict() 13 | 14 | # remove `bn.num_batches_tracked` because it can break the model 15 | npy = { 16 | key: value.numpy() 17 | for key, value 18 | in params.items() 19 | if not re.match('.*_tracked$', key) 20 | } 21 | 22 | wxf.export(npy, 'imagenet_polynet.wxf', target_format='wxf') 23 | torch.save(polynet(num_classes=1000), 'imagenet_polynet.pth') -------------------------------------------------------------------------------- /.link/.tools/pkl_exporter.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding=utf-8 3 | import pickle as pkl 4 | 5 | import wolframclient.serializers as wxf 6 | 7 | 8 | def pkl2wxf(path): 9 | file = open(path, 'rb') 10 | objs = [] 11 | while True: 12 | try: 13 | 
objs.append(pkl.load(file)) 14 | except EOFError: 15 | break 16 | file.close() 17 | print(objs) 18 | wxf.export(objs, path + '.wxf', target_format='wxf') 19 | 20 | 21 | ''' test 22 | f = open('objs.pkl', 'wb') 23 | # Test basic types 24 | testDict = { 25 | 0: None, 26 | 1: [1, 2, 3, 4], 27 | 2: ('true', 'false'), 28 | 3: {'yes': True, 'no': False} 29 | } 30 | pkl.dump(testDict, f) 31 | f.close() 32 | pkl2wxf('objs.pkl') 33 | ''' 34 | -------------------------------------------------------------------------------- /ImageEnhancement/SuperResolution/VDSR on SR291/2-convert.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | (* ::Subchapter:: *) 4 | (*Import Weights*) 5 | 6 | 7 | SetDirectory@NotebookDirectory[]; 8 | NetModel[]; 9 | << DeepMath` 10 | DeepMath`NetMerge; 11 | NeuralNetworks`Private`MXNetFormat`readLayerCustom["broadcast_add", _] := ThreadingLayer[Plus] 12 | 13 | 14 | raw = Import["VDSR-symbol.json", "MXNet"] 15 | 16 | 17 | (* ::Subchapter:: *) 18 | (*Main*) 19 | 20 | 21 | path = Values@Normal@NetTake[raw, {"conv1", "conv20"}]; 22 | new = NetChain@Join[Partition[Most@path, 2], {Last@path}]; 23 | mainNet = NetMerge[new, Plus] 24 | 25 | 26 | (* ::Subchapter:: *) 27 | (*Export Model*) 28 | 29 | 30 | Export["VDSR trained on SR291.MAT", mainNet, "WXF", PerformanceGoal -> "Speed"] 31 | -------------------------------------------------------------------------------- /ImageGeneration/Anime/ComixGANs Photo2Comic Style Transfer/Readme.md: -------------------------------------------------------------------------------- 1 | # ComixGANs Photo2Comic Style Transfer 2 | 3 | #### Input 4 | 5 | ``` 6 | - ComixGAN Comic Style Transfer Alpha.hdf5 7 | - ComixGAN Comic Style Transfer Beta.h5 8 | - ComixGAN Comic Style Transfer Gamma.h5 9 | - ComixGAN Comic Style Transfer Delta.h5 10 | ``` 11 | 12 | - No plugin, No git 13 | 14 | #### Output 15 | 16 | ``` 17 | - ComixGAN Comic Style Transfer Alpha.MAT 18 | - ComixGAN Comic Style Transfer Beta.MAT 19 | - ComixGAN Comic Style Transfer Gamma.MAT 20 | - ComixGAN Comic Style Transfer Delta.MAT 21 | ``` 22 | 23 | ## Reference 24 | 25 | - Link: [nijuyr/comixGAN](https://github.com/nijuyr/comixGAN) 26 | - Link: [maciej3031/comixify](https://github.com/maciej3031/comixify) 27 | 28 | -------------------------------------------------------------------------------- /ImageGeneration/Anime/PGGAN-128 trained on Anime/Readme.md: -------------------------------------------------------------------------------- 1 | ## PGGAN-128 trained on Anime 2 | 3 | ## Source 4 | 5 | ### Input 6 | 7 | - Need git, need manual download 8 | 9 | ``` 10 | - /progressive_growing_of_gans/ 11 | - network-snapshot-057891.pkl 12 | - 2-exporter.py 13 | ``` 14 | 15 | ### Output 16 | 17 | ``` 18 | - PGGAN-128 trained on Anime.MAT 19 | - preview.jpg 20 | - preview-2.jpg 21 | ``` 22 | 23 | ### Preview 24 | 25 | ![preview](https://user-images.githubusercontent.com/17541209/67645076-7d3ffc00-f961-11e9-9128-dc9507815493.jpg) 26 | ![preview-2](https://user-images.githubusercontent.com/17541209/67645078-7d3ffc00-f961-11e9-9692-e03418096e20.jpg) 27 | 28 | ## Reference 29 | 30 | - Link: [gwern/Faces](https://www.gwern.net/Faces#progan) 31 | 32 | -------------------------------------------------------------------------------- /ImageGeneration/Human/PGGAN on CelebA/Readme.md: -------------------------------------------------------------------------------- 1 | ## Remark 2 | 3 | > **@GalAster:** 4 | > I removed `ScaledConvolution`, the so-called `WScaleLayer`, which scales the weights by `1/sqrt(channels)`, 5 | > and I use a normal `ConvolutionLayer` instead. 6 | > I also tried to export the full net with all resolutions, but failed. 7 |
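In weight terms, removing `WScaleLayer` just means baking its runtime scale into the stored kernels before export, so a plain `ConvolutionLayer` reproduces the same output. A minimal numpy sketch of the idea (the `1/sqrt(channels)` factor follows the remark above; read the exact constant off the checkpoint):

```python
import numpy as np

def fold_wscale(kernel, channels):
    # assumption: the runtime scale is 1/sqrt(channels), as stated in the remark above
    return kernel / np.sqrt(channels)  # pre-scaled kernel for a plain ConvolutionLayer
```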
8 | > **@GalAster:** 9 | > A word of caution: Do not store high-definition pictures in your notebook! 10 | 11 | ## Example 12 | 13 | ![Example](https://user-images.githubusercontent.com/17541209/48298682-47112e80-e4fc-11e8-81bd-5153afa10320.png) 14 | 15 | ## Source 16 | https://github.com/tkarras/progressive_growing_of_gans 17 | 18 | ## Files 19 | 20 | - Git: `git clone https://github.com/tkarras/progressive_growing_of_gans.git` 21 | - Exporter: `./exporter.py` 22 | - Models: https://drive.google.com/drive/folders/15hvzxt_XxuokSmj0uO4xxMTMWVc0cIMU 23 | - Plugins: 24 | - PixelNormalizationLayer 25 | 26 | ## Reference 27 | 28 | 29 | -------------------------------------------------------------------------------- /ImageGeneration/Anime/CartoonGANs trained on Multi-Style/1-download.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | CheckDownload[link_, path_] := If[ 5 | FileExistsQ@path, 6 | Return[], 7 | ResourceFunction["MonitoredDownload"][ 8 | link, path, 9 | "IncludePlot" -> True, 10 | OverwriteTarget -> False 11 | ]; 12 | ]; 13 | 14 | 15 | CheckDownload[ 16 | "https://github.com/penny4860/Keras-CartoonGan/raw/master/params/Hayao.h5", 17 | "Hayao.h5" 18 | ]; 19 | CheckDownload[ 20 | "https://github.com/penny4860/Keras-CartoonGan/raw/master/params/Hosoda.h5", 21 | "Hosoda.h5" 22 | ]; 23 | CheckDownload[ 24 | "https://github.com/penny4860/Keras-CartoonGan/raw/master/params/Paprika.h5", 25 | "Paprika.h5" 26 | ]; 27 | CheckDownload[ 28 | "https://github.com/penny4860/Keras-CartoonGan/raw/master/params/Shinkai.h5", 29 | "Shinkai.h5" 30 | ]; 31 | CheckDownload[ 32 | "https://github.com/penny4860/Keras-CartoonGan/raw/master/sample_in/in1.png", 33 | "Test.png" 34 | ]; 35 | -------------------------------------------------------------------------------- /ImageGeneration/Anime/ComixGANs Photo2Comic Style Transfer/1-download.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | CheckDownload[link_, path_] := If[ 5 | FileExistsQ@path, 6 | Return[], 7 | ResourceFunction["MonitoredDownload"][ 8 | link, path, 9 | "IncludePlot" -> True, 10 | OverwriteTarget -> False 11 | ]; 12 | ]; 13 | 14 | 15 | CheckDownload[ 16 | "https://github.com/nijuyr/comixGAN/raw/master/data2/checkpoints/best/G_weights.best.hdf5", 17 | "ComixGAN Comic Style Transfer Alpha.hdf5" 18 | ]; 19 | CheckDownload[ 20 | "https://github.com/nijuyr/comixGAN/raw/master/data2/checkpoints/comparison/comix_gan.h5", 21 | "ComixGAN Comic Style Transfer Beta.h5" 22 | ]; 23 | CheckDownload[ 24 | "https://github.com/maciej3031/comixify/raw/master/ComixGAN/pretrained_models/generator_model2.h5", 25 | "ComixGAN Comic Style Transfer Gamma.h5" 26 | ]; 27 | CheckDownload[ 28 | "https://github.com/maciej3031/comixify/raw/master/ComixGAN/pretrained_models/generator_model.h5", 29 | "ComixGAN Comic Style Transfer Delta.h5" 30 | ]; 31 | -------------------------------------------------------------------------------- /ImageEnhancement/SuperResolution/D-DBPN on NTIRE2018/D-DBPN8x trained on NTIRE2018.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | Needs["MXNetLink`"] 
Needs["NeuralNetworks`"] 6 | DateString[] 7 | 8 | 9 | (* ::Subitem:: *) 10 | (*Wed 24 Oct 2018 22:56:19*) 11 | 12 | 13 | (* ::Subchapter:: *) 14 | (*Import Weights*) 15 | 16 | 17 | params = NDArrayImport@"NTIRE2018_x8-0000.params"; 18 | 19 | 20 | (* ::Subchapter:: *) 21 | (*Encoder & Decoder*) 22 | 23 | 24 | encoder = NetEncoder[{"Image", {640, 360}}] 25 | decoder = NetDecoder["Image"] 26 | 27 | 28 | (* ::Subchapter:: *) 29 | (*Pre-defined Structure*) 30 | 31 | 32 | ndarray[n_] := params["arg:learned_" <> ToString[n]]; 33 | (*NetMapOperator[ParametricRampLayer["Slope"\[Rule]ndarray[n]]]*) 34 | prelu[i_, n_] := ParametricRampLayer["Slope" -> Flatten@ConstantArray[Normal@ndarray[i], n]] 35 | 36 | 37 | (* ::Subchapter:: *) 38 | (*Main*) 39 | 40 | 41 | mainNet = NetGraph[nodes, path, "Input" -> encoder, "Output" -> decoder] 42 | 43 | 44 | (* ::Subchapter:: *) 45 | (*Export Model*) 46 | -------------------------------------------------------------------------------- /.link/.tools/Caffe2_exporter.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | import caffe2.proto.caffe2_pb2 as caffe 4 | import numpy as npy 5 | import wolframclient.serializers as wxf 6 | 7 | 8 | def layer_iter(layers): 9 | '''add layer name when confused''' 10 | for layer in layers: 11 | layer_name = re.sub('[-/]', '_', layer.name) 12 | layer_type = layer.type 13 | layer_blobs = layer.blobs 14 | yield (layer_name, layer_type, layer_blobs) 15 | 16 | 17 | def caffe2wxf(path): 18 | data = dict() 19 | model = caffe.NetParameter() 20 | model.ParseFromString(open(path, 'rb').read()) 21 | 22 | get_array = lambda p: npy.array(p.data, dtype='float32').reshape(p.shape.dim) 23 | 24 | def add_array(node): 25 | for i in range(len(node.blobs)): 26 | data[node.name + "_" + str(i + 1)] = get_array(node.blobs[i]) 27 | 28 | def add_node(layers): 29 | for i in range(len(layers)): 30 | add_array(layers[i]) 31 | 32 | add_node(model.layer) 33 | wxf.export(data, path + '.wxf', target_format='wxf') 34 | -------------------------------------------------------------------------------- /ImageEnhancement/Debluring/SRN Deblur on GOPRO/exporter.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import wolframclient.serializers as wxf 3 | 4 | 5 | def ckpt2wxf(path, name): 6 | ckpt = tf.train.get_checkpoint_state(path) 7 | reader = tf.train.NewCheckpointReader(ckpt.model_checkpoint_path) 8 | all_variables = list(reader.get_variable_to_shape_map().keys()) 9 | npy = dict(zip(all_variables, map(reader.get_tensor, all_variables))) 10 | wxf.export(npy, name + '.wxf', target_format='wxf') 11 | 12 | 13 | with tf.Session() as sess: 14 | saver = tf.train.import_meta_graph('./checkpoints/color/deblur.model-523000.meta') 15 | saver.restore(sess, "./checkpoints/color/deblur.model-523000") 16 | saver.save(sess, "./tmp/color/model.ckpt") 17 | ckpt2wxf("./tmp/color/", "color") 18 | 19 | # with tf.Session() as sess: 20 | # saver = tf.train.import_meta_graph('./checkpoints/lstm/deblur.model-523000.meta') 21 | # saver.restore(sess, "./checkpoints/lstm/deblur.model-523000") 22 | # saver.save(sess, "./tmp/lstm/model.ckpt") 23 | # ckpt2wxf("./tmp/lstm/", "lstm") 24 | -------------------------------------------------------------------------------- /ImageRecognition/Anime/VGGs trained on Danbooru2017/exporter.py: -------------------------------------------------------------------------------- 1 | import gzip 2 | import shutil 3 | 4 | import 
caffe2.proto.caffe2_pb2 as caffe 5 | import numpy as npy 6 | import wolframclient.serializers as wxf 7 | 8 | 9 | def npy2wxf(path): 10 | data = npy.load(path) 11 | wxf.export(data, path + '.wxf', target_format='wxf') 12 | 13 | 14 | def caffe2wxf(path): 15 | data = dict() 16 | model = caffe.NetParameter() 17 | model.ParseFromString(open(path, 'rb').read()) 18 | 19 | get_array = lambda p: npy.array(p.data, dtype='float32').reshape(p.shape.dim) 20 | 21 | def add_array(node): 22 | for i in range(len(node.blobs)): 23 | data[node.name + "_" + str(i + 1)] = get_array(node.blobs[i]) 24 | 25 | def add_node(layers): 26 | for i in range(len(layers)): 27 | add_array(layers[i]) 28 | 29 | add_node(model.layer) 30 | wxf.export(data, path + '.wxf', target_format='wxf') 31 | 32 | 33 | shutil.copyfileobj(gzip.open('tag_list.json.gz', 'rb'), open('tag_list.json', 'wb')) 34 | npy2wxf("image_mean.npy") 35 | caffe2wxf("illust2vec_ver200.caffemodel") 36 | caffe2wxf("illust2vec_tag_ver200.caffemodel") 37 | -------------------------------------------------------------------------------- /ImageGeneration/Inpainting/EdgeConnect on CelebA/exporter.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import wolframclient.serializers as wxf 3 | 4 | from src.networks import InpaintGenerator, EdgeGenerator 5 | 6 | 7 | def edge_model(dataset): 8 | data = torch.load('checkpoints/' + dataset + '/EdgeModel_gen.pth', map_location='cpu') 9 | npy = {key: value.numpy() for key, value in data['generator'].items()} 10 | wxf.export(npy, 'EdgeModel_' + dataset + '.wxf', target_format='wxf') 11 | generator = EdgeGenerator() 12 | generator.load_state_dict(data['generator']) 13 | torch.save(generator, 'EdgeModel_' + dataset + '.pth') 14 | 15 | 16 | def inpaint_model(dataset): 17 | data = torch.load('checkpoints/' + dataset + '/InpaintingModel_gen.pth', map_location='cpu') 18 | npy = {key: value.numpy() for key, value in data['generator'].items()} 19 | wxf.export(npy, 'InpaintingModel_' + dataset + '.wxf', target_format='wxf') 20 | generator = InpaintGenerator() 21 | generator.load_state_dict(data['generator']) 22 | torch.save(generator, 'InpaintingModel_' + dataset + '.pth') 23 | 24 | 25 | def export_model(dataset): 26 | inpaint_model(dataset) 27 | edge_model(dataset) 28 | 29 | 30 | for model in ['celeba', 'places2', 'psv']: 31 | export_model(model) 32 | -------------------------------------------------------------------------------- /ImageEnhancement/SuperResolution/SRCNN on Set14/2-convert.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | (* ::Subchapter:: *) 4 | (*Import Weights*) 5 | 6 | 7 | SetDirectory@NotebookDirectory[]; 8 | 9 | 10 | $NCHW = TransposeLayer[{1<->4, 2<->3, 3<->4}]; 11 | getCN[i_, s_, p_] := ConvolutionLayer[ 12 | "Weights" -> $NCHW@params@TemplateApply["/convolution2d_`1`/convolution2d_`1`_W:0", {i}], 13 | "Biases" -> params@TemplateApply["/convolution2d_`1`/convolution2d_`1`_b:0", {i}], 14 | "PaddingSize" -> p, "Stride" -> s 15 | ]; 16 | 17 | 18 | (* ::Subchapter:: *) 19 | (*Main*) 20 | 21 | 22 | params = Import["m_model_adam_new30.h5", "Data"]; 23 | mainNet = NetChain[ 24 | { 25 | getCN[1, 1, 4], Ramp, 26 | getCN[2, 1, 0], Ramp, 27 | getCN[3, 1, 2] 28 | }, 29 | "Input" -> {1, Automatic, Automatic}, 30 | "Output" -> "Image" 31 | ]; 32 | Export["SRCNN-S trained on Set14.MAT", mainNet, "WXF", PerformanceGoal -> "Speed"] 33 | 34 | 35 | (* ::Subchapter:: *) 36 | (*Export Model*) 37 | 38 | 39 | params 
= Import["3051crop_weight_200.h5", "Data"]; 40 | mainNet = NetChain[ 41 | { 42 | getCN[1, 1, 4], Ramp, 43 | getCN[2, 1, 1], Ramp, 44 | getCN[3, 1, 2] 45 | }, 46 | "Input" -> {1, Automatic, Automatic}, 47 | "Output" -> "Image" 48 | ]; 49 | Export["SRCNN-L trained on Set14.MAT", mainNet, "WXF", PerformanceGoal -> "Speed"] 50 | -------------------------------------------------------------------------------- /ImageEnhancement/Mixed/MoePhoto on CustomAnime/exporter.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | import wolframclient.serializers as wxf 4 | 5 | ''' 6 | import torch.onnx as onnx 7 | from .python.models import * 8 | model = Net4x().cuda() 9 | model.load_state_dict(torch.load('./model/a4/model_new.pth')) 10 | dummy_input = Variable(torch.randn(1, 1, 36, 36)).cuda() 11 | onnx.export(model, dummy_input, "a4.onnx", verbose=True) 12 | ''' 13 | 14 | 15 | def exportSR(name): 16 | pth = torch.load('./model/' + name + '/model_new.pth') 17 | npy = {key: value.numpy() for key, value in pth.items()} 18 | wxf.export(npy, 'moe_' + name + '.wxf', target_format='wxf') 19 | 20 | 21 | def exportDN(name): 22 | pth = torch.load('./model/' + name + '/model_new.pth') 23 | npy = {key: value.numpy() for key, value in pth.items()} 24 | wxf.export(npy, 'moe_' + name + '.wxf', target_format='wxf') 25 | 26 | 27 | def exportDH(name): 28 | pth = torch.load('./model/' + name + '/AOD_net_epoch_relu_10.pth') 29 | npy = {key: value.numpy() for key, value in pth.items()} 30 | wxf.export(npy, 'moe_' + name + '.wxf', target_format='wxf') 31 | 32 | 33 | [exportSR(i) for i in ['a2', 'a3', 'a4', 'p2', 'p3', 'p4']] 34 | [exportDN(i) for i in ['l15', 'l25', 'l50', 'dn_lite5', 'dn_lite10', 'dn_lite15']] 35 | [exportDH(i) for i in ['dehaze']] 36 | -------------------------------------------------------------------------------- /ImageGeneration/Human/PGGAN on CelebA/2-exporter.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pickle 3 | 4 | import numpy as np 5 | import tensorflow as tf 6 | import wolframclient.serializers as wxf 7 | 8 | name = 'karras2018iclr-celebahq-1024x1024' 9 | file = open(name + '.pkl', 'rb') 10 | sess = tf.InteractiveSession() 11 | G, D, Gs = pickle.load(file) 12 | saver = tf.train.Saver() 13 | save_path = "./tmp/" + name + "/" 14 | model_name = 'model' 15 | if not os.path.exists(save_path): 16 | os.makedirs(save_path) 17 | save_path_full = os.path.join(save_path, model_name) 18 | saver.save(sess, save_path_full) 19 | 20 | ckpt = tf.train.get_checkpoint_state(save_path) 21 | reader = tf.train.NewCheckpointReader(ckpt.model_checkpoint_path) 22 | all_variables = list(reader.get_variable_to_shape_map().keys()) 23 | npy = dict(zip(all_variables, map(reader.get_tensor, all_variables))) 24 | # remove the `lod` entries: bare `float32` scalars are not supported 25 | npy.pop('D_paper/lod') 26 | npy.pop('G_paper/lod') 27 | npy.pop('G_paper_1/lod') 28 | 29 | wxf.export(npy, name + '.wxf', target_format='wxf') 30 | 31 |
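# A hedged sanity check, not part of the original exporter: run the restored
# generator once (`Gs.run` and `input_shapes` follow the
# progressive_growing_of_gans API) to confirm the pickle loaded correctly
# before trusting the exported weights.
check_latents = np.random.randn(1, *Gs.input_shapes[0][1:])
check_labels = np.zeros([1] + Gs.input_shapes[1][1:])
check_images = Gs.run(check_latents, check_labels)  # NCHW, values roughly in [-1, 1]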
# Generate latent vectors. 32 | latents = np.random.RandomState(1000).randn(1000, *Gs.input_shapes[0][1:]) # 1000 random latents 33 | latents = latents[[477, 56, 83, 887, 583, 391, 86, 340, 341, 415]] # hand-picked top-10 34 | 35 | wxf.export(latents, 'latents.wxf', target_format='wxf') 36 | -------------------------------------------------------------------------------- /ImageRecognition/Anime/ResNets trained on Danbooru2018/1-download.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | CheckDownload[link_, path_] := If[ 5 | FileExistsQ@path, 6 | Return[], 7 | ResourceFunction["MonitoredDownload"][ 8 | link, path, 9 | "IncludePlot" -> True, 10 | OverwriteTarget -> False 11 | ]; 12 | ]; 13 | 14 | 15 | CheckDownload[ 16 | "https://raw.githubusercontent.com/RF5/danbooru-pretrained/master/config/class_names_100.json", 17 | "class_names_100.ckpt.json" 18 | ]; 19 | CheckDownload[ 20 | "https://raw.githubusercontent.com/RF5/danbooru-pretrained/master/config/class_names_500.json", 21 | "class_names_500.ckpt.json" 22 | ]; 23 | CheckDownload[ 24 | "https://raw.githubusercontent.com/RF5/danbooru-pretrained/master/config/class_names_6000.json", 25 | "class_names_6000.ckpt.json" 26 | ]; 27 | CheckDownload[ 28 | "https://github.com/RF5/danbooru-pretrained/releases/download/v0.1/resnet18-3f77756f.pth", 29 | "resnet18.pth" 30 | ]; 31 | CheckDownload[ 32 | "https://github.com/RF5/danbooru-pretrained/releases/download/v0.1/resnet34-88a5e79d.pth", 33 | "resnet34.pth" 34 | ]; 35 | CheckDownload[ 36 | "https://github.com/RF5/danbooru-pretrained/releases/download/v0.1/resnet50-13306192.pth", 37 | "resnet50.pth" 38 | ]; 39 | CheckDownload[ 40 | "https://github.com/RF5/danbooru-pretrained/raw/master/img/egpic2.jpg", 41 | "Test.jpg" 42 | ]; 43 | -------------------------------------------------------------------------------- /ImageGeneration/Anime/PGGAN-128 trained on Anime/2-exporter.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pickle 3 | import tensorflow as tf 4 | import wolframclient.serializers as wxf 5 | 6 | name = 'network-snapshot-057891' 7 | out_name = 'PGGAN-128 trained on Anime' 8 | 9 | file = open(name + '.pkl', 'rb') 10 | sess = tf.InteractiveSession() 11 | G, D, Gs = pickle.load(file) 12 | saver = tf.train.Saver() 13 | save_path = "./target/" + name + "/" 14 | model_name = 'model' 15 | if not os.path.exists(save_path): 16 | os.makedirs(save_path) 17 | save_path_full = os.path.join(save_path, model_name) 18 | saver.save(sess, save_path_full) 19 | 20 | ckpt = tf.train.get_checkpoint_state(save_path) 21 | reader = tf.train.NewCheckpointReader(ckpt.model_checkpoint_path) 22 | all_variables = list(reader.get_variable_to_shape_map().keys()) 23 | npy = dict(zip(all_variables, map(reader.get_tensor, all_variables))) 24 | wxf.export(npy, out_name + '.WXF', target_format='wxf') 25 | 26 | # Save as protobuf 27 | with tf.Session() as sess: 28 | tf.initialize_all_variables().run() 29 | output_graph_def = tf.graph_util.convert_variables_to_constants( 30 | sess=sess, 31 | input_graph_def=sess.graph_def, 32 | output_node_names=['Gs/images_out'] 33 | # output_node_names=['Gs/ToRGB_lod0/add'] 34 | ) 35 | 36 | with tf.gfile.GFile("./target/" + name + ".pb", "wb") as file: # save the model 37 | file.write(output_graph_def.SerializeToString()) # serialize the output 38 | -------------------------------------------------------------------------------- /ImageGeneration/Anime/PGGAN-256 trained on
HoloFaces/2-exporter.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pickle 3 | import tensorflow as tf 4 | import wolframclient.serializers as wxf 5 | 6 | name = '2018-10-05-gwern-holofaces-progan-model2053-006353' 7 | out_name = 'PGGAN-256 trained on HoloFaces' 8 | 9 | file = open(name + '.pkl', 'rb') 10 | sess = tf.InteractiveSession() 11 | G, D, Gs = pickle.load(file) 12 | saver = tf.train.Saver() 13 | save_path = "./target/" + name + "/" 14 | model_name = 'model' 15 | if not os.path.exists(save_path): 16 | os.makedirs(save_path) 17 | save_path_full = os.path.join(save_path, model_name) 18 | saver.save(sess, save_path_full) 19 | 20 | ckpt = tf.train.get_checkpoint_state(save_path) 21 | reader = tf.train.NewCheckpointReader(ckpt.model_checkpoint_path) 22 | all_variables = list(reader.get_variable_to_shape_map().keys()) 23 | npy = dict(zip(all_variables, map(reader.get_tensor, all_variables))) 24 | wxf.export(npy, out_name + '.WXF', target_format='wxf') 25 | 26 | # Save as protobuf 27 | with tf.Session() as sess: 28 | tf.initialize_all_variables().run() 29 | output_graph_def = tf.graph_util.convert_variables_to_constants( 30 | sess=sess, 31 | input_graph_def=sess.graph_def, 32 | output_node_names=['Gs/images_out'] 33 | # output_node_names=['Gs/ToRGB_lod0/add'] 34 | ) 35 | 36 | with tf.gfile.GFile("./target/" + name + ".pb", "wb") as file: # save the model 37 | file.write(output_graph_def.SerializeToString()) # serialize the output 38 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/ResNet on CIFAR10/ResnetV2-110 tested on CIFAR10 TestSet.mt: -------------------------------------------------------------------------------- 1 | BeginTestSection["ClassificationBenchmark"]; 2 | 3 | 4 | (*Dependency Check*) 5 | VerificationTest[ 6 | << MachineLearning`; 7 | << NeuralNetworks`; 8 | << MXNetLink`; 9 | << DeepMath`;, 10 | Null, TestID -> "Dependency Check" 11 | ]; 12 | 13 | 14 | (*Pre-define*) 15 | VerificationTest[ 16 | netName = "ResnetV2-110 trained on CIFAR10"; (*assumed name, following the sibling tests*) model := model = Import[netName <> ".WXF"]; 17 | data := data = Import@"D:\\WLNet-Data-Set\\CIFAR10\\CIFAR10 TestData.MX"; 18 | cm := cm = ClassificationBenchmark[model, data]; 19 | dump := dump = DumpSave[".cache.mx", cm]; 20 | analyze := analyze = ClassificationBenchmark[cm, netName];, 21 | Null, TestID -> "Pre-define" 22 | ]; 23 | 24 | 25 | (*Warm-Up*) 26 | VerificationTest[ 27 | Print@With[{size = 1000}, 28 | x = RandomReal[1, {size, size}]; 29 | layer = NetInitialize@LinearLayer[size, "Input" -> size, "Biases" -> None]; 30 | time = First@RepeatedTiming[layer[x, TargetDevice -> "GPU"]]; 31 | Quantity[size^2 * (2 * size \[Minus] 1) / time, "FLOPS"] 32 | ];, 33 | Null, TestID -> "GPU Warm-Up" 34 | ]; 35 | 36 | 37 | (*Evaluation*) 38 | VerificationTest[Head[model], NetChain, TestID -> "Loading Model"]; 39 | VerificationTest[Head[data], List, TestID -> "Loading Data"]; 40 | VerificationTest[Head[cm], ClassifierMeasurementsObject, TestID -> "Benchmark Test"]; 41 | VerificationTest[Head[dump], List, TestID -> "Result Dump"]; 42 | VerificationTest[Head[analyze], Association, TestID -> "Analyzing"]; 43 | 44 | EndTestSection[]; -------------------------------------------------------------------------------- /ImageRecognition/Classifation/Wide-ResNet on CIFAR10/WRN16-10 tested on CIFAR10 TestSet.mt: -------------------------------------------------------------------------------- 1 | BeginTestSection["ClassificationBenchmark"]; 2 | 3 | 4 | (*Dependency Check*) 5 | VerificationTest[ 6 | << MachineLearning`; 7 | << NeuralNetworks`; 8 | << MXNetLink`; 9 | << DeepMath`;, 10 | Null, TestID -> "Dependency Check" 11 | ]; 12 | 13 | 14 | (*Pre-define*) 15 | VerificationTest[ 16 | netName = "WRN16-10 trained on CIFAR10"; (*assumed name, following the sibling tests*) model := model = Import[netName <> ".WXF"]; 17 | data := data = Import@"D:\\WLNet-Data-Set\\CIFAR10\\CIFAR10 TestData.MX"; 18 | cm := cm = ClassificationBenchmark[model, data]; 19 | dump := dump = DumpSave[".cache.mx", cm]; 20 | analyze := analyze = ClassificationBenchmark[cm, netName];, 21 | Null, TestID -> "Pre-define" 22 | ]; 23 | 24 | 25 | (*Warm-Up*) 26 | VerificationTest[ 27 | Print@With[{size = 1000}, 28 | x = RandomReal[1, {size, size}]; 29 | layer = NetInitialize@LinearLayer[size, "Input" -> size, "Biases" -> None]; 30 | time = First@RepeatedTiming[layer[x, TargetDevice -> "GPU"]]; 31 | Quantity[size^2 * (2 * size \[Minus] 1) / time, "FLOPS"] 32 | ];, 33 | Null, TestID -> "GPU Warm-Up" 34 | ]; 35 | 36 | 37 | (*Evaluation*) 38 | VerificationTest[Head[model], NetChain, TestID -> "Loading Model"]; 39 | VerificationTest[Head[data], List, TestID -> "Loading Data"]; 40 | VerificationTest[Head[cm], ClassifierMeasurementsObject, TestID -> "Benchmark Test"]; 41 | VerificationTest[Head[dump], List, TestID -> "Result Dump"]; 42 | VerificationTest[Head[analyze], Association, TestID -> "Analyzing"]; 43 | 44 | EndTestSection[]; -------------------------------------------------------------------------------- /ImageRecognition/Classifation/Wide-ResNet on CIFAR10/WRN28-10 tested on CIFAR10 TestSet.mt: -------------------------------------------------------------------------------- 1 | BeginTestSection["ClassificationBenchmark"]; 2 | 3 | 4 | (*Dependency Check*) 5 | VerificationTest[ 6 | << MachineLearning`; 7 | << NeuralNetworks`; 8 | << MXNetLink`; 9 | << DeepMath`;, 10 | Null, TestID -> "Dependency Check" 11 | ]; 12 | 13 | 14 | (*Pre-define*) 15 | VerificationTest[ 16 | netName = "WRN28-10 trained on CIFAR10"; (*assumed name, following the sibling tests*) model := model = Import[netName <> ".WXF"]; 17 | data := data = Import@"D:\\WLNet-Data-Set\\CIFAR10\\CIFAR10 TestData.MX"; 18 | cm := cm = ClassificationBenchmark[model, data]; 19 | dump := dump = DumpSave[".cache.mx", cm]; 20 | analyze := analyze = ClassificationBenchmark[cm, netName];, 21 | Null, TestID -> "Pre-define" 22 | ]; 23 | 24 | 25 | (*Warm-Up*) 26 | VerificationTest[ 27 | Print@With[{size = 1000}, 28 | x = RandomReal[1, {size, size}]; 29 | layer = NetInitialize@LinearLayer[size, "Input" -> size, "Biases" -> None]; 30 | time = First@RepeatedTiming[layer[x, TargetDevice -> "GPU"]]; 31 | Quantity[size^2 * (2 * size \[Minus] 1) / time, "FLOPS"] 32 | ];, 33 | Null, TestID -> "GPU Warm-Up" 34 | ]; 35 | 36 | 37 | (*Evaluation*) 38 | VerificationTest[Head[model], NetChain, TestID -> "Loading Model"]; 39 | VerificationTest[Head[data], List, TestID -> "Loading Data"]; 40 | VerificationTest[Head[cm], ClassifierMeasurementsObject, TestID -> "Benchmark Test"]; 41 | VerificationTest[Head[dump], List, TestID -> "Result Dump"]; 42 | VerificationTest[Head[analyze], Association, TestID -> "Analyzing"]; 43 | 44 | EndTestSection[]; -------------------------------------------------------------------------------- /ImageRecognition/Classifation/Wide-ResNet on CIFAR10/WRN40-8 tested on CIFAR10 TestSet.mt: -------------------------------------------------------------------------------- 1 | BeginTestSection["ClassificationBenchmark"]; 2 | 3 | 4 | (*Dependency Check*) 5 | VerificationTest[ 6 | << MachineLearning`; 7 | << NeuralNetworks`; 8 | << MXNetLink`; 9 | << DeepMath`;, 10 | Null, TestID -> "Dependency Check" 11 | ]; 12 | 13 | 14 | (*Pre-define*) 15 | netName = "WRN40-8 trained on CIFAR10"; (*assumed name, following the sibling tests*)
VerificationTest[ 16 | model := model = Import[netName <> ".WXF"]; 17 | data := data = Import@"D:\\WLNet-Data-Set\\CIFAR10\\CIFAR10 TestData.MX"; 18 | cm := cm = ClassificationBenchmark[model, data]; 19 | dump := dump = DumpSave[".cache.mx", cm]; 20 | analyze := analyze = ClassificationBenchmark[cm, netName];, 21 | Null, TestID -> "Pre-define" 22 | ]; 23 | 24 | 25 | (*Warm-Up*) 26 | VerificationTest[ 27 | Print@With[{size = 1000}, 28 | x = RandomReal[1, {size, size}]; 29 | layer = NetInitialize@LinearLayer[size, "Input" -> size, "Biases" -> None]; 30 | time = First@RepeatedTiming[layer[x, TargetDevice -> "GPU"]]; 31 | Quantity[size^2 * (2 * size \[Minus] 1) / time, "FLOPS"] 32 | ];, 33 | Null, TestID -> "GPU Warm-Up" 34 | ]; 35 | 36 | 37 | (*Evaluation*) 38 | VerificationTest[Head[model], NetChain, TestID -> "Loading Model"]; 39 | VerificationTest[Head[data], List, TestID -> "Loading Data"]; 40 | VerificationTest[Head[cm], ClassifierMeasurementsObject, TestID -> "Benchmark Test"]; 41 | VerificationTest[Head[dump], List, TestID -> "Result Dump"]; 42 | VerificationTest[Head[analyze], Association, TestID -> "Analyzing"]; 43 | 44 | EndTestSection[]; -------------------------------------------------------------------------------- /ImageRecognition/Classifation/LeNet on MNIST/LeNet trained on MNIST.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | << NeuralNetworks` 5 | << MXNetLink` 6 | << DeepMath` 7 | DateString[] 8 | 9 | 10 | (* ::Subitem:: *) 11 | (*Sat 17 Nov 2018 14:55:36*) 12 | 13 | 14 | (* ::Subchapter:: *) 15 | (*Main*) 16 | 17 | 18 | mainNet = NetModel["LeNet Trained on MNIST Data"] 19 | 20 | 21 | (* ::Subchapter:: *) 22 | (*Export Model*) 23 | 24 | 25 | Export["LeNet trained on MNIST.WXF", mainNet] 26 | 27 | 28 | (* ::Subchapter:: *) 29 | (*Test*) 30 | 31 | 32 | << MachineLearning`;<< NeuralNetworks`;<< MXNetLink`;<< DeepMath`; 33 | SetDirectory@NotebookDirectory[];DateString[] 34 | test = TestReport["LeNet trained on MNIST.mt"] 35 | 36 | 37 | (* ::Subitem:: *) 38 | (*Sun 18 Nov 2018 19:48:27*) 39 | 40 | 41 | (* ::Subchapter:: *) 42 | (*Test Report*) 43 | 44 | 45 | upload = ImportString["\ 46 | ![Classification Curve.png](https://i.loli.net/2018/11/17/5bf004151b12a.png) 47 | ![High Precision Classification Curve.png](https://i.loli.net/2018/11/17/5bf0041535eab.png) 48 | ![Accuracy Rejection Curve.png](https://i.loli.net/2018/11/17/5bf00415503a1.png) 49 | ![ConfusionMatrix.png](https://i.loli.net/2018/11/17/5bf0041545c4e.png) 50 | ", "Data"]; 51 | report = ClassificationBenchmark[analyze, 52 | DeepMath`Tools`TestReportAnalyze[test], 53 | "Image" -> AssociationThread[Rule @@ Transpose[StringSplit[#, {"![", "](", ")"}]& /@ upload]] 54 | ]; 55 | ClassificationBenchmark["LeNet tested on MNIST TestSet", report] 56 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/ResNet on CIFAR10/ResnetV2-20 tested on CIFAR10 TestSet.mt: -------------------------------------------------------------------------------- 1 | BeginTestSection["ClassificationBenchmark"]; 2 | 3 | 4 | (*Dependency Check*) 5 | VerificationTest[ 6 | << MachineLearning`; 7 | << NeuralNetworks`; 8 | << MXNetLink`; 9 | << DeepMath`;, 10 | Null, TestID -> "Dependency Check" 11 | ]; 12 | 13 | 14 | (*Pre-define*) 15 | VerificationTest[ 16 | netName = "ResnetV2-20 trained on CIFAR10"; 17 | model := model = Import[netName <> ".WXF"]; 18 | data := data = 
Import@"D:\\WLNet-Data-Set\\CIFAR10\\CIFAR10 TestData.MX"; 19 | cm := cm = ClassificationBenchmark[model, data]; 20 | dump := dump = DumpSave[".cache.mx", cm]; 21 | analyze := analyze = ClassificationBenchmark[cm, netName];, 22 | Null, TestID -> "Pre-define" 23 | ]; 24 | 25 | 26 | (*Warm-Up*) 27 | VerificationTest[ 28 | Print@With[{size = 1000}, 29 | x = RandomReal[1, {size, size}]; 30 | layer = NetInitialize@LinearLayer[size, "Input" -> size, "Biases" -> None]; 31 | time = First@RepeatedTiming[layer[x, TargetDevice -> "GPU"]]; 32 | Quantity[size^2 * (2 * size \[Minus] 1) / time, "FLOPS"] 33 | ];, 34 | Null, TestID -> "GPU Warm-Up" 35 | ]; 36 | 37 | 38 | (*Evaluation*) 39 | VerificationTest[Head[model], NetChain, TestID -> "Loading Model"]; 40 | VerificationTest[Head[data], List, TestID -> "Loading Data"]; 41 | VerificationTest[Head[cm], ClassifierMeasurementsObject, TestID -> "Benchmark Test"]; 42 | VerificationTest[Head[dump], List, TestID -> "Result Dump"]; 43 | VerificationTest[Head[analyze], Association, TestID -> "Analyzing"]; 44 | 45 | EndTestSection[]; -------------------------------------------------------------------------------- /ImageRecognition/Classifation/ResNet on CIFAR10/ResnetV2-56 tested on CIFAR10 TestSet.mt: -------------------------------------------------------------------------------- 1 | BeginTestSection["ClassificationBenchmark"]; 2 | 3 | 4 | (*Dependency Check*) 5 | VerificationTest[ 6 | << MachineLearning`; 7 | << NeuralNetworks`; 8 | << MXNetLink`; 9 | << DeepMath`;, 10 | Null, TestID -> "Dependency Check" 11 | ]; 12 | 13 | 14 | (*Pre-define*) 15 | VerificationTest[ 16 | netName = "ResnetV2-56 trained on CIFAR10"; 17 | model := model = Import[netName <> ".WXF"]; 18 | data := data = Import@"D:\\WLNet-Data-Set\\CIFAR10\\CIFAR10 TestData.MX"; 19 | cm := cm = ClassificationBenchmark[model, data]; 20 | dump := dump = DumpSave[".cache.mx", cm]; 21 | analyze := analyze = ClassificationBenchmark[cm, netName];, 22 | Null, TestID -> "Pre-define" 23 | ]; 24 | 25 | 26 | (*Warm-Up*) 27 | VerificationTest[ 28 | Print@With[{size = 1000}, 29 | x = RandomReal[1, {size, size}]; 30 | layer = NetInitialize@LinearLayer[size, "Input" -> size, "Biases" -> None]; 31 | time = First@RepeatedTiming[layer[x, TargetDevice -> "GPU"]]; 32 | Quantity[size^2 * (2 * size \[Minus] 1) / time, "FLOPS"] 33 | ];, 34 | Null, TestID -> "GPU Warm-Up" 35 | ]; 36 | 37 | 38 | (*Evaluation*) 39 | VerificationTest[Head[model], NetChain, TestID -> "Loading Model"]; 40 | VerificationTest[Head[data], List, TestID -> "Loading Data"]; 41 | VerificationTest[Head[cm], ClassifierMeasurementsObject, TestID -> "Benchmark Test"]; 42 | VerificationTest[Head[dump], List, TestID -> "Result Dump"]; 43 | VerificationTest[Head[analyze], Association, TestID -> "Analyzing"]; 44 | 45 | EndTestSection[]; -------------------------------------------------------------------------------- /ImageRecognition/Classifation/LeNet on MNIST/LeNet trained on MNIST.mt: -------------------------------------------------------------------------------- 1 | BeginTestSection["ClassificationBenchmark"]; 2 | 3 | 4 | (*Dependency Check*) 5 | VerificationTest[ 6 | << MachineLearning`; 7 | << NeuralNetworks`; 8 | << MXNetLink`; 9 | << DeepMath`;, 10 | Null, TestID -> "Dependency Check" 11 | ]; 12 | 13 | 14 | (*Pre-define*) 15 | VerificationTest[ 16 | netName = "LeNet trained on MNIST"; 17 | testName = "LeNet Tested on MNIST TestSet"; 18 | model := model = NetModel["LeNet Trained on MNIST Data"]; 19 | data := data = ResourceData[ResourceObject["MNIST"], "TestData"]; 20 | cm := cm = ClassificationBenchmark[model, data]; 21 | dump := dump = DumpSave[".cache.mx", cm]; 22 | analyze := analyze = ClassificationBenchmark[cm, "LeNet trained on MNIST"];, 23 | Null, TestID -> "Pre-define" 24 | ]; 25 | 26 | 27 | (*Warm-Up*) 28 | VerificationTest[ 29 | Print@With[{size = 1000}, 30 | x = RandomReal[1, {size, size}]; 31 | layer = NetInitialize@LinearLayer[size, "Input" -> size, "Biases" -> None]; 32 | time = First@RepeatedTiming[layer[x, TargetDevice -> "GPU"]]; 33 | Quantity[size^2 * (2 * size \[Minus] 1) / time, "FLOPS"] 34 | ];, 35 | Null, TestID -> "GPU Warm-Up" 36 | ]; 37 | 38 | 39 | (*Evaluation*) 40 | VerificationTest[Head[model], NetChain, TestID -> "Loading Model"]; 41 | VerificationTest[Head[data], List, TestID -> "Loading Data"]; 42 | VerificationTest[Head[cm], ClassifierMeasurementsObject, TestID -> "Benchmark Test"]; 43 | VerificationTest[Head[dump], List, TestID -> "Result Dump"]; 44 | VerificationTest[Head[analyze], Association, TestID -> "Analyzing"]; 45 | 46 | 47 | EndTestSection[]; 48 | -------------------------------------------------------------------------------- /ImageGeneration/Inpainting/EdgeConnect on CelebA/debugger.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import torch 3 | import wolframclient.serializers as wxf 4 | 5 | from src.networks import EdgeGenerator 6 | 7 | forward_dict = {} 8 | 9 | 10 | def make_hook(name): 11 | def hook(m, input, output): 12 | forward_dict[name + '_in'] = input[0].detach().numpy() 13 | forward_dict[name + '_out'] = output[0].detach().numpy() 14 | return hook 15 | 16 | 17 | dataset = 'celeba' 18 | data = torch.load('checkpoints/' + dataset + '/EdgeModel_gen.pth', map_location='cpu') 19 | generator = EdgeGenerator().cpu().eval() 20 | generator.load_state_dict(data['generator']) 21 | 22 | # annoyingly, torch does not accept the img[:, :, ::-1] or img[..., ::-1] notation, so reorder BGR to RGB by index 23 | img = torch.Tensor(cv2.imread('examples/celeba/images/celeba_01.png')) 24 | img = img[:, :, [2, 1, 0]].unsqueeze(0).permute(0, 3, 1, 2).cpu() 25 | 26 | layers = list(generator.named_modules()) 27 | 28 | generator.encoder[0].register_forward_hook(make_hook('encoder.0')) 29 | generator.encoder[1].register_forward_hook(make_hook('encoder.1')) 30 | generator.encoder[2].register_forward_hook(make_hook('encoder.2')) 31 | generator.encoder[3].register_forward_hook(make_hook('encoder.3')) 32 | generator.middle.register_forward_hook(make_hook('middle.0')) 33 | generator.decoder.register_forward_hook(make_hook('decoder.0')) 34 | generator.decoder[-1].register_forward_hook(make_hook('decoder.7')) # output 35 | out = generator(img) 36 | 37 | # npy = {key: value.detach().numpy() for key, value in inter_feature.items()} 38 | wxf.export(forward_dict, 'debug.wxf', target_format='wxf') 39 | wxf.export(out.detach().numpy(), 'out.wxf', target_format='wxf') 40 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/AlexNet on ImageNet/debugger.py: -------------------------------------------------------------------------------- 1 | from argparse import Namespace 2 | 3 | import wolframclient.serializers as wxf 4 | from gluoncv.data.transforms.presets.imagenet import transform_eval 5 | from gluoncv.model_zoo import get_model 6 | from mxnet import image, symbol, gluon 7 | 8 | params = Namespace( 9 | model='alexnet', 10 | input_pic='./ILSVRC2012_val_00000001.png', 11 | debug_nodes=[ 12 | "alexnet0_conv0_fwd_output", 13 | "alexnet0_conv1_fwd_output", 14 | "alexnet0_conv2_fwd_output", 15 | "alexnet0_conv3_fwd_output", 16 | "alexnet0_conv4_fwd_output", 17 | "alexnet0_dense0_fwd_output", 18 | "alexnet0_dense1_fwd_output", 19 | "alexnet0_dense2_fwd_output", 20 | "alexnet0_dropout0_fwd_output", 21 | "alexnet0_dropout1_fwd_output", 22 | "alexnet0_flatten0_flatten0_output", 23 | "alexnet0_pool0_fwd_output", 24 | "alexnet0_pool1_fwd_output", 25 | "alexnet0_pool2_fwd_output" 26 | ] 27 | ) 28 | 29 | # prepare the model and preprocess the input image 30 | net = get_model(params.model, pretrained=True) 31 | img = image.imread(params.input_pic) 32 | img = transform_eval(img, resize_short=224, crop_size=224) 33 | wxf.export(img.asnumpy(), 'input.wxf', target_format='wxf') 34 | 35 | # list the selectable output nodes 36 | nodes = net(symbol.var('flow')).get_internals().list_outputs() 37 | wxf.export(nodes, 'nodes.wxf', target_format='wxf') 38 | 39 | 40 | def debug_net(net): 41 | data = symbol.var('flow') 42 | internals = net(data).get_internals() 43 | hooks = [internals[i] for i in params.debug_nodes] 44 | new = gluon.SymbolBlock(hooks, data, params=net.collect_params()) 45 | return new 46 | 47 | 48 | debug = debug_net(net) 49 | ndarray = [i.asnumpy() for i in debug(img)] 50 | wxf.export(ndarray, 'debug.wxf', target_format='wxf') 51 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/VGG on ImageNet/imagenet_vgg13.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | Needs["MXNetLink`"] 5 | Needs["NeuralNetworks`"] 6 | DateString[] 7 | 8 | 9 | (* ::Subitem:: *) 10 | (*Fri 19 Oct 2018 16:27:54*) 11 | 12 | 13 | (* ::Subchapter:: *) 14 | (*Import Weights*) 15 | 16 | 17 | params = NDArrayImport["imagenet_vgg13-0000.params"]; 18 | 19 | 20 | (* ::Subchapter:: *) 21 | (*Encoder & Decoder*) 22 | 23 | 24 | mShift = {0.485, 0.456, 0.406}; 25 | vShift = {0.229, 0.224, 0.225}^2; 26 | encoder = NetEncoder[{"Image", 224, "MeanImage" -> mShift, "VarianceImage" -> vShift}] 27 | decoder = NetExtract[NetModel["ResNet-50 Trained on ImageNet Competition Data"], "Output"] 28 | 29 | 30 | (* ::Subchapter:: *) 31 | (*Pre-defined Structure*) 32 | 33 | 34 | getCN[i_] := ConvolutionLayer[ 35 | "Weights" -> params["arg:vgg1_conv" <> i <> "_weight"], 36 | "Biases" -> params["arg:vgg1_conv" <> i <> "_bias"], 37 | "PaddingSize" -> 1, "Stride" -> 1 38 | ] 39 | getFC[i_, n_] := LinearLayer[n, 40 | "Weights" -> params["arg:vgg1_dense" <> i <> "_weight"], 41 | "Biases" -> params["arg:vgg1_dense" <> i <> "_bias"] 42 | ] 43 | getBlock[i_, j_] := NetChain@Flatten@Table[{getCN[ToString@n], Ramp}, {n, i, j}] 44 | getBlock2[i_, j_] := NetChain@{getFC[ToString@i, j], Ramp, DropoutLayer[0.5]} 45 | pool = PoolingLayer[{2, 2}, "Stride" -> 2] 46 | 47 | 48 | (* ::Subchapter:: *) 49 | (*Main*) 50 | 51 | 52 | mainNet = NetChain[{ 53 | getBlock[0, 1], pool, 54 | getBlock[2, 3], pool, 55 | getBlock[4, 5], pool, 56 | getBlock[6, 7], pool, 57 | getBlock[8, 9], pool, 58 | getBlock2[0, 4096], 59 | getBlock2[1, 4096], 60 | getFC["2", 1000] 61 | }, 62 | "Input" -> encoder, "Output" -> decoder 63 | ] 64 | 65 | 66 | (* ::Subchapter:: *) 67 | (*Export Model*) 68 | 69 | 70 | Export["imagenet_vgg13.WXF", mainNet] 71 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/VGG on ImageNet/imagenet_vgg16.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 |
Needs["MXNetLink`"] 5 | Needs["NeuralNetworks`"] 6 | DateString[] 7 | 8 | 9 | (* ::Subitem:: *) 10 | (*Fri 19 Oct 2018 16:25:44*) 11 | 12 | 13 | (* ::Subchapter:: *) 14 | (*Import Weights*) 15 | 16 | 17 | params = NDArrayImport["imagenet_vgg16-0000.params"]; 18 | 19 | 20 | (* ::Subchapter:: *) 21 | (*Encoder & Decoder*) 22 | 23 | 24 | mShift = {0.485, 0.456, 0.406}; 25 | vShift = {0.229, 0.224, 0.225}^2; 26 | encoder = NetEncoder[{"Image", 224, "MeanImage" -> mShift, "VarianceImage" -> vShift}] 27 | decoder = NetExtract[NetModel["ResNet-50 Trained on ImageNet Competition Data"], "Output"] 28 | 29 | 30 | (* ::Subchapter:: *) 31 | (*Pre-defined Structure*) 32 | 33 | 34 | getCN[i_] := ConvolutionLayer[ 35 | "Weights" -> params["arg:vgg2_conv" <> i <> "_weight"], 36 | "Biases" -> params["arg:vgg2_conv" <> i <> "_bias"], 37 | "PaddingSize" -> 1, "Stride" -> 1 38 | ] 39 | getFC[i_, n_] := LinearLayer[n, 40 | "Weights" -> params["arg:vgg2_dense" <> i <> "_weight"], 41 | "Biases" -> params["arg:vgg2_dense" <> i <> "_bias"] 42 | ] 43 | getBlock[i_, j_] := NetChain@Flatten@Table[{getCN[ToString@n], Ramp}, {n, i, j}] 44 | getBlock2[i_, j_] := NetChain@{getFC[ToString@i, j], Ramp, DropoutLayer[0.5]} 45 | pool = PoolingLayer[{2, 2}, "Stride" -> 2] 46 | 47 | 48 | (* ::Subchapter:: *) 49 | (*Main*) 50 | 51 | 52 | mainNet = NetChain[{ 53 | getBlock[0, 1], pool, 54 | getBlock[2, 3], pool, 55 | getBlock[4, 6], pool, 56 | getBlock[7, 9], pool, 57 | getBlock[10, 12], pool, 58 | getBlock2[0, 4096], 59 | getBlock2[1, 4096], 60 | getFC["2", 1000] 61 | }, 62 | "Input" -> encoder, "Output" -> decoder 63 | ] 64 | 65 | 66 | (* ::Subchapter:: *) 67 | (*Export Model*) 68 | 69 | 70 | Export["imagenet_vgg16.WXF", mainNet] 71 | -------------------------------------------------------------------------------- /ImageEnhancement/SuperResolution/SRResNet on CommonSR/SRResNet2x trained on CommonSR.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | Needs["MXNetLink`"] 5 | Needs["NeuralNetworks`"] 6 | DateString[] 7 | 8 | 9 | (* ::Subitem:: *) 10 | (*Sat 27 Oct 2018 15:08:10*) 11 | 12 | 13 | (* ::Subchapter:: *) 14 | (*Import Weights*) 15 | 16 | 17 | params = NDArrayImport["SRResNet_bicx2_in3nf64nb16-0000.params"]; 18 | 19 | 20 | (* ::Subchapter:: *) 21 | (*Encoder & Decoder*) 22 | 23 | 24 | encoder = NetEncoder[{"Image", {640, 360}}] 25 | decoder = NetDecoder["Image"] 26 | 27 | 28 | (* ::Subchapter::Closed:: *) 29 | (*Pre-defined Structure*) 30 | 31 | 32 | getCN[i_, s_, p_] := ConvolutionLayer[ 33 | "Weights" -> params["arg:learned_" <> ToString[i]], 34 | "Biases" -> params["arg:learned_" <> ToString[i + 1]], 35 | "PaddingSize" -> p, "Stride" -> s 36 | ] 37 | getBlock[i_] := NetGraph[{ 38 | getCN[i, 1, 1], 39 | ElementwiseLayer["ReLU"], 40 | getCN[i + 2, 1, 1], 41 | ThreadingLayer[Plus] 42 | }, { 43 | NetPort["Input"] -> 1 -> 2 -> 3, 44 | {NetPort["Input"], 3} -> 4 45 | }]; 46 | $head = getCN[0, 1, 1]; 47 | $body = NetGraph[{ 48 | NetChain@Table[getBlock[i], {i, 2, 64, 4}], 49 | getCN[66, 1, 1], 50 | ThreadingLayer[Plus] 51 | }, { 52 | NetPort["Input"] -> 1 -> 2 , 53 | {NetPort["Input"], 2} -> 3 54 | }]; 55 | $tail = NetChain@{ 56 | getCN[68, 1, 1], 57 | PixelShuffleLayer[2], 58 | ElementwiseLayer["ReLU"], 59 | getCN[70, 1, 1], 60 | ElementwiseLayer["ReLU"], 61 | getCN[72, 1, 1] 62 | }; 63 | 64 | 65 | (* ::Subchapter:: *) 66 | (*Main*) 67 | 68 | 69 | mainNet = NetChain[{$head, $body, $tail}] // NetFlatten; 70 | mainNet = 
NetReplacePart[mainNet, {"Input" -> encoder, "Output" -> decoder}] 71 | 72 | 73 | (* ::Subchapter:: *) 74 | (*Export Model*) 75 | 76 | 77 | Export["SRResNet2x trained on CommonSR.WXF", mainNet] 78 | -------------------------------------------------------------------------------- /ImageEnhancement/SuperResolution/SRResNet on CommonSR/SRResNet3x trained on CommonSR.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | Needs["MXNetLink`"] 5 | Needs["NeuralNetworks`"] 6 | DateString[] 7 | 8 | 9 | (* ::Subitem:: *) 10 | (*Sat 27 Oct 2018 15:12:54*) 11 | 12 | 13 | (* ::Subchapter:: *) 14 | (*Import Weights*) 15 | 16 | 17 | params = NDArrayImport["SRResNet_bicx3_in3nf64nb16-0000.params"]; 18 | 19 | 20 | (* ::Subchapter:: *) 21 | (*Encoder & Decoder*) 22 | 23 | 24 | encoder = NetEncoder[{"Image", {640, 360}}] 25 | decoder = NetDecoder["Image"] 26 | 27 | 28 | (* ::Subchapter::Closed:: *) 29 | (*Pre-defined Structure*) 30 | 31 | 32 | getCN[i_, s_, p_] := ConvolutionLayer[ 33 | "Weights" -> params["arg:learned_" <> ToString[i]], 34 | "Biases" -> params["arg:learned_" <> ToString[i + 1]], 35 | "PaddingSize" -> p, "Stride" -> s 36 | ] 37 | getBlock[i_] := NetGraph[{ 38 | getCN[i, 1, 1], 39 | ElementwiseLayer["ReLU"], 40 | getCN[i + 2, 1, 1], 41 | ThreadingLayer[Plus] 42 | }, { 43 | NetPort["Input"] -> 1 -> 2 -> 3, 44 | {NetPort["Input"], 3} -> 4 45 | }]; 46 | $head = getCN[0, 1, 1]; 47 | $body = NetGraph[{ 48 | NetChain@Table[getBlock[i], {i, 2, 64, 4}], 49 | getCN[66, 1, 1], 50 | ThreadingLayer[Plus] 51 | }, { 52 | NetPort["Input"] -> 1 -> 2 , 53 | {NetPort["Input"], 2} -> 3 54 | }]; 55 | $tail = NetChain@{ 56 | getCN[68, 1, 1], 57 | PixelShuffleLayer[3], 58 | ElementwiseLayer["ReLU"], 59 | getCN[70, 1, 1], 60 | ElementwiseLayer["ReLU"], 61 | getCN[72, 1, 1] 62 | }; 63 | 64 | 65 | (* ::Subchapter:: *) 66 | (*Main*) 67 | 68 | 69 | mainNet = NetChain[{$head, $body, $tail}] // NetFlatten; 70 | mainNet = NetReplacePart[mainNet, {"Input" -> encoder, "Output" -> decoder}] 71 | 72 | 73 | (* ::Subchapter:: *) 74 | (*Export Model*) 75 | 76 | 77 | Export["SRResNet3x trained on CommonSR.WXF", mainNet] 78 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/VGG on ImageNet/imagenet_vgg19.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | Needs["MXNetLink`"] 5 | Needs["NeuralNetworks`"] 6 | DateString[] 7 | 8 | 9 | (* ::Subitem:: *) 10 | (*Fri 19 Oct 2018 16:23:22*) 11 | 12 | 13 | (* ::Subchapter:: *) 14 | (*Import Weights*) 15 | 16 | 17 | params = NDArrayImport["imagenet_vgg19-0000.params"]; 18 | 19 | 20 | (* ::Subchapter:: *) 21 | (*Encoder & Decoder*) 22 | 23 | 24 | mShift = {0.485, 0.456, 0.406}; 25 | vShift = {0.229, 0.224, 0.225}^2; 26 | encoder = NetEncoder[{"Image", 224, "MeanImage" -> mShift, "VarianceImage" -> vShift}] 27 | decoder = NetExtract[NetModel["ResNet-50 Trained on ImageNet Competition Data"], "Output"] 28 | 29 | 30 | (* ::Subchapter:: *) 31 | (*Pre-defined Structure*) 32 | 33 | 34 | getCN[i_] := ConvolutionLayer[ 35 | "Weights" -> params["arg:vgg3_conv" <> i <> "_weight"], 36 | "Biases" -> params["arg:vgg3_conv" <> i <> "_bias"], 37 | "PaddingSize" -> 1, "Stride" -> 1 38 | ] 39 | getFC[i_, n_] := LinearLayer[n, 40 | "Weights" -> params["arg:vgg3_dense" <> i <> "_weight"], 41 | "Biases" -> params["arg:vgg3_dense" <> i <> "_bias"] 42 | ] 43 
| getBlock[i_, j_] := NetChain@Flatten@Table[{getCN[ToString@n], Ramp}, {n, i, j}] 44 | getBlock2[i_, j_] := NetChain@{getFC[ToString@i, j], Ramp, DropoutLayer[0.5]} 45 | pool = PoolingLayer[{2, 2}, "Stride" -> 2] 46 | 47 | 48 | (* ::Subchapter:: *) 49 | (*Main*) 50 | 51 | 52 | mainNet = NetChain[{ 53 | getBlock[0, 1], pool, 54 | getBlock[2, 3], pool, 55 | getBlock[4, 7], pool, 56 | getBlock[8, 11], pool, 57 | getBlock[12, 15], pool, 58 | getBlock2[0, 4096], 59 | getBlock2[1, 4096], 60 | getFC["2", 1000] 61 | }, 62 | "Input" -> encoder, "Output" -> decoder 63 | ] 64 | 65 | 66 | (* ::Subchapter:: *) 67 | (*Export Model*) 68 | 69 | 70 | Export["imagenet_vgg19.WXF", mainNet] 71 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/VGG on ImageNet/imagenet_vgg11.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | Needs["MXNetLink`"] 5 | Needs["NeuralNetworks`"] 6 | DateString[] 7 | 8 | 9 | (* ::Subitem:: *) 10 | (*Fri 19 Oct 2018 16:29:40*) 11 | 12 | 13 | (* ::Subchapter:: *) 14 | (*Import Weights*) 15 | 16 | 17 | params = NDArrayImport["imagenet_vgg11-0000.params"]; 18 | 19 | 20 | (* ::Subchapter:: *) 21 | (*Encoder & Decoder*) 22 | 23 | 24 | mShift = {0.485, 0.456, 0.406}; 25 | vShift = {0.229, 0.224, 0.225}^2; 26 | encoder = NetEncoder[{"Image", 224, "MeanImage" -> mShift, "VarianceImage" -> vShift}] 27 | decoder = NetExtract[NetModel["ResNet-50 Trained on ImageNet Competition Data"], "Output"] 28 | 29 | 30 | (* ::Subchapter:: *) 31 | (*Pre-defined Structure*) 32 | 33 | 34 | getCN[i_] := ConvolutionLayer[ 35 | "Weights" -> params["arg:vgg0_conv" <> i <> "_weight"], 36 | "Biases" -> params["arg:vgg0_conv" <> i <> "_bias"], 37 | "PaddingSize" -> 1, "Stride" -> 1 38 | ] 39 | getFC[i_, n_] := LinearLayer[n, 40 | "Weights" -> params["arg:vgg0_dense" <> i <> "_weight"], 41 | "Biases" -> params["arg:vgg0_dense" <> i <> "_bias"] 42 | ] 43 | pool := PoolingLayer[{2, 2}, "Stride" -> 2] 44 | getBlock[i_, j_] := NetChain@Flatten[{ 45 | Table[{getCN[ToString@n], Ramp}, {n, i, j}], 46 | pool 47 | }] 48 | 49 | getBlock2[i_, j_] := NetChain@{getFC[ToString@i, j], Ramp, DropoutLayer[0.5]} 50 | 51 | 52 | 53 | (* ::Subchapter:: *) 54 | (*Main*) 55 | 56 | 57 | mainNet = NetChain[{ 58 | getBlock[0, 0], 59 | getBlock[1, 1], 60 | getBlock[2, 3], 61 | getBlock[4, 5], 62 | getBlock[6, 7], 63 | getBlock2[0, 4096], 64 | getBlock2[1, 4096], 65 | {getFC["2", 1000], SoftmaxLayer[]} 66 | }, 67 | "Input" -> encoder, "Output" -> decoder 68 | ] 69 | 70 | 71 | (* ::Subchapter:: *) 72 | (*Export Model*) 73 | 74 | 75 | Export["imagenet_vgg11.WXF", mainNet] 76 | -------------------------------------------------------------------------------- /ImageEnhancement/SuperResolution/SRResNet on CommonSR/SRResNet4x trained on CommonSR.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | Needs["MXNetLink`"] 5 | Needs["NeuralNetworks`"] 6 | DateString[] 7 | 8 | 9 | (* ::Subitem:: *) 10 | (*Sat 27 Oct 2018 15:02:51*) 11 | 12 | 13 | (* ::Subchapter:: *) 14 | (*Import Weights*) 15 | 16 | 17 | params = NDArrayImport["SRResNet_bicx4_in3nf64nb16-0000.params"]; 18 | 19 | 20 | (* ::Subchapter:: *) 21 | (*Encoder & Decoder*) 22 | 23 | 24 | encoder = NetEncoder[{"Image", {640, 360}}] 25 | decoder = NetDecoder["Image"] 26 | 27 | 28 | (* ::Subchapter::Closed:: *) 29 | (*Pre-defined Structure*) 30 
| 31 | 32 | getCN[i_, s_, p_] := ConvolutionLayer[ 33 | "Weights" -> params["arg:learned_" <> ToString[i]], 34 | "Biases" -> params["arg:learned_" <> ToString[i + 1]], 35 | "PaddingSize" -> p, "Stride" -> s 36 | ] 37 | getBlock[i_] := NetGraph[{ 38 | getCN[i, 1, 1], 39 | ElementwiseLayer["ReLU"], 40 | getCN[i + 2, 1, 1], 41 | ThreadingLayer[Plus] 42 | }, { 43 | NetPort["Input"] -> 1 -> 2 -> 3, 44 | {NetPort["Input"], 3} -> 4 45 | }]; 46 | $head = getCN[0, 1, 1]; 47 | $body = NetGraph[{ 48 | NetChain@Table[getBlock[i], {i, 2, 64, 4}], 49 | getCN[66, 1, 1], 50 | ThreadingLayer[Plus] 51 | }, { 52 | NetPort["Input"] -> 1 -> 2 , 53 | {NetPort["Input"], 2} -> 3 54 | }]; 55 | $tail = NetChain@{ 56 | getCN[68, 1, 1], 57 | PixelShuffleLayer[2], 58 | ElementwiseLayer["ReLU"], 59 | getCN[70, 1, 1], 60 | PixelShuffleLayer[2], 61 | ElementwiseLayer["ReLU"], 62 | getCN[72, 1, 1], 63 | ElementwiseLayer["ReLU"], 64 | getCN[74, 1, 1] 65 | }; 66 | 67 | 68 | (* ::Subchapter:: *) 69 | (*Main*) 70 | 71 | 72 | mainNet = NetChain[{$head, $body, $tail}] // NetFlatten; 73 | mainNet = NetReplacePart[mainNet, {"Input" -> encoder, "Output" -> decoder}] 74 | 75 | 76 | (* ::Subchapter:: *) 77 | (*Export Model*) 78 | 79 | 80 | Export["SRResNet4x trained on CommonSR.WXF", mainNet] 81 | -------------------------------------------------------------------------------- /Readme.md: -------------------------------------------------------------------------------- 1 | # NeuralNetworks-Zoo 2 | 3 | ![NeuralNetworks](https://img.shields.io/badge/NeuralNetworks-11.3.5-orange.svg) 4 | ![Release Version](https://img.shields.io/badge/Release-v0.3.x-ff69b4.svg) 5 | ![Models](https://img.shields.io/badge/Models-42-brightgreen.svg) 6 | ![Repo Size](https://img.shields.io/github/repo-size/GalAster/WLNet-ModelZoo.svg) 7 | 8 | 9 | 10 | ## Formats 11 | 12 | You can download these awesome models at https://m.vers.site/NetModel/ 13 | 14 | The following formats exist: 15 | 16 | - `*.WLNet` format 17 | 18 | The standard Wolfram Neural Networks model, with support for version upgrades. 19 | 20 | Can be exported directly to ONNX format. 21 | 22 | - `*.WXF` format 23 | 24 | If the model is more complicated, the official functionality is a bit stretched. 25 | 26 | These are extended models built with [DeepMath](https://github.com/Moe-Net/DeepMathFantasy); you must install `DeepMath` before you can use them normally. 27 | 28 | ```Mathematica 29 | PacletInstall@"https://github.com/Moe-Net/DeepMathFantasy/releases/download/v0.1.0/DeepMath-0.1.0.paclet"; 30 | << DeepMath`; 31 | DeepMath`Tools`LayersRegister[]; 32 | ``` 33 | 34 | Models in this format can also be exported directly to ONNX format. 35 | 36 | But I make no promise of backward compatibility. 37 | 38 | ## Request 39 | 40 | If you really like a fantasy model but it doesn't come in ONNX format, then you can make a `Request` on the issue page. 41 | 42 | I will try my best to convert that model. 43 | 44 | 45 | ## Contribution 46 | 47 | **All Pull Requests are welcome!** 48 | 49 | You can add readmes, introduce interesting usage examples, build unit tests, fix wrong references, etc.
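A quick check for any contributed `*.WLNet` model is an ONNX round-trip; a minimal sketch, assuming a Wolfram Language version whose `Export` supports ONNX (the file name below is a placeholder, not a file in this repository):

```Mathematica
(* "SomeModel.WLNet" is a hypothetical placeholder name *)
net = Import["SomeModel.WLNet"];
Export["SomeModel.onnx", net]
```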
56 | 57 | **Never upload images!** 58 | 59 | You can use an external link URL like https://sm.ms/ in markdown -------------------------------------------------------------------------------- /ImageEnhancement/SuperResolution/SRResNet on CommonSR/SRResNet8x trained on CommonSR.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | Needs["MXNetLink`"] 5 | Needs["NeuralNetworks`"] 6 | DateString[] 7 | 8 | 9 | (* ::Subitem:: *) 10 | (*Sat 27 Oct 2018 15:01:16*) 11 | 12 | 13 | (* ::Subchapter:: *) 14 | (*Import Weights*) 15 | 16 | 17 | params = NDArrayImport["SRResNet_bicx8_in3nf64nb16-0000.params"]; 18 | 19 | 20 | (* ::Subchapter:: *) 21 | (*Encoder & Decoder*) 22 | 23 | 24 | encoder = NetEncoder[{"Image", {640, 360}}] 25 | decoder = NetDecoder["Image"] 26 | 27 | 28 | (* ::Subchapter::Closed:: *) 29 | (*Pre-defined Structure*) 30 | 31 | 32 | getCN[i_, s_, p_] := ConvolutionLayer[ 33 | "Weights" -> params["arg:learned_" <> ToString[i]], 34 | "Biases" -> params["arg:learned_" <> ToString[i + 1]], 35 | "PaddingSize" -> p, "Stride" -> s 36 | ] 37 | getBlock[i_] := NetGraph[{ 38 | getCN[i, 1, 1], 39 | ElementwiseLayer["ReLU"], 40 | getCN[i + 2, 1, 1], 41 | ThreadingLayer[Plus] 42 | }, { 43 | NetPort["Input"] -> 1 -> 2 -> 3, 44 | {NetPort["Input"], 3} -> 4 45 | }]; 46 | $head = getCN[0, 1, 1]; 47 | $body = NetGraph[{ 48 | NetChain@Table[getBlock[i], {i, 2, 64, 4}], 49 | getCN[66, 1, 1], 50 | ThreadingLayer[Plus] 51 | }, { 52 | NetPort["Input"] -> 1 -> 2 , 53 | {NetPort["Input"], 2} -> 3 54 | }]; 55 | $tail = NetChain@{ 56 | getCN[68, 1, 1], 57 | PixelShuffleLayer[2], 58 | ElementwiseLayer["ReLU"], 59 | getCN[70, 1, 1], 60 | PixelShuffleLayer[2], 61 | ElementwiseLayer["ReLU"], 62 | getCN[72, 1, 1], 63 | PixelShuffleLayer[2], 64 | ElementwiseLayer["ReLU"], 65 | getCN[74, 1, 1], 66 | ElementwiseLayer["ReLU"], 67 | getCN[76, 1, 1] 68 | }; 69 | 70 | 71 | (* ::Subchapter:: *) 72 | (*Main*) 73 | 74 | 75 | mainNet = NetChain[{$head, $body, $tail}] // NetFlatten; 76 | mainNet = NetReplacePart[mainNet, {"Input" -> encoder, "Output" -> decoder}] 77 | 78 | 79 | (* ::Subchapter:: *) 80 | (*Export Model*) 81 | 82 | 83 | Export["SRResNet8x trained on CommonSR.WXF", mainNet] 84 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/AlexNet on ImageNet/AlexNet trained on ImageNet.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | Needs["MXNetLink`"] 5 | Needs["NeuralNetworks`"] 6 | DateString[] 7 | 8 | 9 | (* ::Subitem:: *) 10 | (*Thu 18 Oct 2018 19:08:29*) 11 | 12 | 13 | (* ::Subchapter:: *) 14 | (*Import Weights*) 15 | 16 | 17 | params = NDArrayImport["imagenet_alexnet-0000.params"]; 18 | 19 | 20 | (* ::Subchapter:: *) 21 | (*Encoder & Decoder*) 22 | 23 | 24 | mShift = {0.485, 0.456, 0.406}; 25 | vShift = {0.229, 0.224, 0.225}^2; 26 | encoder = NetEncoder[{"Image", 224, "MeanImage" -> mShift, "VarianceImage" -> vShift}] 27 | decoder = NetExtract[NetModel["ResNet-50 Trained on ImageNet Competition Data"], "Output"] 28 | 29 | 30 | (* ::Subchapter:: *) 31 | (*Pre-defined Structure*) 32 | 33 | 34 | getCN[i_, p_, s_] := ConvolutionLayer[ 35 | "Weights" -> params["arg:alexnet0_conv" <> i <> "_weight"], 36 | "Biases" -> params["arg:alexnet0_conv" <> i <> "_bias"], 37 | "PaddingSize" -> p, "Stride" -> s 38 | ] 39 | getFC[i_, n_] := LinearLayer[n, 40 | "Weights" -> 
params["arg:alexnet0_dense" <> i <> "_weight"], 41 | "Biases" -> params["arg:alexnet0_dense" <> i <> "_bias"] 42 | ] 43 | 44 | 45 | (* ::Subchapter:: *) 46 | (*Main*) 47 | 48 | 49 | mainNet = NetChain[{ 50 | getCN["0", 2, 4], 51 | ElementwiseLayer["ReLU"], 52 | PoolingLayer[{3, 3}, "Stride" -> 2], 53 | getCN["1", 2, 1], 54 | ElementwiseLayer["ReLU"], 55 | PoolingLayer[{3, 3}, "Stride" -> 2], 56 | getCN["2", 1, 1], 57 | ElementwiseLayer["ReLU"], 58 | getCN["3", 1, 1], 59 | ElementwiseLayer["ReLU"], 60 | getCN["4", 1, 1], 61 | ElementwiseLayer["ReLU"], 62 | PoolingLayer[{3, 3}, "Stride" -> 2], 63 | FlattenLayer[], 64 | getFC["0", 4096] 65 | , ElementwiseLayer["ReLU"], 66 | DropoutLayer[0.5], 67 | getFC["1", 4096], 68 | ElementwiseLayer["ReLU"], 69 | DropoutLayer[0.5], 70 | getFC["2", 1000], 71 | SoftmaxLayer[] 72 | }, 73 | "Input" -> encoder, 74 | "Output" -> decoder 75 | ] 76 | 77 | 78 | (* ::Subchapter:: *) 79 | (*Export Model*) 80 | 81 | 82 | Export["AlexNet trained on ImageNet.WXF", mainNet] 83 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/SqueezeNet on ImageNet/SqueezeNet1.0 trained on ImageNet.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | Needs["MXNetLink`"] 5 | Needs["NeuralNetworks`"] 6 | DateString[] 7 | 8 | 9 | (* ::Subitem:: *) 10 | (*Fri 26 Oct 2018 17:28:43*) 11 | 12 | 13 | (* ::Subchapter:: *) 14 | (*Import Weights*) 15 | 16 | 17 | params = NDArrayImport["squeezenet1.0-0000.params"]; 18 | 19 | 20 | (* ::Subchapter:: *) 21 | (*Encoder & Decoder*) 22 | 23 | 24 | mShift = {0.485, 0.456, 0.406}; 25 | vShift = {0.229, 0.224, 0.225}^2; 26 | encoder = NetEncoder[{"Image", 227, "MeanImage" -> mShift, "VarianceImage" -> vShift}] 27 | decoder = NetExtract[NetModel["ResNet-50 Trained on ImageNet Competition Data"], "Output"] 28 | 29 | 30 | (* ::Subchapter::Closed:: *) 31 | (*Pre-defined Structure*) 32 | 33 | 34 | getCN[i_, p_, s_] := ConvolutionLayer[ 35 | "Weights" -> params["arg:squeezenet0_conv" <> ToString[ i ] <> "_weight"], 36 | "Biases" -> params["arg:squeezenet0_conv" <> ToString[i] <> "_bias"], 37 | "PaddingSize" -> p, "Stride" -> s 38 | ] 39 | getBlock[i_] := NetGraph[{ 40 | NetGraph[{getCN[i, 0, 1], Ramp}, {1 -> 2}], 41 | NetGraph[{getCN[i + 1, 0, 1], Ramp}, {1 -> 2}], 42 | NetGraph[{getCN[i + 2, 1, 1], Ramp}, {1 -> 2}], 43 | CatenateLayer[] 44 | }, 45 | {NetPort["Input"] -> 1 -> {2, 3} -> 4} 46 | ] // NetFlatten 47 | 48 | 49 | (* ::Subchapter:: *) 50 | (*Main*) 51 | 52 | 53 | mainNet = NetChain[{ 54 | getCN[0, 0, 2], 55 | ElementwiseLayer["ReLU"], 56 | PoolingLayer[{3, 3}, "Stride" -> 2], 57 | NetChain@Table[getBlock[i], {i, 1, 7, 3}], 58 | PoolingLayer[{3, 3}, "Stride" -> 2], 59 | NetChain@Table[getBlock[i], {i, 10, 19, 3}], 60 | PoolingLayer[{3, 3}, "Stride" -> 2], 61 | NetChain@Table[getBlock[i], {i, 22, 22, 3}], 62 | DropoutLayer[0.5], 63 | getCN[25, 0, 1], 64 | ElementwiseLayer["ReLU"], 65 | PoolingLayer[{13, 13}, "Stride" -> 13, "Function" -> Mean], 66 | FlattenLayer[], 67 | SoftmaxLayer[] 68 | }, 69 | "Input" -> encoder, 70 | "Output" -> decoder 71 | ] 72 | 73 | 74 | (* ::Subchapter:: *) 75 | (*Export Model*) 76 | 77 | 78 | Export["SqueezeNet1.0 trained on ImageNet.WXF", mainNet] 79 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/SqueezeNet on ImageNet/SqueezeNet1.1 trained on ImageNet.m: 
-------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | Needs["MXNetLink`"] 5 | Needs["NeuralNetworks`"] 6 | DateString[] 7 | 8 | 9 | (* ::Subitem:: *) 10 | (*Fri 26 Oct 2018 17:37:15*) 11 | 12 | 13 | (* ::Subchapter:: *) 14 | (*Import Weights*) 15 | 16 | 17 | params = NDArrayImport["squeezenet1.1-0000.params"]; 18 | 19 | 20 | (* ::Subchapter:: *) 21 | (*Encoder & Decoder*) 22 | 23 | 24 | mShift = {0.485, 0.456, 0.406}; 25 | vShift = {0.229, 0.224, 0.225}^2; 26 | encoder = NetEncoder[{"Image", 227, "MeanImage" -> mShift, "VarianceImage" -> vShift}] 27 | decoder = NetExtract[NetModel["ResNet-50 Trained on ImageNet Competition Data"], "Output"] 28 | 29 | 30 | (* ::Subchapter::Closed:: *) 31 | (*Pre-defined Structure*) 32 | 33 | 34 | getCN[i_, p_, s_] := ConvolutionLayer[ 35 | "Weights" -> params["arg:squeezenet1_conv" <> ToString[ i ] <> "_weight"], 36 | "Biases" -> params["arg:squeezenet1_conv" <> ToString[i] <> "_bias"], 37 | "PaddingSize" -> p, "Stride" -> s 38 | ] 39 | getBlock[i_] := NetGraph[{ 40 | NetGraph[{getCN[i, 0, 1], Ramp}, {1 -> 2}], 41 | NetGraph[{getCN[i + 1, 0, 1], Ramp}, {1 -> 2}], 42 | NetGraph[{getCN[i + 2, 1, 1], Ramp}, {1 -> 2}], 43 | CatenateLayer[] 44 | }, 45 | {NetPort["Input"] -> 1 -> {2, 3} -> 4} 46 | ] // NetFlatten 47 | 48 | 49 | (* ::Subchapter:: *) 50 | (*Main*) 51 | 52 | 53 | mainNet = NetChain[{ 54 | getCN[0, 0, 2], 55 | ElementwiseLayer["ReLU"], 56 | PoolingLayer[{3, 3}, "Stride" -> 2], 57 | NetChain@Table[getBlock[i], {i, 1, 7, 3}], 58 | PoolingLayer[{3, 3}, "Stride" -> 2], 59 | NetChain@Table[getBlock[i], {i, 10, 19, 3}], 60 | PoolingLayer[{3, 3}, "Stride" -> 2], 61 | NetChain@Table[getBlock[i], {i, 22, 22, 3}], 62 | DropoutLayer[0.5], 63 | getCN[25, 0, 1], 64 | ElementwiseLayer["ReLU"], 65 | PoolingLayer[{13, 13}, "Stride" -> 13, "Function" -> Mean], 66 | FlattenLayer[], 67 | SoftmaxLayer[] 68 | }, 69 | "Input" -> encoder, 70 | "Output" -> decoder 71 | ] 72 | 73 | 74 | (* ::Subchapter:: *) 75 | (*Export Model*) 76 | 77 | 78 | Export["SqueezeNet1.1 trained on ImageNet.WXF", mainNet] 79 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/VGG-BN on ImageNet/imagenet_vgg11_bn.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | Needs["MXNetLink`"] 5 | Needs["NeuralNetworks`"] 6 | DateString[] 7 | 8 | 9 | (* ::Subitem:: *) 10 | (*Fri 19 Oct 2018 18:57:21*) 11 | 12 | 13 | (* ::Subchapter:: *) 14 | (*Import Weights*) 15 | 16 | 17 | params = NDArrayImport["imagenet_vgg11_bn-0000.params"]; 18 | 19 | 20 | (* ::Subchapter:: *) 21 | (*Encoder & Decoder*) 22 | 23 | 24 | mShift = {0.485, 0.456, 0.406}; 25 | vShift = {0.229, 0.224, 0.225}^2; 26 | encoder = NetEncoder[{"Image", 224, "MeanImage" -> mShift, "VarianceImage" -> vShift}] 27 | decoder = NetExtract[NetModel["ResNet-50 Trained on ImageNet Competition Data"], "Output"] 28 | 29 | 30 | (* ::Subchapter:: *) 31 | (*Pre-defined Structure*) 32 | 33 | 34 | getCN[i_] := ConvolutionLayer[ 35 | "Weights" -> params["arg:vgg0_conv" <> i <> "_weight"], 36 | "Biases" -> params["arg:vgg0_conv" <> i <> "_bias"], 37 | "PaddingSize" -> 1, "Stride" -> 1 38 | ]; 39 | getBN[i_] := BatchNormalizationLayer[ 40 | "Epsilon" -> 1*^-5, 41 | "Beta" -> params["arg:vgg0_batchnorm" <> i <> "_beta"], 42 | "Gamma" -> params["arg:vgg0_batchnorm" <> i <> "_gamma"], 43 | "MovingMean" -> 
params["aux:vgg0_batchnorm" <> i <> "_running_mean"], 44 | "MovingVariance" -> params["aux:vgg0_batchnorm" <> i <> "_running_var"] 45 | ] 46 | getFC[i_, n_] := LinearLayer[n, 47 | "Weights" -> params["arg:vgg0_dense" <> i <> "_weight"], 48 | "Biases" -> params["arg:vgg0_dense" <> i <> "_bias"] 49 | ]; 50 | getBlock[i_, j_] := NetChain@Flatten@Table[{getCN[ToString[n]], getBN[ToString[n]], Ramp}, {n, i, j}]; 51 | getBlock2[i_, j_] := NetChain@{getFC[ToString@i, j], Ramp, DropoutLayer[0.5]}; 52 | pool = PoolingLayer[{2, 2}, "Stride" -> 2]; 53 | 54 | 55 | (* ::Subchapter:: *) 56 | (*Main*) 57 | 58 | 59 | mainNet = NetChain[{ 60 | getBlock[0, 0], pool, 61 | getBlock[1, 1], pool, 62 | getBlock[2, 3], pool, 63 | getBlock[4, 5], pool, 64 | getBlock[6, 7], pool, 65 | getBlock2[0, 4096], 66 | getBlock2[1, 4096], 67 | getFC["2", 1000] 68 | }, 69 | "Input" -> encoder, "Output" -> decoder 70 | ] 71 | 72 | 73 | (* ::Subchapter:: *) 74 | (*Export Model*) 75 | 76 | 77 | Export["imagenet_vgg11_bn.WXF", mainNet] 78 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/VGG-BN on ImageNet/imagenet_vgg13_bn.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | Needs["MXNetLink`"] 5 | Needs["NeuralNetworks`"] 6 | DateString[] 7 | 8 | 9 | (* ::Subitem:: *) 10 | (*Fri 19 Oct 2018 18:57:08*) 11 | 12 | 13 | (* ::Subchapter:: *) 14 | (*Import Weights*) 15 | 16 | 17 | params = NDArrayImport["imagenet_vgg13_bn-0000.params"]; 18 | 19 | 20 | (* ::Subchapter:: *) 21 | (*Encoder & Decoder*) 22 | 23 | 24 | mShift = {0.485, 0.456, 0.406}; 25 | vShift = {0.229, 0.224, 0.225}^2; 26 | encoder = NetEncoder[{"Image", 224, "MeanImage" -> mShift, "VarianceImage" -> vShift}] 27 | decoder = NetExtract[NetModel["ResNet-50 Trained on ImageNet Competition Data"], "Output"] 28 | 29 | 30 | (* ::Subchapter:: *) 31 | (*Pre-defined Structure*) 32 | 33 | 34 | getCN[i_] := ConvolutionLayer[ 35 | "Weights" -> params["arg:vgg1_conv" <> i <> "_weight"], 36 | "Biases" -> params["arg:vgg1_conv" <> i <> "_bias"], 37 | "PaddingSize" -> 1, "Stride" -> 1 38 | ]; 39 | getBN[i_] := BatchNormalizationLayer[ 40 | "Epsilon" -> 1*^-5, 41 | "Beta" -> params["arg:vgg1_batchnorm" <> i <> "_beta"], 42 | "Gamma" -> params["arg:vgg1_batchnorm" <> i <> "_gamma"], 43 | "MovingMean" -> params["aux:vgg1_batchnorm" <> i <> "_running_mean"], 44 | "MovingVariance" -> params["aux:vgg1_batchnorm" <> i <> "_running_var"] 45 | ] 46 | getFC[i_, n_] := LinearLayer[n, 47 | "Weights" -> params["arg:vgg1_dense" <> i <> "_weight"], 48 | "Biases" -> params["arg:vgg1_dense" <> i <> "_bias"] 49 | ]; 50 | getBlock[i_, j_] := NetChain@Flatten@Table[{getCN[ToString[n]], getBN[ToString[n]], Ramp}, {n, i, j}]; 51 | getBlock2[i_, j_] := NetChain@{getFC[ToString@i, j], Ramp, DropoutLayer[0.5]}; 52 | pool = PoolingLayer[{2, 2}, "Stride" -> 2]; 53 | 54 | 55 | (* ::Subchapter:: *) 56 | (*Main*) 57 | 58 | 59 | mainNet = NetChain[{ 60 | getBlock[0, 1], pool, 61 | getBlock[2, 3], pool, 62 | getBlock[4, 5], pool, 63 | getBlock[6, 7], pool, 64 | getBlock[8, 9], pool, 65 | getBlock2[0, 4096], 66 | getBlock2[1, 4096], 67 | getFC["2", 1000] 68 | }, 69 | "Input" -> encoder, "Output" -> decoder 70 | ] 71 | 72 | 73 | (* ::Subchapter:: *) 74 | (*Export Model*) 75 | 76 | 77 | Export["imagenet_vgg13_bn.WXF", mainNet] 78 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/VGG-BN on 
ImageNet/imagenet_vgg16_bn.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | Needs["MXNetLink`"] 5 | Needs["NeuralNetworks`"] 6 | DateString[] 7 | 8 | 9 | (* ::Subitem:: *) 10 | (*Fri 19 Oct 2018 18:55:29*) 11 | 12 | 13 | (* ::Subchapter:: *) 14 | (*Import Weights*) 15 | 16 | 17 | params = NDArrayImport["imagenet_vgg16_bn-0000.params"]; 18 | 19 | 20 | (* ::Subchapter:: *) 21 | (*Encoder & Decoder*) 22 | 23 | 24 | mShift = {0.485, 0.456, 0.406}; 25 | vShift = {0.229, 0.224, 0.225}^2; 26 | encoder = NetEncoder[{"Image", 224, "MeanImage" -> mShift, "VarianceImage" -> vShift}] 27 | decoder = NetExtract[NetModel["ResNet-50 Trained on ImageNet Competition Data"], "Output"] 28 | 29 | 30 | (* ::Subchapter:: *) 31 | (*Pre-defined Structure*) 32 | 33 | 34 | getCN[i_] := ConvolutionLayer[ 35 | "Weights" -> params["arg:vgg2_conv" <> i <> "_weight"], 36 | "Biases" -> params["arg:vgg2_conv" <> i <> "_bias"], 37 | "PaddingSize" -> 1, "Stride" -> 1 38 | ]; 39 | getBN[i_] := BatchNormalizationLayer[ 40 | "Epsilon" -> 1*^-5, 41 | "Beta" -> params["arg:vgg2_batchnorm" <> i <> "_beta"], 42 | "Gamma" -> params["arg:vgg2_batchnorm" <> i <> "_gamma"], 43 | "MovingMean" -> params["aux:vgg2_batchnorm" <> i <> "_running_mean"], 44 | "MovingVariance" -> params["aux:vgg2_batchnorm" <> i <> "_running_var"] 45 | ] 46 | getFC[i_, n_] := LinearLayer[n, 47 | "Weights" -> params["arg:vgg2_dense" <> i <> "_weight"], 48 | "Biases" -> params["arg:vgg2_dense" <> i <> "_bias"] 49 | ]; 50 | getBlock[i_, j_] := NetChain@Flatten@Table[{getCN[ToString[n]], getBN[ToString[n]], Ramp}, {n, i, j}]; 51 | getBlock2[i_, j_] := NetChain@{getFC[ToString@i, j], Ramp, DropoutLayer[0.5]}; 52 | pool = PoolingLayer[{2, 2}, "Stride" -> 2]; 53 | 54 | 55 | (* ::Subchapter:: *) 56 | (*Main*) 57 | 58 | 59 | mainNet = NetChain[{ 60 | getBlock[0, 1], pool, 61 | getBlock[2, 3], pool, 62 | getBlock[4, 6], pool, 63 | getBlock[7, 9], pool, 64 | getBlock[10, 12], pool, 65 | getBlock2[0, 4096], 66 | getBlock2[1, 4096], 67 | getFC["2", 1000] 68 | }, 69 | "Input" -> encoder, "Output" -> decoder 70 | ] 71 | 72 | 73 | (* ::Subchapter:: *) 74 | (*Export Model*) 75 | 76 | 77 | Export["imagenet_vgg16_bn.WXF", mainNet] 78 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/VGG-BN on ImageNet/imagenet_vgg19_bn.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | Needs["MXNetLink`"] 5 | Needs["NeuralNetworks`"] 6 | DateString[] 7 | 8 | 9 | (* ::Subitem:: *) 10 | (*Fri 19 Oct 2018 18:53:05*) 11 | 12 | 13 | (* ::Subchapter:: *) 14 | (*Import Weights*) 15 | 16 | 17 | params = NDArrayImport["imagenet_vgg19_bn-0000.params"]; 18 | 19 | 20 | (* ::Subchapter:: *) 21 | (*Encoder & Decoder*) 22 | 23 | 24 | mShift = {0.485, 0.456, 0.406}; 25 | vShift = {0.229, 0.224, 0.225}^2; 26 | encoder = NetEncoder[{"Image", 224, "MeanImage" -> mShift, "VarianceImage" -> vShift}] 27 | decoder = NetExtract[NetModel["ResNet-50 Trained on ImageNet Competition Data"], "Output"] 28 | 29 | 30 | (* ::Subchapter:: *) 31 | (*Pre-defined Structure*) 32 | 33 | 34 | getCN[i_] := ConvolutionLayer[ 35 | "Weights" -> params["arg:vgg3_conv" <> i <> "_weight"], 36 | "Biases" -> params["arg:vgg3_conv" <> i <> "_bias"], 37 | "PaddingSize" -> 1, "Stride" -> 1 38 | ]; 39 | getBN[i_] := BatchNormalizationLayer[ 40 | "Epsilon" -> 1*^-5, 41 | "Beta" -> 
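(* The conv/batchnorm indices are global across the whole network, so each
VGG variant is just a different split into five stages: vgg11 uses
{0}, {1}, {2-3}, {4-5}, {6-7}; vgg13 uses {0-1}, {2-3}, {4-5}, {6-7}, {8-9};
vgg16 adds a third convolution to the last three stages; and vgg19 below
uses {0-1}, {2-3}, {4-7}, {8-11}, {12-15}, i.e. 16 convolutions plus three
dense layers. After construction this is easy to verify:
  Count[Values@NetInformation[mainNet, "Layers"], _ConvolutionLayer]
should return 16 for the vgg19 net. *)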
params["arg:vgg3_batchnorm" <> i <> "_beta"], 42 | "Gamma" -> params["arg:vgg3_batchnorm" <> i <> "_gamma"], 43 | "MovingMean" -> params["aux:vgg3_batchnorm" <> i <> "_running_mean"], 44 | "MovingVariance" -> params["aux:vgg3_batchnorm" <> i <> "_running_var"] 45 | ] 46 | getFC[i_, n_] := LinearLayer[n, 47 | "Weights" -> params["arg:vgg3_dense" <> i <> "_weight"], 48 | "Biases" -> params["arg:vgg3_dense" <> i <> "_bias"] 49 | ]; 50 | getBlock[i_, j_] := NetChain@Flatten@Table[{getCN[ToString[n]], getBN[ToString[n]], Ramp}, {n, i, j}]; 51 | getBlock2[i_, j_] := NetChain@{getFC[ToString@i, j], Ramp, DropoutLayer[0.5]}; 52 | pool = PoolingLayer[{2, 2}, "Stride" -> 2]; 53 | 54 | 55 | (* ::Subchapter:: *) 56 | (*Main*) 57 | 58 | 59 | mainNet = NetChain[{ 60 | getBlock[0, 1], pool, 61 | getBlock[2, 3], pool, 62 | getBlock[4, 7], pool, 63 | getBlock[8, 11], pool, 64 | getBlock[12, 15], pool, 65 | getBlock2[0, 4096], 66 | getBlock2[1, 4096], 67 | getFC["2", 1000] 68 | }, 69 | "Input" -> encoder, "Output" -> decoder 70 | ] 71 | 72 | 73 | (* ::Subchapter:: *) 74 | (*Export Model*) 75 | 76 | 77 | Export["imagenet_vgg19_bn.WXF", mainNet] 78 | -------------------------------------------------------------------------------- /ImageEnhancement/SuperResolution/SRGAN trained on VOC/SRGAN2x trained on VOC.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | Needs["MXNetLink`"] 5 | Needs["NeuralNetworks`"] 6 | DateString[] 7 | 8 | 9 | (* ::Subitem:: *) 10 | (*Sat 27 Oct 2018 15:08:10*) 11 | 12 | 13 | (* ::Subchapter:: *) 14 | (*Import Weights*) 15 | 16 | 17 | params = NDArrayImport["SRGAN_2x-0000.params"]; 18 | 19 | 20 | (* ::Subchapter:: *) 21 | (*Encoder & Decoder*) 22 | 23 | 24 | encoder = NetEncoder[{"Image", {640, 360}}] 25 | decoder = NetDecoder["Image"] 26 | 27 | 28 | (* ::Subchapter::Closed:: *) 29 | (*Pre-defined Structure*) 30 | 31 | 32 | getCN[i_, s_, p_] := ConvolutionLayer[ 33 | "Weights" -> params["arg:learned_" <> ToString[i]], 34 | "Biases" -> params["arg:learned_" <> ToString[i + 1]], 35 | "PaddingSize" -> p, "Stride" -> s 36 | ] 37 | getBN[i_] := BatchNormalizationLayer[ 38 | "Epsilon" -> 1*^-5, 39 | "Gamma" -> params["arg:learned_" <> ToString[i]], 40 | "Beta" -> params["arg:learned_" <> ToString[i + 1]], 41 | "MovingMean" -> params["aux:learned_" <> ToString[i + 2]], 42 | "MovingVariance" -> params["aux:learned_" <> ToString[i + 3]] 43 | ] 44 | 45 | (*Notice that there is no single prelu in WLNet*) 46 | getPrelu[i_] := ParametricRampLayer[ 47 | "Slope" -> Flatten@ConstantArray[Normal@params["arg:learned_" <> ToString[i]], 64] 48 | ] 49 | 50 | getBlock[i_] := NetGraph[{ 51 | getCN[i, 1, 1], 52 | getBN[i + 2], 53 | getPrelu[i + 6], 54 | getCN[i + 7, 1, 1], 55 | getBN[i + 9], 56 | ThreadingLayer[Plus] 57 | }, { 58 | NetPort["Input"] -> 1 -> 2 -> 3 -> 4 -> 5, 59 | {NetPort["Input"], 5} -> 6 60 | }]; 61 | 62 | 63 | $head = NetChain@{ 64 | getCN[0, 1, 4], 65 | getPrelu[2] 66 | }; 67 | $body = NetGraph[{ 68 | NetChain@Table[getBlock[i], {i, 3, 55, 13} ], 69 | getCN[68, 1, 1], 70 | getPrelu[70], 71 | ThreadingLayer[Plus] 72 | }, { 73 | NetPort["Input"] -> 1 -> 2 -> 3 , 74 | {NetPort["Input"], 3} -> 4 75 | }]; 76 | $tail = NetChain@{ 77 | getCN[71, 1, 1], 78 | PixelShuffleLayer[2], 79 | getPrelu[73], 80 | getCN[74, 1, 4], 81 | (*LogisticSigmoid[2x]==(Tanh[x]+1)/2*) 82 | ElementwiseLayer[(Tanh[#] + 1) / 2&] 83 | }; 84 | 85 | 86 | (* ::Subchapter:: *) 87 | (*Main*) 88 | 89 | 90 | mainNet = 
NetChain[{$head, $body, $tail}] // NetFlatten; 91 | mainNet = NetReplacePart[mainNet, {"Input" -> encoder, "Output" -> decoder}] 92 | 93 | 94 | (* ::Subchapter:: *) 95 | (*Export Model*) 96 | 97 | 98 | Export["SRGAN2x trained on VOC.WXF", mainNet] 99 | -------------------------------------------------------------------------------- /ImageEnhancement/SuperResolution/SRGAN trained on VOC/SRGAN4x trained on VOC.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | Needs["MXNetLink`"] 5 | Needs["NeuralNetworks`"] 6 | DateString[] 7 | 8 | 9 | (* ::Subitem:: *) 10 | (*Sat 27 Oct 2018 18:17:37*) 11 | 12 | 13 | (* ::Subchapter:: *) 14 | (*Import Weights*) 15 | 16 | 17 | params = NDArrayImport["SRGAN_4x-0000.params"]; 18 | 19 | 20 | (* ::Subchapter:: *) 21 | (*Encoder & Decoder*) 22 | 23 | 24 | encoder = NetEncoder[{"Image", {640, 360}}] 25 | decoder = NetDecoder["Image"] 26 | 27 | 28 | (* ::Subchapter::Closed:: *) 29 | (*Pre-defined Structure*) 30 | 31 | 32 | getCN[i_, s_, p_] := ConvolutionLayer[ 33 | "Weights" -> params["arg:learned_" <> ToString[i]], 34 | "Biases" -> params["arg:learned_" <> ToString[i + 1]], 35 | "PaddingSize" -> p, "Stride" -> s 36 | ] 37 | getBN[i_] := BatchNormalizationLayer[ 38 | "Epsilon" -> 1*^-5, 39 | "Gamma" -> params["arg:learned_" <> ToString[i]], 40 | "Beta" -> params["arg:learned_" <> ToString[i + 1]], 41 | "MovingMean" -> params["aux:learned_" <> ToString[i + 2]], 42 | "MovingVariance" -> params["aux:learned_" <> ToString[i + 3]] 43 | ] 44 | 45 | (*Notice that there is no single prelu in WLNet*) 46 | getPrelu[i_] := ParametricRampLayer[ 47 | "Slope" -> Flatten@ConstantArray[Normal@params["arg:learned_" <> ToString[i]], 64] 48 | ] 49 | 50 | getBlock[i_] := NetGraph[{ 51 | getCN[i, 1, 1], 52 | getBN[i + 2], 53 | getPrelu[i + 6], 54 | getCN[i + 7, 1, 1], 55 | getBN[i + 9], 56 | ThreadingLayer[Plus] 57 | }, { 58 | NetPort["Input"] -> 1 -> 2 -> 3 -> 4 -> 5, 59 | {NetPort["Input"], 5} -> 6 60 | }]; 61 | 62 | 63 | $head = NetChain@{ 64 | getCN[0, 1, 4], 65 | getPrelu[2] 66 | }; 67 | $body = NetGraph[{ 68 | NetChain@Table[getBlock[i], {i, 3, 55, 13} ], 69 | getCN[68, 1, 1], 70 | getPrelu[70], 71 | ThreadingLayer[Plus] 72 | }, { 73 | NetPort["Input"] -> 1 -> 2 -> 3 , 74 | {NetPort["Input"], 3} -> 4 75 | }]; 76 | $tail = NetChain@{ 77 | getCN[71, 1, 1], 78 | PixelShuffleLayer[2], 79 | getPrelu[73], 80 | getCN[74, 1, 1], 81 | PixelShuffleLayer[2], 82 | getPrelu[76], 83 | getCN[77, 1, 4], 84 | (*LogisticSigmoid[2x]==(Tanh[x]+1)/2*) 85 | ElementwiseLayer[(Tanh[#] + 1) / 2&] 86 | }; 87 | 88 | 89 | (* ::Subchapter:: *) 90 | (*Main*) 91 | 92 | 93 | mainNet = NetChain[{$head, $body, $tail}] // NetFlatten; 94 | mainNet = NetReplacePart[mainNet, {"Input" -> encoder, "Output" -> decoder}] 95 | 96 | 97 | (* ::Subchapter:: *) 98 | (*Export Model*) 99 | 100 | 101 | Export["SRGAN4x trained on VOC.WXF", mainNet] 102 | -------------------------------------------------------------------------------- /ImageEnhancement/SuperResolution/SRGAN trained on VOC/SRGAN8x trained on VOC.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | Needs["MXNetLink`"] 5 | Needs["NeuralNetworks`"] 6 | DateString[] 7 | 8 | 9 | (* ::Subitem:: *) 10 | (*Sat 27 Oct 2018 18:15:08*) 11 | 12 | 13 | (* ::Subchapter:: *) 14 | (*Import Weights*) 15 | 16 | 17 | params = NDArrayImport["SRGAN_8x-0000.params"]; 18 | 19 | 20 | (* 
::Subchapter:: *) 21 | (*Encoder & Decoder*) 22 | 23 | 24 | encoder = NetEncoder[{"Image", {640, 360}}] 25 | decoder = NetDecoder["Image"] 26 | 27 | 28 | (* ::Subchapter:: *) 29 | (*Pre-defined Structure*) 30 | 31 | 32 | getCN[i_, s_, p_] := ConvolutionLayer[ 33 | "Weights" -> params["arg:learned_" <> ToString[i]], 34 | "Biases" -> params["arg:learned_" <> ToString[i + 1]], 35 | "PaddingSize" -> p, "Stride" -> s 36 | ] 37 | getBN[i_] := BatchNormalizationLayer[ 38 | "Epsilon" -> 1*^-5, 39 | "Gamma" -> params["arg:learned_" <> ToString[i]], 40 | "Beta" -> params["arg:learned_" <> ToString[i + 1]], 41 | "MovingMean" -> params["aux:learned_" <> ToString[i + 2]], 42 | "MovingVariance" -> params["aux:learned_" <> ToString[i + 3]] 43 | ] 44 | 45 | (*Notice that there is no single prelu in WLNet*) 46 | getPrelu[i_] := ParametricRampLayer[ 47 | "Slope" -> Flatten@ConstantArray[Normal@params["arg:learned_" <> ToString[i]], 64] 48 | ] 49 | 50 | getBlock[i_] := NetGraph[{ 51 | getCN[i, 1, 1], 52 | getBN[i + 2], 53 | getPrelu[i + 6], 54 | getCN[i + 7, 1, 1], 55 | getBN[i + 9], 56 | ThreadingLayer[Plus] 57 | }, { 58 | NetPort["Input"] -> 1 -> 2 -> 3 -> 4 -> 5, 59 | {NetPort["Input"], 5} -> 6 60 | }]; 61 | 62 | 63 | $head = NetChain@{ 64 | getCN[0, 1, 4], 65 | getPrelu[2] 66 | }; 67 | $body = NetGraph[{ 68 | NetChain@Table[getBlock[i], {i, 3, 55, 13} ], 69 | getCN[68, 1, 1], 70 | getPrelu[70], 71 | ThreadingLayer[Plus] 72 | }, { 73 | NetPort["Input"] -> 1 -> 2 -> 3 , 74 | {NetPort["Input"], 3} -> 4 75 | }]; 76 | $tail = NetChain@{ 77 | getCN[71, 1, 1], 78 | PixelShuffleLayer[2], 79 | getPrelu[73], 80 | getCN[74, 1, 1], 81 | PixelShuffleLayer[2], 82 | getPrelu[76], 83 | getCN[77, 1, 1], 84 | PixelShuffleLayer[2], 85 | getPrelu[79], 86 | getCN[80, 1, 4], 87 | (*LogisticSigmoid[2x]==(Tanh[x]+1)/2*) 88 | ElementwiseLayer[(Tanh[#] + 1) / 2&] 89 | }; 90 | 91 | 92 | (* ::Subchapter:: *) 93 | (*Main*) 94 | 95 | 96 | mainNet = NetChain[{$head, $body, $tail}] // NetFlatten; 97 | mainNet = NetReplacePart[mainNet, {"Input" -> encoder, "Output" -> decoder}] 98 | 99 | 100 | (* ::Subchapter:: *) 101 | (*Export Model*) 102 | 103 | 104 | Export["SRGAN8x trained on VOC.WXF", mainNet] 105 | -------------------------------------------------------------------------------- /ImageEnhancement/Debluring/SHC Text Deblur on Custom/SHC Text Deblur trained on Custom.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | Needs["MXNetLink`"] 5 | Needs["NeuralNetworks`"] 6 | DateString[] 7 | 8 | 9 | (* ::Subitem:: *) 10 | (*Sat 3 Nov 2018 22:22:07*) 11 | 12 | 13 | (* ::Subchapter:: *) 14 | (*Import Weights*) 15 | 16 | 17 | raw = Import["DeblurSHC19ConvLayers.hdf5", "Data"]; 18 | params[name_String] := Block[ 19 | {prefix, input}, 20 | prefix = "/model_weights/model_1/"; 21 | input = raw[prefix <> name]; 22 | Switch[ 23 | Length@Dimensions@input, 24 | 1, RawArray["Real32", input], 25 | 4, RawArray["Real32", TransposeLayer[{1<->4, 2<->3, 3<->4}][input]], 26 | _, RawArray["Real32", input] 27 | ] 28 | ] 29 | 30 | 31 | (* ::Subchapter:: *) 32 | (*Encoder & Decoder*) 33 | 34 | 35 | mShift = {0, 0, 0}; 36 | vShift = {1, 1, 1}^2; 37 | encoder = NetEncoder[{"Image", {640, 360}, ColorSpace -> "Grayscale"}] 38 | decoder = NetDecoder["Image"] 39 | 40 | 41 | (* ::Subchapter::Closed:: *) 42 | (*Pre-defined Structure*) 43 | 44 | 45 | getCN[i_Integer, p_Integer, s_Integer] := ConvolutionLayer[ 46 | "Weights" -> params["conv2d_" <> ToString[i] <> 
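(* The Switch in params above fixes a layout mismatch: Keras/TensorFlow
store 4D convolution kernels as (h, w, in, out) while WL expects
(out, in, h, w). Tracing TransposeLayer[{1<->4, 2<->3, 3<->4}] on a
hypothetical (3, 5, 64, 128) kernel: 1<->4 gives (128, 5, 64, 3), then
2<->3 gives (128, 64, 5, 3), then 3<->4 gives (128, 64, 3, 5), which is
exactly out/in/h/w. Rank-1 bias vectors fall through unchanged. *)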
"/kernel:0"], 47 | "Biases" -> params["conv2d_" <> ToString[i] <> "/bias:0"], 48 | "PaddingSize" -> p, "Stride" -> s 49 | ]; 50 | getBN[i_Integer] := BatchNormalizationLayer[ 51 | "Momentum" -> 0.99, 52 | "Beta" -> params["batch_normalization_" <> ToString[i] <> "/beta:0"], 53 | "Gamma" -> params["batch_normalization_" <> ToString[i] <> "/gamma:0"], 54 | "MovingMean" -> params["batch_normalization_" <> ToString[i] <> "/moving_mean:0"], 55 | "MovingVariance" -> params["batch_normalization_" <> ToString[i] <> "/moving_variance:0"] 56 | ]; 57 | getBlock[i_] := NetChain[{ 58 | getBN[i], 59 | ElementwiseLayer["ReLU"], 60 | getCN[i + 1, 1, 1] 61 | }]; 62 | 63 | 64 | $body = NetGraph[ 65 | Flatten@{ 66 | getCN[3, 1, 1], 67 | Table[{getBlock[j], ThreadingLayer[Plus]}, {j, 3, 17}] 68 | }, 69 | Join[ 70 | {NetPort["Input"] -> 1 -> 2}, 71 | Table[{NetPort["Input"], j} -> j + 1 -> j + 2, {j, 2, 28, 2}], 72 | {{NetPort["Input"], 30} -> 31} 73 | ] 74 | ]; 75 | 76 | 77 | (* ::Subchapter:: *) 78 | (*Main*) 79 | 80 | 81 | mainNet = NetChain[{ 82 | PaddingLayer[{{0, 0}, {12, 12}, {12, 12}}, "Padding" -> 1], 83 | getCN[1, 0, 1], 84 | getBN[1], 85 | ElementwiseLayer["ReLU"], 86 | getCN[2, 1, 1], 87 | getBN[2], 88 | ElementwiseLayer["ReLU"], 89 | $body, 90 | getBN[18], 91 | ElementwiseLayer["ReLU"], 92 | getCN[19, 1, 1] 93 | }, 94 | "Input" -> encoder, 95 | "Output" -> decoder 96 | ] 97 | 98 | 99 | (* ::Subchapter:: *) 100 | (*Export Model*) 101 | 102 | 103 | Export["SHC Text Deblur trained on Custom.WXF", mainNet] 104 | -------------------------------------------------------------------------------- /ImageRecognition/Anime/VGGs trained on Danbooru2017/Illustration2Vec trained on Danbooru.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | << NeuralNetworks` 5 | << MXNetLink` 6 | << DeepMath` 7 | DateString[] 8 | 9 | 10 | (* ::Subitem:: *) 11 | (*Sat 22 Dec 2018 20:28:50*) 12 | 13 | 14 | (* ::Subchapter:: *) 15 | (*Import Weights*) 16 | 17 | 18 | params = Import["illust2vec_tag_ver200.caffemodel.wxf"]; 19 | 20 | 21 | (* ::Subchapter:: *) 22 | (*Encoder & Decoder*) 23 | 24 | 25 | meanImage = Image[Normal[Import@"image_mean.npy.wxf"] / 255, Interleaving -> False]; 26 | meanChannel = { 27 | 0.6461231078823529`, 28 | 0.6567790045882352`, 29 | 0.7103466105490196` 30 | }; 31 | tags = Import["tag_list.json"]; 32 | 33 | 34 | (* ::Subchapter::Closed:: *) 35 | (*Pre-defined Structure*) 36 | 37 | 38 | ReLU = ElementwiseLayer["ReLU"]; 39 | Pooling = PoolingLayer[{2, 2}, "Stride" -> 2, "Function" -> Max]; 40 | getCN[name_String, p_ : 1, s_ : 1] := ConvolutionLayer[ 41 | "Weights" -> params[name <> "_1"], 42 | "Biases" -> params[name <> "_2"], 43 | "PaddingSize" -> p, "Stride" -> s 44 | ]; 45 | getBlock[i_, j_] := NetChain@{ 46 | getCN["conv" <> ToString[i] <> "_" <> ToString@j, 1, 1], 47 | ReLU 48 | }; 49 | getBlock2[a_, b_] := NetChain[ 50 | { PartLayer[a ;; b], ElementwiseLayer[Clip]}, 51 | "Input" -> 1539, 52 | "Output" -> NetDecoder[{"Class", Capitalize@tags[[a ;; b]]}] 53 | ]; 54 | 55 | 56 | (* ::Subchapter:: *) 57 | (*Main*) 58 | 59 | 60 | input = NetEncoder[{"Image", 224, "MeanImage" -> meanImage, "VarianceImage" -> 1 / 255}]; 61 | extractor = NetChain[{ 62 | Table[getBlock[1, j], {j, 1}], 63 | Pooling, 64 | Table[getBlock[2, j], {j, 1}], 65 | Pooling, 66 | Table[getBlock[3, j], {j, 2}], 67 | Pooling, 68 | Table[getBlock[4, j], {j, 2}], 69 | Pooling, 70 | Table[getBlock[5, j], {j, 2}], 71 | Pooling, 72 | 
Table[getBlock[6, j], {j, 3}] 73 | }, 74 | "Input" -> input 75 | ]; 76 | classifier = { 77 | DropoutLayer[0.5], 78 | getBlock[6, 4], 79 | AggregationLayer@Mean 80 | }; 81 | general = getBlock2[1, 512]; 82 | character = getBlock2[513, 1024]; 83 | copyright = getBlock2[1025, 1536]; 84 | rating = getBlock2[1537, 1539]; 85 | 86 | mainNet = NetGraph[{ 87 | "Extractor" -> extractor, 88 | "Classifier" -> classifier, 89 | "General" -> general, 90 | "Character" -> character, 91 | "Copyright" -> copyright, 92 | "Rating" -> rating 93 | }, 94 | { 95 | "Extractor" -> "Classifier" -> { 96 | "General" -> NetPort["General"], 97 | "Character" -> NetPort["Character"], 98 | "Copyright" -> NetPort["Copyright"], 99 | "Rating" -> NetPort["Rating"] 100 | } 101 | } 102 | ] 103 | 104 | 105 | (* ::Subchapter:: *) 106 | (*Export Model*) 107 | 108 | 109 | Export["Illustration2Vec trained on Danbooru.WLNet", mainNet] 110 | -------------------------------------------------------------------------------- /ImageEnhancement/Mixed/CBDNet on CommonNoise/CBDNet trained on CommonNoise.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | << NeuralNetworks` 5 | << MXNetLink` 6 | << DeepMath` 7 | DateString[] 8 | 9 | 10 | (* ::Subitem:: *) 11 | (*Tue 25 Nov 2018 21:16:33*) 12 | 13 | 14 | (* ::Subchapter:: *) 15 | (*Import Weights*) 16 | 17 | 18 | params = NDArrayImport["CBDNet-0000.params"]; 19 | 20 | 21 | (* ::Subchapter:: *) 22 | (*Encoder & Decoder*) 23 | 24 | 25 | encoder = NetEncoder[{"Image", {640, 360}}] 26 | decoder = NetDecoder["Image"] 27 | 28 | 29 | (* ::Subchapter:: *) 30 | (*Pre-defined Structure*) 31 | 32 | 33 | getCN[i_, p_, s_] := ConvolutionLayer[ 34 | "Weights" -> params["arg:convolution" <> ToString@i <> "_weight"], 35 | "Biases" -> params["arg:convolution" <> ToString@i <> "_bias"], 36 | "PaddingSize" -> p, "Stride" -> s 37 | ]; 38 | getDN[i_, p_, s_] := DeconvolutionLayer[ 39 | "Weights" -> params["arg:deconvolution" <> ToString@i <> "_weight"], 40 | "Biases" -> None, "PaddingSize" -> p, "Stride" -> s 41 | ]; 42 | 43 | 44 | sub = Flatten@Table[{getCN[i, 1, 1], ElementwiseLayer["ReLU"]}, {i, 0, 4}]; 45 | estimate = NetMerge[NetChain@sub, Join, Expand -> All]; 46 | line1 = NetFlatten@NetChain@Table[{getCN[i, 1, 1], ElementwiseLayer["ReLU"]}, {i, 5, 8}]; 47 | line2 = NetChain@Flatten@{ 48 | {getCN[9, 0, 2], ElementwiseLayer["ReLU"]}, 49 | {getCN[10, 0, 1], ElementwiseLayer["ReLU"]}, 50 | Table[{getCN[i, 1, 1], ElementwiseLayer["ReLU"]}, {i, 11, 13}] 51 | }; 52 | line3 = NetChain@Flatten@{ 53 | {getCN[14, 0, 2], ElementwiseLayer["ReLU"]}, 54 | {getCN[15, 0, 1], ElementwiseLayer["ReLU"]}, 55 | Table[{getCN[i, 1, 1], ElementwiseLayer["ReLU"]}, {i, 16, 22}], 56 | getDN[0, 0, 2] 57 | }; 58 | line4 = NetChain@Flatten@{ 59 | Table[{getCN[i, 1, 1], ElementwiseLayer["ReLU"]}, {i, 23, 25}], 60 | getDN[1, 0, 2] 61 | }; 62 | line5 = NetChain@Flatten@Table[{getCN[i, 1, 1], ElementwiseLayer["ReLU"]}, {i, 26, 28}]; 63 | 64 | 65 | (* ::Subchapter:: *) 66 | (*Main*) 67 | 68 | 69 | mainNet = NetGraph[{ 70 | "Estimate" -> sub, 71 | "Join" -> CatenateLayer[], 72 | "EncoderA" -> line1, 73 | "EncoderB" -> line2, 74 | "Main" -> line3, 75 | "DecoderB" -> line4, 76 | "DecoderA" -> line5, 77 | "Add_1" -> ThreadingLayer[Plus], 78 | "Add_2" -> ThreadingLayer[Plus], 79 | "Add_3" -> ThreadingLayer[Plus] 80 | }, { 81 | NetPort["Input"] -> "Estimate", 82 | {NetPort["Input"], "Estimate"} -> "Join" -> "EncoderA" -> "EncoderB" -> "Main", 83 | 
{"EncoderB", "Main"} -> "Add_1" -> "DecoderB", 84 | {"EncoderA", "DecoderB"} -> "Add_2" -> "DecoderA", 85 | {NetPort["Input"], "DecoderA"} -> "Add_3" -> NetPort["Output"] 86 | }] 87 | 88 | 89 | testNet = NetReplacePart[ 90 | mainNet, { 91 | "Input" -> encoder, 92 | "Output" -> decoder 93 | } 94 | ] 95 | 96 | 97 | (* ::Subchapter:: *) 98 | (*Export Model*) 99 | 100 | 101 | Export["CBDNet trained on CommonNoise.WXF", mainNet] 102 | -------------------------------------------------------------------------------- /ImageEnhancement/Mixed/CBDNet on CommonNoise/CBDNet-JPEG trained on CommonNoise.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | << NeuralNetworks` 5 | << MXNetLink` 6 | << DeepMath` 7 | DateString[] 8 | 9 | 10 | (* ::Subitem:: *) 11 | (*Tue 25 Nov 2018 21:16:33*) 12 | 13 | 14 | (* ::Subchapter:: *) 15 | (*Import Weights*) 16 | 17 | 18 | params = NDArrayImport["CBDNet_JPEG-0000.params"]; 19 | 20 | 21 | (* ::Subchapter:: *) 22 | (*Encoder & Decoder*) 23 | 24 | 25 | encoder = NetEncoder[{"Image", {640, 360}}] 26 | decoder = NetDecoder["Image"] 27 | 28 | 29 | (* ::Subchapter:: *) 30 | (*Pre-defined Structure*) 31 | 32 | 33 | getCN[i_, p_, s_] := ConvolutionLayer[ 34 | "Weights" -> params["arg:convolution" <> ToString@i <> "_weight"], 35 | "Biases" -> params["arg:convolution" <> ToString@i <> "_bias"], 36 | "PaddingSize" -> p, "Stride" -> s 37 | ]; 38 | getDN[i_, p_, s_] := DeconvolutionLayer[ 39 | "Weights" -> params["arg:deconvolution" <> ToString@i <> "_weight"], 40 | "Biases" -> None, "PaddingSize" -> p, "Stride" -> s 41 | ]; 42 | 43 | 44 | sub = Flatten@Table[{getCN[i, 1, 1], ElementwiseLayer["ReLU"]}, {i, 0, 4}]; 45 | estimate = NetMerge[NetChain@sub, Join, Expand -> All]; 46 | line1 = NetFlatten@NetChain@Table[{getCN[i, 1, 1], ElementwiseLayer["ReLU"]}, {i, 5, 8}]; 47 | line2 = NetChain@Flatten@{ 48 | {getCN[9, 0, 2], ElementwiseLayer["ReLU"]}, 49 | {getCN[10, 0, 1], ElementwiseLayer["ReLU"]}, 50 | Table[{getCN[i, 1, 1], ElementwiseLayer["ReLU"]}, {i, 11, 13}] 51 | }; 52 | line3 = NetChain@Flatten@{ 53 | {getCN[14, 0, 2], ElementwiseLayer["ReLU"]}, 54 | {getCN[15, 0, 1], ElementwiseLayer["ReLU"]}, 55 | Table[{getCN[i, 1, 1], ElementwiseLayer["ReLU"]}, {i, 16, 22}], 56 | getDN[0, 0, 2] 57 | }; 58 | line4 = NetChain@Flatten@{ 59 | Table[{getCN[i, 1, 1], ElementwiseLayer["ReLU"]}, {i, 23, 25}], 60 | getDN[1, 0, 2] 61 | }; 62 | line5 = NetChain@Flatten@Table[{getCN[i, 1, 1], ElementwiseLayer["ReLU"]}, {i, 26, 28}]; 63 | 64 | 65 | (* ::Subchapter:: *) 66 | (*Main*) 67 | 68 | 69 | mainNet = NetGraph[{ 70 | "Estimate" -> sub, 71 | "Join" -> CatenateLayer[], 72 | "EncoderA" -> line1, 73 | "EncoderB" -> line2, 74 | "Main" -> line3, 75 | "DecoderB" -> line4, 76 | "DecoderA" -> line5, 77 | "Add_1" -> ThreadingLayer[Plus], 78 | "Add_2" -> ThreadingLayer[Plus], 79 | "Add_3" -> ThreadingLayer[Plus] 80 | }, { 81 | NetPort["Input"] -> "Estimate", 82 | {NetPort["Input"], "Estimate"} -> "Join" -> "EncoderA" -> "EncoderB" -> "Main", 83 | {"EncoderB", "Main"} -> "Add_1" -> "DecoderB", 84 | {"EncoderA", "DecoderB"} -> "Add_2" -> "DecoderA", 85 | {NetPort["Input"], "DecoderA"} -> "Add_3" -> NetPort["Output"] 86 | }] 87 | 88 | 89 | testNet = NetReplacePart[ 90 | mainNet, { 91 | "Input" -> encoder, 92 | "Output" -> decoder 93 | } 94 | ] 95 | 96 | 97 | (* ::Subchapter:: *) 98 | (*Export Model*) 99 | 100 | 101 | Export["CBDNet-JPEG trained on CommonNoise.WXF", mainNet] 102 | 
-------------------------------------------------------------------------------- /ImageRecognition/Anime/VGGs trained on Danbooru2017/Illustration2Vec Mega trained on Danbooru.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | << NeuralNetworks` 5 | << MXNetLink` 6 | << DeepMath` 7 | DateString[] 8 | 9 | 10 | (* ::Subitem:: *) 11 | (*Sat 22 Dec 2018 21:22:36*) 12 | 13 | 14 | (* ::Subchapter:: *) 15 | (*Import Weights*) 16 | 17 | 18 | params = Import["illust2vec_ver200.caffemodel.wxf"]; 19 | 20 | 21 | (* ::Subchapter:: *) 22 | (*Encoder & Decoder*) 23 | 24 | 25 | meanImage = Image[Normal[Import@"image_mean.npy.wxf"] / 255, Interleaving -> False]; 26 | meanChannel = { 27 | 0.6461231078823529`, 28 | 0.6567790045882352`, 29 | 0.7103466105490196` 30 | }; 31 | tags = Import["tag_list.json"]; 32 | 33 | 34 | (* ::Subchapter::Closed:: *) 35 | (*Pre-defined Structure*) 36 | 37 | 38 | ReLU = ElementwiseLayer["ReLU"]; 39 | Pooling = PoolingLayer[{2, 2}, "Stride" -> 2, "Function" -> Max]; 40 | getCN[name_String, p_ : 1, s_ : 1] := ConvolutionLayer[ 41 | "Weights" -> params[name <> "_1"], 42 | "Biases" -> params[name <> "_2"], 43 | "PaddingSize" -> p, "Stride" -> s 44 | ]; 45 | getDN[name_String, c_] := LinearLayer[c, 46 | "Weights" -> params[name <> "_1"], 47 | "Biases" -> params[name <> "_2"] 48 | ]; 49 | getBlock[i_, j_] := NetChain@{ 50 | getCN["conv" <> ToString[i] <> "_" <> ToString@j, 1, 1], 51 | ReLU 52 | }; 53 | getBlock2[a_, b_] := NetChain[ 54 | { PartLayer[a ;; b], ElementwiseLayer[Clip]}, 55 | "Input" -> 1539, 56 | "Output" -> NetDecoder[{"Class", Capitalize@tags[[a ;; b]]}] 57 | ]; 58 | 59 | 60 | (* ::Subchapter:: *) 61 | (*Main*) 62 | 63 | 64 | input = NetEncoder[{"Image", 224, "MeanImage" -> meanImage, "VarianceImage" -> 1 / 255}]; 65 | extractor = NetChain[{ 66 | Table[getBlock[1, j], {j, 1}], 67 | Pooling, 68 | Table[getBlock[2, j], {j, 1}], 69 | Pooling, 70 | Table[getBlock[3, j], {j, 2}], 71 | Pooling, 72 | Table[getBlock[4, j], {j, 2}], 73 | Pooling, 74 | Table[getBlock[5, j], {j, 2}], 75 | Pooling, 76 | Table[getBlock[6, j], {j, 3}] 77 | }, 78 | "Input" -> input 79 | ]; 80 | classifier = { 81 | DropoutLayer[0.5], 82 | getDN["encode1", 4096], 83 | LogisticSigmoid, 84 | getDN["encode2", 1539] 85 | }; 86 | general = getBlock2[1, 512]; 87 | character = getBlock2[513, 1024]; 88 | copyright = getBlock2[1025, 1536]; 89 | rating = getBlock2[1537, 1539]; 90 | 91 | mainNet = NetGraph[{ 92 | "Extractor" -> extractor, 93 | "Classifier" -> classifier, 94 | "General" -> general, 95 | "Character" -> character, 96 | "Copyright" -> copyright, 97 | "Rating" -> rating 98 | }, 99 | { 100 | "Extractor" -> "Classifier" -> { 101 | "General" -> NetPort["General"], 102 | "Character" -> NetPort["Character"], 103 | "Copyright" -> NetPort["Copyright"], 104 | "Rating" -> NetPort["Rating"] 105 | } 106 | } 107 | ] 108 | 109 | 110 | (* ::Subchapter:: *) 111 | (*Export Model*) 112 | 113 | 114 | Export["Illustration2Vec Mega trained on Danbooru.WLNet", mainNet] 115 | -------------------------------------------------------------------------------- /ImageEnhancement/SuperResolution/EDSR on DIV2K/EDSR2x trained on DIV2K.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | Needs["MXNetLink`"] 5 | Needs["NeuralNetworks`"] 6 | DateString[] 7 | 8 | 9 | (* ::Subitem:: *) 10 | (*Thu 25 Oct 2018 13:11:36*) 11 | 12 | 13 | (* 
::Subchapter:: *) 14 | (*Import Weights*) 15 | 16 | 17 | params = NDArrayImport["EDSR_x2-0000.params"]; 18 | 19 | 20 | (* ::Subchapter:: *) 21 | (*Encoder & Decoder*) 22 | 23 | 24 | encoder = NetEncoder[{"Image", {640, 360}}] 25 | decoder = NetDecoder["Image"] 26 | 27 | "Use this encoder if not use shift convolution"; 28 | NetEncoder[{ 29 | "Image", {640, 360}, 30 | "MeanImage" -> {0.4488, 0.4371, 0.4040}, 31 | "VarianceImage" -> 1 / 255^2 32 | }]; 33 | 34 | 35 | (* ::Subchapter::Closed:: *) 36 | (*Pre-defined Structure*) 37 | 38 | 39 | getBlock[i_] := NetGraph[{ 40 | ConvolutionLayer[ 41 | "Weights" -> params["arg:body." <> ToString[i] <> ".body.0.weight"], 42 | "Biases" -> params["arg:body." <> ToString[i] <> ".body.0.bias"], 43 | "PaddingSize" -> 1, "Stride" -> 1 44 | ], 45 | ElementwiseLayer["ReLU"], 46 | ConvolutionLayer[ 47 | "Weights" -> params["arg:body." <> ToString[i] <> ".body.2.weight"], 48 | "Biases" -> params["arg:body." <> ToString[i] <> ".body.2.bias"], 49 | "PaddingSize" -> 1, "Stride" -> 1 50 | ], 51 | ThreadingLayer[#1 + 0.1#2&] 52 | }, { 53 | NetPort["Input"] -> 1 -> 2 -> 3, 54 | {NetPort["Input"], 3} -> 4 55 | }]; 56 | $body = NetGraph[{ 57 | NetChain@Array[getBlock, 31], 58 | ConvolutionLayer[ 59 | "Weights" -> params["arg:body.32.weight"], 60 | "Biases" -> params["arg:body.32.bias"], 61 | "PaddingSize" -> 1, "Stride" -> 1 62 | ], 63 | ThreadingLayer[Plus] 64 | }, { 65 | NetPort["Input"] -> 1 -> 2, 66 | {NetPort["Input"], 2} -> 3 67 | }]; 68 | 69 | 70 | (* ::Subchapter:: *) 71 | (*Main*) 72 | 73 | 74 | mainNet = NetChain[{ 75 | ConvolutionLayer[ 76 | "Weights" -> params["arg:sub_mean.weight"], 77 | "Biases" -> params["arg:sub_mean.bias"], 78 | "PaddingSize" -> 0, "Stride" -> 1 79 | ], 80 | ConvolutionLayer[ 81 | "Weights" -> params["arg:head.0.weight"], 82 | "Biases" -> params["arg:head.0.bias"], 83 | "PaddingSize" -> 1, "Stride" -> 1 84 | ], 85 | $body, 86 | ConvolutionLayer[ 87 | "Weights" -> params["arg:tail.0.0.weight"], 88 | "Biases" -> params["arg:tail.0.0.bias"], 89 | "PaddingSize" -> 1, "Stride" -> 1 90 | ], 91 | PixelShuffleLayer[2], 92 | ConvolutionLayer[ 93 | "Weights" -> params["arg:tail.1.weight"], 94 | "Biases" -> params["arg:tail.1.bias"], 95 | "PaddingSize" -> 1, "Stride" -> 1 96 | ], 97 | ConvolutionLayer[ 98 | "Weights" -> params["arg:add_mean.weight"], 99 | "Biases" -> params["arg:add_mean.bias"], 100 | "PaddingSize" -> 0, "Stride" -> 1 101 | ] 102 | }, 103 | "Input" -> encoder, 104 | "Output" -> decoder 105 | ] 106 | 107 | 108 | (* ::Subchapter:: *) 109 | (*Export Model*) 110 | 111 | 112 | Export["EDSR2x trained on DIV2K.WXF", mainNet] 113 | -------------------------------------------------------------------------------- /ImageEnhancement/SuperResolution/EDSR on DIV2K/EDSR3x trained on DIV2K.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | Needs["MXNetLink`"] 5 | Needs["NeuralNetworks`"] 6 | DateString[] 7 | 8 | 9 | (* ::Subitem:: *) 10 | (*Thu 25 Oct 2018 13:13:28*) 11 | 12 | 13 | (* ::Subchapter:: *) 14 | (*Import Weights*) 15 | 16 | 17 | params = NDArrayImport["EDSR_x3-0000.params"]; 18 | 19 | 20 | (* ::Subchapter:: *) 21 | (*Encoder & Decoder*) 22 | 23 | 24 | encoder = NetEncoder[{"Image", {640, 360}}] 25 | decoder = NetDecoder["Image"] 26 | 27 | "Use this encoder if not use shift convolution"; 28 | NetEncoder[{ 29 | "Image", {640, 360}, 30 | "MeanImage" -> {0.4488, 0.4371, 0.4040}, 31 | "VarianceImage" -> 1 / 255^2 32 | }]; 33 | 34 | 35 | (* 
::Subchapter::Closed:: *) 36 | (*Pre-defined Structure*) 37 | 38 | 39 | getBlock[i_] := NetGraph[{ 40 | ConvolutionLayer[ 41 | "Weights" -> params["arg:body." <> ToString[i] <> ".body.0.weight"], 42 | "Biases" -> params["arg:body." <> ToString[i] <> ".body.0.bias"], 43 | "PaddingSize" -> 1, "Stride" -> 1 44 | ], 45 | ElementwiseLayer["ReLU"], 46 | ConvolutionLayer[ 47 | "Weights" -> params["arg:body." <> ToString[i] <> ".body.2.weight"], 48 | "Biases" -> params["arg:body." <> ToString[i] <> ".body.2.bias"], 49 | "PaddingSize" -> 1, "Stride" -> 1 50 | ], 51 | ThreadingLayer[#1 + 0.1#2&] 52 | }, { 53 | NetPort["Input"] -> 1 -> 2 -> 3, 54 | {NetPort["Input"], 3} -> 4 55 | }]; 56 | $body = NetGraph[{ 57 | NetChain@Array[getBlock, 31], 58 | ConvolutionLayer[ 59 | "Weights" -> params["arg:body.32.weight"], 60 | "Biases" -> params["arg:body.32.bias"], 61 | "PaddingSize" -> 1, "Stride" -> 1 62 | ], 63 | ThreadingLayer[Plus] 64 | }, { 65 | NetPort["Input"] -> 1 -> 2, 66 | {NetPort["Input"], 2} -> 3 67 | }]; 68 | 69 | 70 | (* ::Subchapter:: *) 71 | (*Main*) 72 | 73 | 74 | mainNet = NetChain[{ 75 | ConvolutionLayer[ 76 | "Weights" -> params["arg:sub_mean.weight"], 77 | "Biases" -> params["arg:sub_mean.bias"], 78 | "PaddingSize" -> 0, "Stride" -> 1 79 | ], 80 | ConvolutionLayer[ 81 | "Weights" -> params["arg:head.0.weight"], 82 | "Biases" -> params["arg:head.0.bias"], 83 | "PaddingSize" -> 1, "Stride" -> 1 84 | ], 85 | $body, 86 | ConvolutionLayer[ 87 | "Weights" -> params["arg:tail.0.0.weight"], 88 | "Biases" -> params["arg:tail.0.0.bias"], 89 | "PaddingSize" -> 1, "Stride" -> 1 90 | ], 91 | PixelShuffleLayer[3], 92 | ConvolutionLayer[ 93 | "Weights" -> params["arg:tail.1.weight"], 94 | "Biases" -> params["arg:tail.1.bias"], 95 | "PaddingSize" -> 1, "Stride" -> 1 96 | ], 97 | ConvolutionLayer[ 98 | "Weights" -> params["arg:add_mean.weight"], 99 | "Biases" -> params["arg:add_mean.bias"], 100 | "PaddingSize" -> 0, "Stride" -> 1 101 | ] 102 | }, 103 | "Input" -> encoder, 104 | "Output" -> decoder 105 | ] 106 | 107 | 108 | (* ::Subchapter:: *) 109 | (*Export Model*) 110 | 111 | 112 | Export["EDSR3x trained on DIV2K.WXF", mainNet] 113 | -------------------------------------------------------------------------------- /ImageEnhancement/Debluring/DeblurGAN on GOPRO/DeblurGAN trained on GOPRO.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | << NeuralNetworks` 5 | << MXNetLink` 6 | << DeepMath` 7 | DateString[] 8 | 9 | 10 | (* ::Subitem:: *) 11 | (*Thu 8 Nov 2018 19:05:21*) 12 | 13 | 14 | (* ::Subchapter:: *) 15 | (*Import Weights*) 16 | 17 | 18 | raw = Import["generator.h5", "Data"]; 19 | params[name_String] := Block[ 20 | {prefix, input}, 21 | prefix = StringJoin["/", First@StringSplit[name, "/"], "/"]; 22 | input = raw[prefix <> name]; 23 | Switch[ 24 | Length@Dimensions@input, 25 | 1, RawArray["Real32", input], 26 | 4, RawArray["Real32", TransposeLayer[{1<->4, 2<->3, 3<->4}][input]], 27 | _, RawArray["Real32", input] 28 | ] 29 | ] 30 | 31 | 32 | (* ::Subchapter:: *) 33 | (*Encoder & Decoder*) 34 | 35 | 36 | mShift = {0, 0, 0}; 37 | vShift = {1, 1, 1}^2; 38 | encoder = NetEncoder[{"Image", {640, 360}}] 39 | decoder = NetDecoder["Image"] 40 | 41 | 42 | (* ::Subchapter:: *) 43 | (*Pre-defined Structure*) 44 | 45 | 46 | getCN[i_Integer, p_Integer, s_Integer] := ConvolutionLayer[ 47 | "Weights" -> params["conv2d_" <> ToString[i] <> "/kernel:0"], 48 | "Biases" -> params["conv2d_" <> ToString[i] <> "/bias:0"], 
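(* getBlock2 below upsamples with a nearest-neighbour ResizeLayer followed
by a 3x3 convolution instead of a strided DeconvolutionLayer; this
resize-then-convolve idiom is commonly used to avoid deconvolution
checkerboard artifacts. The bare idiom, with a placeholder channel count:
  NetChain[{ResizeLayer[Scaled /@ {2, 2}, Resampling -> "Nearest"],
    ConvolutionLayer[64, 3, "PaddingSize" -> 1]}] *)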
49 | "PaddingSize" -> p, "Stride" -> s 50 | ]; 51 | getBN[i_Integer] := BatchNormalizationLayer[ 52 | "Momentum" -> 0.99, 53 | "Beta" -> params["batch_normalization_" <> ToString[i] <> "/beta:0"], 54 | "Gamma" -> params["batch_normalization_" <> ToString[i] <> "/gamma:0"], 55 | "MovingMean" -> params["batch_normalization_" <> ToString[i] <> "/moving_mean:0"], 56 | "MovingVariance" -> params["batch_normalization_" <> ToString[i] <> "/moving_variance:0"] 57 | ]; 58 | getBlock[i_] := Block[ 59 | {path}, 60 | path = NetChain[{ 61 | PaddingLayer[Partition[{0, 0, 1, 1, 1, 1}, 2], "Padding" -> "Reflected"], 62 | getCN[i, 0, 1], 63 | getBN[i], 64 | ElementwiseLayer["ReLU"], 65 | DropoutLayer[0.5], 66 | PaddingLayer[Partition[{0, 0, 1, 1, 1, 1}, 2], "Padding" -> "Reflected"], 67 | getCN[i + 1, 0, 1], 68 | getBN[i + 1] 69 | }]; 70 | NetMerge[path, Expand -> All] 71 | ]; 72 | getBlock2[i_] := NetChain[{ 73 | ResizeLayer[Scaled /@ {2, 2}, Resampling -> "Nearest"], 74 | getCN[i, 1, 1], 75 | getBN[i], 76 | ElementwiseLayer["ReLU"] 77 | }]; 78 | 79 | 80 | (* ::Subchapter:: *) 81 | (*Main*) 82 | 83 | 84 | mainNet = NetChain[{ 85 | PaddingLayer[Partition[{0, 0, 3, 3, 3, 3}, 2], "Padding" -> "Reflected"], 86 | getCN[1, 0, 1], 87 | getBN[1], 88 | ElementwiseLayer["ReLU"], 89 | {getCN[2, 1, 2], getBN[2], ElementwiseLayer["ReLU"]}, 90 | {getCN[3, 1, 2], getBN[3], ElementwiseLayer["ReLU"]}, 91 | NetChain@Table[getBlock[i], {i, 4, 20, 2}], 92 | getBlock2[22], 93 | getBlock2[23], 94 | PaddingLayer[Partition[{0, 0, 3, 3, 3, 3}, 2], "Padding" -> "Reflected"], 95 | getCN[24, 0, 1], 96 | Tanh 97 | }, 98 | "Input" -> encoder, 99 | "Output" -> decoder 100 | ] 101 | 102 | 103 | (* ::Subchapter:: *) 104 | (*Export Model*) 105 | 106 | 107 | Export["DeblurGAN trained on GOPRO.WXF", mainNet] 108 | -------------------------------------------------------------------------------- /ImageGeneration/Anime/ComixGANs Photo2Comic Style Transfer/ComixGAN Comic Style Transfer Alpha.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | (* ::Subchapter:: *) 4 | (*Import Weights*) 5 | 6 | 7 | SetDirectory@NotebookDirectory[]; 8 | Clear["Global`*"]; 9 | << DeepMath`; 10 | DeepMath`NetMerge; 11 | $name = "ComixGAN Comic Style Transfer Alpha"; 12 | params = Import[$name <> ".hdf5", "Data"]; 13 | 14 | 15 | (* ::Subchapter:: *) 16 | (*Pre-defined Structure*) 17 | 18 | 19 | $NCHW = TransposeLayer[{1<->4, 2<->3, 3<->4}]; 20 | getName[s_] := TemplateApply["/`1`/`1`", {s}]; 21 | getPad[n_] := PaddingLayer[{{0, 0}, {n, n}, {n, n}}, Padding -> "Reflected"]; 22 | getCW[name_, s_, p_, k_ : 1] := ConvolutionLayer[ 23 | "Weights" -> k * $NCHW@params[getName[name] <> "/kernel:0"], 24 | "Biases" -> None, "Stride" -> s, "PaddingSize" -> p 25 | ]; 26 | getCN[name_, s_, p_, k_ : 1] := ConvolutionLayer[ 27 | "Weights" -> k * $NCHW@params[getName[name] <> "/kernel:0"], 28 | "Biases" -> k * params[getName[name] <> "/bias:0"], 29 | "Stride" -> s, "PaddingSize" -> p 30 | ]; 31 | getDN[name_, s_, p_] := DeconvolutionLayer[ 32 | "Weights" -> $NCHW@params[getName[name] <> "/kernel:0"], 33 | "Biases" -> params[getName[name] <> "/bias:0"], 34 | "Stride" -> s, "PaddingSize" -> p 35 | ]; 36 | getIN[name_] := NormalizationLayer[ 37 | "Biases" -> params[getName[name] <> "/beta:0"], 38 | "Scaling" -> params[getName[name] <> "/gamma:0"], 39 | "Epsilon" -> 0.001 40 | ]; 41 | 42 | 43 | getBlock[i_] := GeneralUtilities`Scope[ 44 | path = NetChain@{ 45 | getCW["conv2d_" <> ToString[i], 1, 1], 46 | 
getIN["instance_normalization_" <> ToString[i - 6]], 47 | Ramp, 48 | getCW["conv2d_" <> ToString[i + 1], 1, 1], 49 | getIN["instance_normalization_" <> ToString[i - 5]] 50 | }; 51 | NetMerge[path, Plus, Expand -> True] 52 | ]; 53 | 54 | 55 | (* ::Subchapter:: *) 56 | (*Main*) 57 | 58 | 59 | mainNet = NetChain[{ 60 | { 61 | getCW["conv2d_8", 1, 3], 62 | getIN["instance_normalization_4"], 63 | Ramp 64 | }, 65 | { 66 | getCN["conv2d_9", 2, 1], 67 | getCW["conv2d_10", 1, 1], 68 | getIN["instance_normalization_5"], 69 | Ramp 70 | }, 71 | { 72 | getCN["conv2d_11", 2, 1], 73 | getCW["conv2d_12", 1, 1], 74 | getIN["instance_normalization_6"], 75 | Ramp 76 | }, 77 | NetChain@Table[getBlock[i], {i, 13, 27, 2}], 78 | { 79 | ResizeLayer[Scaled /@ {2, 2}], 80 | getCW["conv2d_29", 1, 1], 81 | getIN["instance_normalization_23"], 82 | Ramp 83 | }, 84 | { 85 | ResizeLayer[Scaled /@ {2, 2}], 86 | getCW["conv2d_30", 1, 1], 87 | getIN["instance_normalization_24"], 88 | Ramp 89 | }, 90 | getCN["conv2d_31", 1, 3], 91 | LogisticSigmoid 92 | }, 93 | "Output" -> "Image" 94 | ] 95 | 96 | 97 | (* ::Subchapter:: *) 98 | (*Testing*) 99 | 100 | 101 | img = ExampleData[{"TestImage", "House"}] 102 | newNet = NetReplacePart[mainNet, "Input" -> NetEncoder[{"Image", ImageDimensions@img}]]; 103 | newNet[img, TargetDevice -> "GPU"] 104 | 105 | 106 | (* ::Subchapter:: *) 107 | (*Export Model*) 108 | 109 | 110 | Export[$name <> ".MAT", mainNet, "WXF", PerformanceGoal -> "Speed"] 111 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/LeNet on MNIST/LeNet tested on MNIST TestSet.md: -------------------------------------------------------------------------------- 1 | # LeNet trained on MNIST 2 | ![Task](https://img.shields.io/badge/Task-Classifation-Orange.svg) 3 | ![Size](https://img.shields.io/badge/Size-1.7243%20MB-blue.svg) 4 | ![Accuracy](https://img.shields.io/badge/Accuracy-98.480%25-brightgreen.svg) 5 | ![Speed](https://img.shields.io/badge/Speed-0.355%20ms-ff69b4.svg) 6 | 7 | Automatically generated on Sun 18 Nov 2018 20:07:43 8 | 9 | ## Network structure: 10 | - Network Size: **1.72432 MB** 11 | - Parameters: **431 080** 12 | - Nodes Count: **11** 13 | - Speed: **0.355 ms/sample** 14 | - Layers: 15 | - ConvolutionLayer: **2** 16 | - ElementwiseLayer: **3** 17 | - FlattenLayer: **1** 18 | - LinearLayer: **2** 19 | - PoolingLayer: **2** 20 | - SoftmaxLayer: **1** 21 | 22 | 23 | ## Accuracy Curve 24 | ![Classification Curve.png](https://i.loli.net/2018/11/17/5bf004151b12a.png) 25 | 26 | ![High Precision Classification Curve.png](https://i.loli.net/2018/11/17/5bf0041535eab.png) 27 | 28 | ## Main Indicator 29 | - Top-1: **98.4800%** 30 | - Top-2: **99.7299%** 31 | - Top-3: **99.9100%** 32 | - Top-5: **99.9900%** 33 | - LogLikelihood: **-659.657** 34 | - CrossEntropyLoss: **0.0659657** 35 | - ProbabilityLoss: **0.00522322** 36 | - MeanProbability: **98.1506%** 37 | - GeometricMeanProbability: **93.6163%** 38 | - VarianceProbability: **0.0130536** 39 | - ScottPi: **0.983105** 40 | - CohenKappa: **0.983105** 41 | - RejectionRate: **0.00000%** 42 | 43 | ![Accuracy Rejection Curve.png](https://i.loli.net/2018/11/17/5bf00415503a1.png) 44 | 45 | ## Class Indicator 46 | | Class | Count | TPRate | TNRate | FPRate | FNRate | F1Score | 47 | |-------|-------|--------|--------|--------|--------|---------| 48 | | 0 | 980 | 99.3877% | 99.8558% | 0.14412% | 0.61224% | 0.99034 | 49 | | 1 | 1135 | 99.4713% | 99.9097% | 0.09024% | 0.52863% | 0.99383 | 50 | | 2 | 1032 | 97.4806% | 
99.8996% | 0.10035% | 2.51937% | 0.98290 | 51 | | 3 | 1010 | 98.9108% | 99.8220% | 0.17797% | 1.08910% | 0.98666 | 52 | | 4 | 982 | 97.7596% | 99.9445% | 0.05544% | 2.24032% | 0.98613 | 53 | | 5 | 892 | 98.8789% | 99.7145% | 0.28546% | 1.12107% | 0.98000 | 54 | | 6 | 958 | 97.4947% | 99.9336% | 0.06635% | 2.50521% | 0.98419 | 55 | | 7 | 1028 | 98.3463% | 99.6879% | 0.31208% | 1.65369% | 0.97822 | 56 | | 8 | 974 | 98.4599% | 99.8448% | 0.15510% | 1.54004% | 0.98510 | 57 | | 9 | 1009 | 98.5133% | 99.6996% | 0.30030% | 1.48662% | 0.97931 | 58 | 59 | ## Hard Class 60 | ![ConfusionMatrix.png](https://i.loli.net/2018/11/17/5bf0041545c4e.png) 61 | 62 | ## Evaluation Report 63 | | Index | TestID | Result | Time | MemoryChange | 64 | |-------|--------|--------|------|--------------| 65 | | 1 | Dependency Check | Success | 2.18416 s | +5.10072 MB | 66 | | 2 | Pre-define | Success | 0.00000 s | +0.00170 MB | 67 | | 3 | GPU Warm-Up | Success | 4.97171 s | +65.3663 MB | 68 | | 4 | Loading Model | Success | 0.08876 s | +7.45142 MB | 69 | | 5 | Loading Data | Success | 2.02858 s | +36.3034 MB | 70 | | 6 | Benchmark Test | Success | 3.89352 s | +1.05823 MB | 71 | | 7 | Result Dump | Success | 0.08679 s | -0.05690 MB | 72 | | 8 | Analyzing | Success | 17.6138 s | +19.0384 MB | 73 | -------------------------------------------------------------------------------- /ImageRecognition/Detection/YOLO3 on COCO/coco_yolo3_darknet53.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; Now 4 | << MXNetLink` 5 | << NeuralNetworks` 6 | 7 | 8 | file = "yolo3_darknet53_coco"; 9 | params = NDArrayImport[file <> "-0000.params"]; 10 | params = MXModelLoadParameters[file <> "-0000.params"]; (* supersedes the NDArrayImport result on the previous line; only this import is used *) 11 | 12 | 13 | input = NetEncoder[{"Image", 224, "MeanImage" -> {.485, .456, .406}, "VarianceImage" -> {.229, .224, .225}^2}] 14 | leakyReLU[alpha_] := ElementwiseLayer[Ramp[#] - alpha * Ramp[-#]&] (* leaky ReLU built from two ramps: x for x > 0, alpha x otherwise *) 15 | getCV$a[n_, p_ : 1, s_ : 1] := ConvolutionLayer[ 16 | "Weights" -> params["arg:darknetv30_conv" <> ToString[n] <> "_weight"], 17 | "Biases" -> None, "PaddingSize" -> p, "Stride" -> s 18 | ]; 19 | getBN$a[n_] := BatchNormalizationLayer[ 20 | "Epsilon" -> 1*^-5, 21 | "Beta" -> params["arg:darknetv30_batchnorm" <> ToString[n] <> "_beta"], 22 | "Gamma" -> params["arg:darknetv30_batchnorm" <> ToString[n] <> "_gamma"], 23 | "MovingMean" -> params["aux:darknetv30_batchnorm" <> ToString[n] <> "_running_mean"], 24 | "MovingVariance" -> params["aux:darknetv30_batchnorm" <> ToString[n] <> "_running_var"] 25 | ]; 26 | getCV$b[n_, p_ : 1, s_ : 1] := ConvolutionLayer[ 27 | "Weights" -> params["arg:yolov30_yolodetectionblockv30_conv" <> ToString[n] <> "_weight"], 28 | "Biases" -> None, "PaddingSize" -> p, "Stride" -> s 29 | ]; 30 | getBN$b[n_] := BatchNormalizationLayer[ 31 | "Epsilon" -> 1*^-5, 32 | "Beta" -> params["arg:yolov30_yolodetectionblockv30_batchnorm" <> ToString[n] <> "_beta"], 33 | "Gamma" -> params["arg:yolov30_yolodetectionblockv30_batchnorm" <> ToString[n] <> "_gamma"], 34 | "MovingMean" -> params["aux:yolov30_yolodetectionblockv30_batchnorm" <> ToString[n] <> "_running_mean"], 35 | "MovingVariance" -> params["aux:yolov30_yolodetectionblockv30_batchnorm" <> ToString[n] <> "_running_var"] 36 | ]; 37 | 38 | 39 | block1 = NetChain[{ 40 | getCV$a[0, 1, 1], getBN$a[0], leakyReLU[0.1], 41 | getCV$a[1, 1, 2], getBN$a[1], leakyReLU[0.1] 42 | }] 43 | 44 | 45 | getBlock$a[n_] := NetGraph[{ 46 | NetChain[{ 47 | getCV$a[n, 0, 1], getBN$a[n], leakyReLU[0.1], 48 | getCV$a[n + 1, 1, 1], getBN$a[n + 1], leakyReLU[0.1] 49 | }], 50 | ThreadingLayer[Plus] 51 | }, { 52 | NetPort["Input"] -> 1 -> 2, 53 | NetPort["Input"] -> 2 54 | }] 55 | getBlock$b[n_] := NetChain[{getCV$a[n, 1, 2], getBN$a[n], leakyReLU[0.1]}] 56 | block2 = NetChain@Flatten[{ 57 | {getBlock$a[2], getBlock$b[4]}, 58 | {getBlock$a[5], getBlock$a[7], getBlock$b[9]}, 59 | getBlock$a /@ Range[10, 24, 2] 60 | }] 61 | block3 = NetChain@Join[{getBlock$b[26]}, getBlock$a /@ Range[27, 41, 2]] 62 | 63 | 64 | block4 = NetChain@Join[{getBlock$b[43]}, getBlock$a /@ Range[44, 50, 2]] 65 | block5 = NetChain[{ 66 | getCV$b[0, 0, 1], getBN$b[0], leakyReLU[0.1], 67 | getCV$b[1, 1, 1], getBN$b[1], leakyReLU[0.1], 68 | getCV$b[2, 0, 1], getBN$b[2], leakyReLU[0.1], 69 | getCV$b[3, 1, 1], getBN$b[3], leakyReLU[0.1], 70 | getCV$b[4, 0, 1], getBN$b[4], leakyReLU[0.1] 71 | }] 72 | 73 | 74 | NetChain[{getCV$b[5, 0, 1], getBN$b[5], leakyReLU[0.1]}] (* final detection-block stage; evaluated but never assigned *) 75 | 76 | 77 | Take[Keys@params, {263, -1}] // TableForm (* list the parameter keys that are still unmapped *) 78 | 79 | 80 | NetModel[] (* browse the built-in reference models *) 81 | 82 | 83 | NetModel["YOLO V2 Trained on MS-COCO Data", "ConstructionNotebook"] (* reference for building the output-decoding stage *) 84 | 85 | 86 | regionLayerNet[{w, h}, anchors, coord, classes] (* unfinished: regionLayerNet and its arguments are never defined in this script; the YOLO3 detection head and box decoding were not completed *) 87 | -------------------------------------------------------------------------------- /ImageEnhancement/SuperResolution/EDSR on DIV2K/EDSR4x trained on DIV2K.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | Needs["MXNetLink`"] 5 | Needs["NeuralNetworks`"] 6 | DateString[] 7 | 8 | 9 | (* ::Subitem:: *) 10 | (*Thu 25 Oct 2018 13:10:02*) 11 | 12 | 13 | (* ::Subchapter:: *) 14 | (*Import Weights*) 15 | 16 | 17 | params = NDArrayImport["EDSR_x4-0000.params"]; 18 | 19 | 20 | (* ::Subchapter:: *) 21 | (*Encoder & Decoder*) 22 | 23 | 24 | encoder = NetEncoder[{"Image", {640, 360}}] 25 | decoder = NetDecoder["Image"] 26 | 27 | "Use this encoder if not use shift convolution"; 28 | NetEncoder[{ 29 | "Image", {640, 360}, 30 | "MeanImage" -> {0.4488, 0.4371, 0.4040}, 31 | "VarianceImage" -> 1 / 255^2 32 | }]; 33 | 34 | 35 | (* ::Subchapter::Closed:: *) 36 | (*Pre-defined Structure*) 37 | 38 | 39 | getBlock[i_] := NetGraph[{ 40 | ConvolutionLayer[ 41 | "Weights" -> params["arg:body." <> ToString[i] <> ".body.0.weight"], 42 | "Biases" -> params["arg:body." <> ToString[i] <> ".body.0.bias"], 43 | "PaddingSize" -> 1, "Stride" -> 1 44 | ], 45 | ElementwiseLayer["ReLU"], 46 | ConvolutionLayer[ 47 | "Weights" -> params["arg:body." <> ToString[i] <> ".body.2.weight"], 48 | "Biases" -> params["arg:body." 
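(* The 4x tail below upsamples in two PixelShuffleLayer[2] stages: each
preceding convolution multiplies the channel count by four, and
PixelShuffleLayer[2] rearranges {4 c, h, w} into {c, 2 h, 2 w}. Channel
bookkeeping only, with toy sizes:
  NetInitialize@NetChain[{ConvolutionLayer[16, 3, "PaddingSize" -> 1],
    PixelShuffleLayer[2]}, "Input" -> {4, 8, 8}]
yields a {4, 16, 16} output. Note also that Array[getBlock, 31] in $body
instantiates body.1 through body.31, so the body.0 parameters of the
checkpoint are never referenced; this holds for all the EDSR/PESR scripts
here and is worth double-checking against the source checkpoint. *)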
<> ToString[i] <> ".body.2.bias"], 49 | "PaddingSize" -> 1, "Stride" -> 1 50 | ], 51 | ThreadingLayer[#1 + 0.1#2&] 52 | }, { 53 | NetPort["Input"] -> 1 -> 2 -> 3, 54 | {NetPort["Input"], 3} -> 4 55 | }]; 56 | $body = NetGraph[{ 57 | NetChain@Array[getBlock, 31], 58 | ConvolutionLayer[ 59 | "Weights" -> params["arg:body.32.weight"], 60 | "Biases" -> params["arg:body.32.bias"], 61 | "PaddingSize" -> 1, "Stride" -> 1 62 | ], 63 | ThreadingLayer[Plus] 64 | }, { 65 | NetPort["Input"] -> 1 -> 2, 66 | {NetPort["Input"], 2} -> 3 67 | }]; 68 | 69 | 70 | (* ::Subchapter:: *) 71 | (*Main*) 72 | 73 | 74 | mainNet = NetChain[{ 75 | ConvolutionLayer[ 76 | "Weights" -> params["arg:sub_mean.weight"], 77 | "Biases" -> params["arg:sub_mean.bias"], 78 | "PaddingSize" -> 0, "Stride" -> 1 79 | ], 80 | ConvolutionLayer[ 81 | "Weights" -> params["arg:head.0.weight"], 82 | "Biases" -> params["arg:head.0.bias"], 83 | "PaddingSize" -> 1, "Stride" -> 1 84 | ], 85 | $body, 86 | ConvolutionLayer[ 87 | "Weights" -> params["arg:tail.0.0.weight"], 88 | "Biases" -> params["arg:tail.0.0.bias"], 89 | "PaddingSize" -> 1, "Stride" -> 1 90 | ], 91 | PixelShuffleLayer[2], 92 | ConvolutionLayer[ 93 | "Weights" -> params["arg:tail.0.2.weight"], 94 | "Biases" -> params["arg:tail.0.2.bias"], 95 | "PaddingSize" -> 1, "Stride" -> 1 96 | ], 97 | PixelShuffleLayer[2], 98 | ConvolutionLayer[ 99 | "Weights" -> params["arg:tail.1.weight"], 100 | "Biases" -> params["arg:tail.1.bias"], 101 | "PaddingSize" -> 1, "Stride" -> 1 102 | ], 103 | ConvolutionLayer[ 104 | "Weights" -> params["arg:add_mean.weight"], 105 | "Biases" -> params["arg:add_mean.bias"], 106 | "PaddingSize" -> 0, "Stride" -> 1 107 | ] 108 | }, 109 | "Input" -> encoder, 110 | "Output" -> decoder 111 | ] 112 | 113 | 114 | (* ::Subchapter:: *) 115 | (*Export Model*) 116 | 117 | 118 | Export["EDSR4x trained on DIV2K.WXF", mainNet] 119 | -------------------------------------------------------------------------------- /ImageGeneration/Anime/CartoonGANs trained on Multi-Style/CartoonGAN trained on Hosoda Style.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | (* ::Subchapter:: *) 4 | (*Import Weights*) 5 | 6 | 7 | Clear["Global`*"]; 8 | SetDirectory@NotebookDirectory[]; 9 | $name = "Hosoda"; 10 | params = Import[$name <> ".h5", "Data"]; 11 | 12 | 13 | (* ::Subchapter:: *) 14 | (*Pre-defined Structure*) 15 | 16 | 17 | $NCHW = TransposeLayer[{1<->4, 2<->3, 3<->4}]; 18 | getPad[n_] := PaddingLayer[{{0, 0}, {n, n}, {n, n}}, Padding -> "Reflected"] 19 | getCN[name_, s_, p_, k_ : 1] := ConvolutionLayer[ 20 | "Weights" -> k * $NCHW@params[ToString[name] <> "/kernel:0"], 21 | "Biases" -> k * params[ToString[name] <> "/bias:0"], 22 | "Stride" -> s, "PaddingSize" -> p 23 | ]; 24 | getDN[name_, s_, p_] := DeconvolutionLayer[ 25 | "Weights" -> $NCHW@params[ToString[name] <> "/kernel:0"], 26 | "Biases" -> params[ToString[name] <> "/bias:0"], 27 | "Stride" -> s, "PaddingSize" -> p 28 | ]; 29 | getIN[name_] := NormalizationLayer[ 30 | "Biases" -> params[ToString[name] <> "/beta:0"], 31 | "Scaling" -> params[ToString[name] <> "/gamma:0"], 32 | "Epsilon" -> 1*^-9 33 | ]; 34 | getBlock[i_] := NetFlatten@NetGraph[ 35 | { 36 | { 37 | getPad[1], 38 | getCN["/conv" <> ToString[i] <> "_1/conv" <> ToString[i] <> "_1", 1, 0], 39 | getIN["/in" <> ToString[i] <> "_1/in" <> ToString[i] <> "_1"], 40 | Ramp, 41 | getPad[1], 42 | getCN["/conv" <> ToString[i] <> "_2/conv" <> ToString[i] <> "_2", 1, 0], 43 | getIN["/in" <> ToString[i] <> "_2/in" <> 
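(* The scale argument k of getCN matters only for the final layer: the
original CartoonGAN generator ends in Tanh, and since
(Tanh[x] + 1)/2 == LogisticSigmoid[2 x], doubling the last kernel and bias
(getCN["/deconv3/deconv3", 1, 3, 2] below) and applying LogisticSigmoid
maps the generator output into the [0, 1] range that the "Image" decoder
expects. *)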
ToString[i] <> "_2"] 44 | }, 45 | ThreadingLayer[Plus] 46 | }, 47 | { 48 | NetPort["Input"] -> 1, 49 | {NetPort["Input"], 1} -> 2 -> NetPort["Output"] 50 | } 51 | ]; 52 | 53 | 54 | (* ::Subchapter:: *) 55 | (*Main*) 56 | 57 | 58 | mainNet = NetChain[{ 59 | { 60 | getPad[3], 61 | getCN["/conv1/conv1", 1, 0], 62 | getIN["/in1/in1"], 63 | Ramp 64 | }, 65 | { 66 | getCN["/conv2_1/conv2_1", 2, 1], 67 | getCN["/conv2_2/conv2_2", 1, 1], 68 | getIN["/in2/in2"], 69 | Ramp 70 | }, 71 | { 72 | getCN["/conv3_1/conv3_1", 2, 1], 73 | getCN["/conv3_2/conv3_2", 1, 1], 74 | getIN["/in3/in3"], 75 | Ramp 76 | }, 77 | getBlock /@ Range[4, 11], 78 | { 79 | getDN["/deconv1_1/deconv1_1", 2, 1], 80 | PaddingLayer[{{0, 0}, {2, 1}, {2, 1}}, Padding -> "Reflected"], 81 | getCN["/deconv1_2/deconv1_2", 1, 0], 82 | getIN["/in_deconv1/in_deconv1"], 83 | Ramp 84 | }, 85 | { 86 | getDN["/deconv2_1/deconv2_1", 2, 1], 87 | PaddingLayer[{{0, 0}, {2, 1}, {2, 1}}, Padding -> "Reflected"], 88 | getCN["/deconv2_2/deconv2_2", 1, 0], 89 | getIN["/in_deconv2/in_deconv2"], 90 | Ramp 91 | }, 92 | getCN["/deconv3/deconv3", 1, 3, 2], 93 | LogisticSigmoid 94 | }, 95 | "Output" -> "Image" 96 | ] 97 | 98 | 99 | (* ::Subchapter:: *) 100 | (*Testing*) 101 | 102 | 103 | img = Import["Test.png"] 104 | newNet = NetReplacePart[mainNet, "Input" -> NetEncoder[{"Image", ImageDimensions@img}]]; 105 | newNet[img, TargetDevice -> "GPU"] 106 | 107 | 108 | (* ::Subchapter:: *) 109 | (*Export Model*) 110 | 111 | 112 | Export[ 113 | "CartoonGAN trained on " <> $name <> " Style.MAT", 114 | mainNet, "WXF", 115 | PerformanceGoal -> "Speed" 116 | ] 117 | -------------------------------------------------------------------------------- /ImageEnhancement/SuperResolution/PESR on DIV2K/PESR4x trained on DIV2K.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | Needs["MXNetLink`"] 5 | Needs["NeuralNetworks`"] 6 | DateString[] 7 | 8 | 9 | (* ::Subitem:: *) 10 | (*Thu 25 Oct 2018 18:16:43*) 11 | 12 | 13 | (* ::Subchapter:: *) 14 | (*Import Weights*) 15 | 16 | 17 | params = NDArrayImport["PESR_Perceptual_4x-0000.params"]; 18 | 19 | 20 | (* ::Subchapter:: *) 21 | (*Encoder & Decoder*) 22 | 23 | 24 | encoder = NetEncoder[{"Image", {640, 360}}] 25 | decoder = NetDecoder["Image"] 26 | 27 | "Use this encoder if not use shift convolution"; 28 | NetEncoder[{ 29 | "Image", {640, 360}, 30 | "MeanImage" -> {0.4488, 0.4371, 0.4040}, 31 | "VarianceImage" -> 1 / 255^2 32 | }]; 33 | 34 | 35 | (* ::Subchapter:: *) 36 | (*Pre-defined Structure*) 37 | 38 | 39 | getBlock[i_] := NetGraph[{ 40 | ConvolutionLayer[ 41 | "Weights" -> params["arg:body." <> ToString[i] <> ".body.0.weight"], 42 | "Biases" -> params["arg:body." <> ToString[i] <> ".body.0.bias"], 43 | "PaddingSize" -> 1, "Stride" -> 1 44 | ], 45 | ElementwiseLayer["ReLU"], 46 | ConvolutionLayer[ 47 | "Weights" -> params["arg:body." <> ToString[i] <> ".body.2.weight"], 48 | "Biases" -> params["arg:body." 
<> ToString[i] <> ".body.2.bias"], 49 | "PaddingSize" -> 1, "Stride" -> 1 50 | ], 51 | ThreadingLayer[#1 + 0.1#2&] 52 | }, { 53 | NetPort["Input"] -> 1 -> 2 -> 3, 54 | {NetPort["Input"], 3} -> 4 55 | }]; 56 | $body = NetGraph[{ 57 | NetChain@Array[getBlock, 31], 58 | ConvolutionLayer[ 59 | "Weights" -> params["arg:body.32.weight"], 60 | "Biases" -> params["arg:body.32.bias"], 61 | "PaddingSize" -> 1, "Stride" -> 1 62 | ], 63 | ThreadingLayer[Plus] 64 | }, { 65 | NetPort["Input"] -> 1 -> 2, 66 | {NetPort["Input"], 2} -> 3 67 | }]; 68 | 69 | 70 | (* ::Subchapter:: *) 71 | (*Main*) 72 | 73 | 74 | mainNet = NetChain[{ 75 | ConvolutionLayer[ 76 | "Weights" -> params["arg:sub_mean.weight"], 77 | "Biases" -> params["arg:sub_mean.bias"], 78 | "PaddingSize" -> 0, "Stride" -> 1 79 | ], 80 | ConvolutionLayer[ 81 | "Weights" -> params["arg:embed.weight"], 82 | "Biases" -> params["arg:embed.bias"], 83 | "PaddingSize" -> 1, "Stride" -> 1 84 | ], 85 | $body, 86 | ConvolutionLayer[ 87 | "Weights" -> params["arg:upsample.0.weight"], 88 | "Biases" -> params["arg:upsample.0.bias"], 89 | "PaddingSize" -> 1, "Stride" -> 1 90 | ], 91 | PixelShuffleLayer[2], 92 | ConvolutionLayer[ 93 | "Weights" -> params["arg:upsample.2.weight"], 94 | "Biases" -> params["arg:upsample.2.bias"], 95 | "PaddingSize" -> 1, "Stride" -> 1 96 | ], 97 | PixelShuffleLayer[2], 98 | ConvolutionLayer[ 99 | "Weights" -> params["arg:upsample.4.weight"], 100 | "Biases" -> params["arg:upsample.4.bias"], 101 | "PaddingSize" -> 1, "Stride" -> 1 102 | ], 103 | ConvolutionLayer[ 104 | "Weights" -> params["arg:add_mean.weight"], 105 | "Biases" -> params["arg:add_mean.bias"], 106 | "PaddingSize" -> 0, "Stride" -> 1 107 | ] 108 | }, 109 | "Input" -> encoder, 110 | "Output" -> decoder 111 | ] 112 | 113 | 114 | (* ::Subchapter:: *) 115 | (*Export Model*) 116 | 117 | 118 | Export["PESR4x trained on DIV2K.WXF", mainNet] 119 | -------------------------------------------------------------------------------- /ImageGeneration/Anime/CartoonGANs trained on Multi-Style/CartoonGAN trained on Paprika Style.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | (* ::Subchapter:: *) 4 | (*Import Weights*) 5 | 6 | 7 | Clear["Global`*"]; 8 | SetDirectory@NotebookDirectory[]; 9 | $name = "Paprika"; 10 | params = Import[$name <> ".h5", "Data"]; 11 | 12 | 13 | (* ::Subchapter:: *) 14 | (*Pre-defined Structure*) 15 | 16 | 17 | $NCHW = TransposeLayer[{1<->4, 2<->3, 3<->4}]; 18 | getPad[n_] := PaddingLayer[{{0, 0}, {n, n}, {n, n}}, Padding -> "Reflected"] 19 | getCN[name_, s_, p_, k_ : 1] := ConvolutionLayer[ 20 | "Weights" -> k * $NCHW@params[ToString[name] <> "/kernel:0"], 21 | "Biases" -> k * params[ToString[name] <> "/bias:0"], 22 | "Stride" -> s, "PaddingSize" -> p 23 | ]; 24 | getDN[name_, s_, p_] := DeconvolutionLayer[ 25 | "Weights" -> $NCHW@params[ToString[name] <> "/kernel:0"], 26 | "Biases" -> params[ToString[name] <> "/bias:0"], 27 | "Stride" -> s, "PaddingSize" -> p 28 | ]; 29 | getIN[name_] := NormalizationLayer[ 30 | "Biases" -> params[ToString[name] <> "/beta:0"], 31 | "Scaling" -> params[ToString[name] <> "/gamma:0"], 32 | "Epsilon" -> 1*^-9 33 | ]; 34 | getBlock[i_] := NetFlatten@NetGraph[ 35 | { 36 | { 37 | getPad[1], 38 | getCN["/conv" <> ToString[i] <> "_1/conv" <> ToString[i] <> "_1", 1, 0], 39 | getIN["/in" <> ToString[i] <> "_1/in" <> ToString[i] <> "_1"], 40 | Ramp, 41 | getPad[1], 42 | getCN["/conv" <> ToString[i] <> "_2/conv" <> ToString[i] <> "_2", 1, 0], 43 | getIN["/in" <> ToString[i] 
<> "_2/in" <> ToString[i] <> "_2"] 44 | }, 45 | ThreadingLayer[Plus] 46 | }, 47 | { 48 | NetPort["Input"] -> 1, 49 | {NetPort["Input"], 1} -> 2 -> NetPort["Output"] 50 | } 51 | ]; 52 | 53 | 54 | (* ::Subchapter:: *) 55 | (*Main*) 56 | 57 | 58 | mainNet = NetChain[{ 59 | { 60 | getPad[3], 61 | getCN["/conv1/conv1", 1, 0], 62 | getIN["/in1/in1"], 63 | Ramp 64 | }, 65 | { 66 | getCN["/conv2_1/conv2_1", 2, 1], 67 | getCN["/conv2_2/conv2_2", 1, 1], 68 | getIN["/in2/in2"], 69 | Ramp 70 | }, 71 | { 72 | getCN["/conv3_1/conv3_1", 2, 1], 73 | getCN["/conv3_2/conv3_2", 1, 1], 74 | getIN["/in3/in3"], 75 | Ramp 76 | }, 77 | getBlock /@ Range[4, 11], 78 | { 79 | getDN["/deconv1_1/deconv1_1", 2, 1], 80 | PaddingLayer[{{0, 0}, {2, 1}, {2, 1}}, Padding -> "Reflected"], 81 | getCN["/deconv1_2/deconv1_2", 1, 0], 82 | getIN["/in_deconv1/in_deconv1"], 83 | Ramp 84 | }, 85 | { 86 | getDN["/deconv2_1/deconv2_1", 2, 1], 87 | PaddingLayer[{{0, 0}, {2, 1}, {2, 1}}, Padding -> "Reflected"], 88 | getCN["/deconv2_2/deconv2_2", 1, 0], 89 | getIN["/in_deconv2/in_deconv2"], 90 | Ramp 91 | }, 92 | getCN["/deconv3/deconv3", 1, 3, 2], 93 | LogisticSigmoid 94 | }, 95 | "Output" -> "Image" 96 | ] 97 | 98 | 99 | (* ::Subchapter:: *) 100 | (*Testing*) 101 | 102 | 103 | img = Import["Test.png"] 104 | newNet = NetReplacePart[mainNet, "Input" -> NetEncoder[{"Image", ImageDimensions@img}]]; 105 | newNet[img, TargetDevice -> "GPU"] 106 | 107 | 108 | (* ::Subchapter:: *) 109 | (*Export Model*) 110 | 111 | 112 | Export[ 113 | "CartoonGAN trained on " <> $name <> " Style.MAT", 114 | mainNet, "WXF", 115 | PerformanceGoal -> "Speed" 116 | ] 117 | -------------------------------------------------------------------------------- /ImageGeneration/Anime/CartoonGANs trained on Multi-Style/CartoonGAN trained on Shinkai Style.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | (* ::Subchapter:: *) 4 | (*Import Weights*) 5 | 6 | 7 | Clear["Global`*"]; 8 | SetDirectory@NotebookDirectory[]; 9 | $name = "Shinkai"; 10 | params = Import[$name <> ".h5", "Data"]; 11 | 12 | 13 | (* ::Subchapter:: *) 14 | (*Pre-defined Structure*) 15 | 16 | 17 | $NCHW = TransposeLayer[{1<->4, 2<->3, 3<->4}]; 18 | getPad[n_] := PaddingLayer[{{0, 0}, {n, n}, {n, n}}, Padding -> "Reflected"] 19 | getCN[name_, s_, p_, k_ : 1] := ConvolutionLayer[ 20 | "Weights" -> k * $NCHW@params[ToString[name] <> "/kernel:0"], 21 | "Biases" -> k * params[ToString[name] <> "/bias:0"], 22 | "Stride" -> s, "PaddingSize" -> p 23 | ]; 24 | getDN[name_, s_, p_] := DeconvolutionLayer[ 25 | "Weights" -> $NCHW@params[ToString[name] <> "/kernel:0"], 26 | "Biases" -> params[ToString[name] <> "/bias:0"], 27 | "Stride" -> s, "PaddingSize" -> p 28 | ]; 29 | getIN[name_] := NormalizationLayer[ 30 | "Biases" -> params[ToString[name] <> "/beta:0"], 31 | "Scaling" -> params[ToString[name] <> "/gamma:0"], 32 | "Epsilon" -> 1*^-9 33 | ]; 34 | getBlock[i_] := NetFlatten@NetGraph[ 35 | { 36 | { 37 | getPad[1], 38 | getCN["/conv" <> ToString[i] <> "_1/conv" <> ToString[i] <> "_1", 1, 0], 39 | getIN["/in" <> ToString[i] <> "_1/in" <> ToString[i] <> "_1"], 40 | Ramp, 41 | getPad[1], 42 | getCN["/conv" <> ToString[i] <> "_2/conv" <> ToString[i] <> "_2", 1, 0], 43 | getIN["/in" <> ToString[i] <> "_2/in" <> ToString[i] <> "_2"] 44 | }, 45 | ThreadingLayer[Plus] 46 | }, 47 | { 48 | NetPort["Input"] -> 1, 49 | {NetPort["Input"], 1} -> 2 -> NetPort["Output"] 50 | } 51 | ]; 52 | 53 | 54 | (* ::Subchapter:: *) 55 | (*Main*) 56 | 57 | 58 | mainNet = NetChain[{ 
59 | { 60 | getPad[3], 61 | getCN["/conv1/conv1", 1, 0], 62 | getIN["/in1/in1"], 63 | Ramp 64 | }, 65 | { 66 | getCN["/conv2_1/conv2_1", 2, 1], 67 | getCN["/conv2_2/conv2_2", 1, 1], 68 | getIN["/in2/in2"], 69 | Ramp 70 | }, 71 | { 72 | getCN["/conv3_1/conv3_1", 2, 1], 73 | getCN["/conv3_2/conv3_2", 1, 1], 74 | getIN["/in3/in3"], 75 | Ramp 76 | }, 77 | getBlock /@ Range[4, 11], 78 | { 79 | getDN["/deconv1_1/deconv1_1", 2, 1], 80 | PaddingLayer[{{0, 0}, {2, 1}, {2, 1}}, Padding -> "Reflected"], 81 | getCN["/deconv1_2/deconv1_2", 1, 0], 82 | getIN["/in_deconv1/in_deconv1"], 83 | Ramp 84 | }, 85 | { 86 | getDN["/deconv2_1/deconv2_1", 2, 1], 87 | PaddingLayer[{{0, 0}, {2, 1}, {2, 1}}, Padding -> "Reflected"], 88 | getCN["/deconv2_2/deconv2_2", 1, 0], 89 | getIN["/in_deconv2/in_deconv2"], 90 | Ramp 91 | }, 92 | getCN["/deconv3/deconv3", 1, 3, 2], 93 | LogisticSigmoid 94 | }, 95 | "Output" -> "Image" 96 | ] 97 | 98 | 99 | (* ::Subchapter:: *) 100 | (*Testing*) 101 | 102 | 103 | img = Import["Test.png"] 104 | newNet = NetReplacePart[mainNet, "Input" -> NetEncoder[{"Image", ImageDimensions@img}]]; 105 | newNet[img, TargetDevice -> "GPU"] 106 | 107 | 108 | (* ::Subchapter:: *) 109 | (*Export Model*) 110 | 111 | 112 | Export[ 113 | "CartoonGAN trained on " <> $name <> " Style.MAT", 114 | mainNet, "WXF", 115 | PerformanceGoal -> "Speed" 116 | ] 117 | -------------------------------------------------------------------------------- /ImageGeneration/Anime/ComixGANs Photo2Comic Style Transfer/ComixGAN Comic Style Transfer Beta.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | (* ::Subchapter:: *) 4 | (*Import Weights*) 5 | 6 | 7 | SetDirectory@NotebookDirectory[]; 8 | Clear["Global`*"]; 9 | << DeepMath`; 10 | DeepMath`NetMerge; 11 | $name = "ComixGAN Comic Style Transfer Beta"; 12 | params = Import[$name <> ".h5", "Data"]; 13 | 14 | 15 | (* ::Subchapter:: *) 16 | (*Pre-defined Structure*) 17 | 18 | 19 | $NCHW = TransposeLayer[{1<->4, 2<->3, 3<->4}]; 20 | getName[s_] := TemplateApply["/model_weights/`1`/`1`", {s}] 21 | getPad[n_] := PaddingLayer[{{0, 0}, {n, n}, {n, n}}, Padding -> "Reflected"]; 22 | getCW[name_, s_, p_, k_ : 1] := ConvolutionLayer[ 23 | "Weights" -> k * $NCHW@params[getName[name] <> "/kernel:0"], 24 | "Biases" -> None, "Stride" -> s, "PaddingSize" -> p 25 | ]; 26 | getCN[name_, s_, p_, k_ : 1] := ConvolutionLayer[ 27 | "Weights" -> k * $NCHW@params[getName[name] <> "/kernel:0"], 28 | "Biases" -> k * params[getName[name] <> "/bias:0"], 29 | "Stride" -> s, "PaddingSize" -> p 30 | ]; 31 | getDN[name_, s_, p_] := DeconvolutionLayer[ 32 | "Weights" -> $NCHW@params[getName[name] <> "/kernel:0"], 33 | "Biases" -> params[getName[name] <> "/bias:0"], 34 | "Stride" -> s, "PaddingSize" -> p 35 | ]; 36 | getIN[name_] := NormalizationLayer[ 37 | "Biases" -> params[getName[name] <> "/beta:0"], 38 | "Scaling" -> params[getName[name] <> "/gamma:0"], 39 | "Epsilon" -> 0.001 40 | ]; 41 | 42 | 43 | getBlock[i_] := GeneralUtilities`Scope[ 44 | path = NetChain@{ 45 | getCW["conv2d_" <> ToString[i], 1, 1], 46 | getIN["instance_normalization_" <> ToString[i - 6]], 47 | Ramp, 48 | getCW["conv2d_" <> ToString[i + 1], 1, 1], 49 | getIN["instance_normalization_" <> ToString[i - 5]] 50 | }; 51 | NetMerge[path, Plus, Expand -> True] 52 | ]; 53 | 54 | 55 | (* ::Subchapter:: *) 56 | (*Main*) 57 | 58 | 59 | mainNet = NetChain[{ 60 | { 61 | getCW["conv2d_8", 1, 3], 62 | getIN["instance_normalization_4"], 63 | Ramp 64 | }, 65 | { 66 | getCN["conv2d_9", 2, 1], 
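(* The stride-2 convolution halves the spatial resolution; the bias-free convolution below feeds instance normalization, which would cancel any bias anyway. *)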
67 | getCW["conv2d_10", 1, 1], 68 | getIN["instance_normalization_5"], 69 | Ramp 70 | }, 71 | { 72 | getCN["conv2d_11", 2, 1], 73 | getCW["conv2d_12", 1, 1], 74 | getIN["instance_normalization_6"], 75 | Ramp 76 | }, 77 | NetChain@Table[getBlock[i], {i, 13, 27, 2}], 78 | { 79 | getDN["conv2d_transpose_1", 2, 0], 80 | PartLayer[{All, 2 ;; All, 2 ;; All}], 81 | getCW["conv2d_29", 1, 1], 82 | getIN["instance_normalization_23"], 83 | Ramp 84 | }, 85 | { 86 | getDN["conv2d_transpose_2", 2, 0], 87 | getCW["conv2d_30", 1, 1], 88 | PartLayer[{All, 2 ;; All, 2 ;; All}], 89 | getIN["instance_normalization_24"], 90 | Ramp 91 | }, 92 | getCN["conv2d_31", 1, 3], 93 | LogisticSigmoid 94 | }, 95 | "Output" -> "Image" 96 | ] 97 | 98 | 99 | (* ::Subchapter:: *) 100 | (*Testing*) 101 | 102 | 103 | img = ExampleData[{"TestImage", "House"}] 104 | newNet = NetReplacePart[mainNet, "Input" -> NetEncoder[{"Image", ImageDimensions@img}]]; 105 | newNet[img, TargetDevice -> "GPU"] 106 | 107 | 108 | (* ::Subchapter:: *) 109 | (*Export Model*) 110 | 111 | 112 | Export[$name <> ".MAT", mainNet, "WXF", PerformanceGoal -> "Speed"] 113 | -------------------------------------------------------------------------------- /ImageGeneration/Anime/CartoonGANs trained on Multi-Style/CartoonGAN trained on Hayao Style.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | (* ::Subchapter:: *) 4 | (*Import Weights*) 5 | 6 | 7 | Clear["Global`*"]; 8 | SetDirectory@NotebookDirectory[]; 9 | $name = "Hayao"; 10 | params = Import[$name <> ".h5", "Data"]; 11 | 12 | 13 | (* ::Subchapter:: *) 14 | (*Pre-defined Structure*) 15 | 16 | 17 | $NCHW = TransposeLayer[{1<->4, 2<->3, 3<->4}]; 18 | getPad[n_] := PaddingLayer[{{0, 0}, {n, n}, {n, n}}, Padding -> "Reflected"] 19 | getCN[name_, s_, p_, k_ : 1] := ConvolutionLayer[ 20 | "Weights" -> k * $NCHW@params[ToString[name] <> "/kernel:0"], 21 | "Biases" -> k * params[ToString[name] <> "/bias:0"], 22 | "Stride" -> s, "PaddingSize" -> p 23 | ]; 24 | getDN[name_, s_, p_] := DeconvolutionLayer[ 25 | "Weights" -> $NCHW@params[ToString[name] <> "/kernel:0"], 26 | "Biases" -> params[ToString[name] <> "/bias:0"], 27 | "Stride" -> s, "PaddingSize" -> p 28 | ]; 29 | getIN[name_] := NormalizationLayer[ 30 | "Biases" -> params[ToString[name] <> "/beta:0"], 31 | "Scaling" -> params[ToString[name] <> "/gamma:0"], 32 | "Epsilon" -> 1*^-9 33 | ]; 34 | 35 | 36 | getBlock[i_] := NetFlatten@NetGraph[ 37 | { 38 | { 39 | getPad[1], 40 | getCN["/conv" <> ToString[i] <> "_1/conv" <> ToString[i] <> "_1", 1, 0], 41 | getIN["/in" <> ToString[i] <> "_1/in" <> ToString[i] <> "_1"], 42 | Ramp, 43 | getPad[1], 44 | getCN["/conv" <> ToString[i] <> "_2/conv" <> ToString[i] <> "_2", 1, 0], 45 | getIN["/in" <> ToString[i] <> "_2/in" <> ToString[i] <> "_2"] 46 | }, 47 | ThreadingLayer[Plus] 48 | }, 49 | { 50 | NetPort["Input"] -> 1, 51 | {NetPort["Input"], 1} -> 2 -> NetPort["Output"] 52 | } 53 | ]; 54 | 55 | 56 | (* ::Subchapter:: *) 57 | (*Main*) 58 | 59 | 60 | mainNet = NetChain[{ 61 | { 62 | getPad[3], 63 | getCN["/conv1/conv1", 1, 0], 64 | getIN["/in1/in1"], 65 | Ramp 66 | }, 67 | { 68 | getCN["/conv2_1/conv2_1", 2, 1], 69 | getCN["/conv2_2/conv2_2", 1, 1], 70 | getIN["/in2/in2"], 71 | Ramp 72 | }, 73 | { 74 | getCN["/conv3_1/conv3_1", 2, 1], 75 | getCN["/conv3_2/conv3_2", 1, 1], 76 | getIN["/in3/in3"], 77 | Ramp 78 | }, 79 | getBlock /@ Range[4, 11], 80 | { 81 | getDN["/deconv1_1/deconv1_1", 2, 1], 82 | PaddingLayer[{{0, 0}, {2, 1}, {2, 1}}, Padding -> "Reflected"], 83 
| getCN["/deconv1_2/deconv1_2", 1, 0], 84 | getIN["/in_deconv1/in_deconv1"], 85 | Ramp 86 | }, 87 | { 88 | getDN["/deconv2_1/deconv2_1", 2, 1], 89 | PaddingLayer[{{0, 0}, {2, 1}, {2, 1}}, Padding -> "Reflected"], 90 | getCN["/deconv2_2/deconv2_2", 1, 0], 91 | getIN["/in_deconv2/in_deconv2"], 92 | Ramp 93 | }, 94 | getCN["/deconv3/deconv3", 1, 3, 2], 95 | LogisticSigmoid 96 | }, 97 | "Output" -> "Image" 98 | ] 99 | 100 | 101 | (* ::Subchapter:: *) 102 | (*Testing*) 103 | 104 | 105 | img = Import["Test.png"] 106 | newNet = NetReplacePart[mainNet, "Input" -> NetEncoder[{"Image", ImageDimensions@img}]]; 107 | newNet[img, TargetDevice -> "GPU"] 108 | 109 | 110 | (* ::Subchapter:: *) 111 | (*Export Model*) 112 | 113 | 114 | Export[ 115 | "CartoonGAN trained on " <> $name <> " Style.MAT", 116 | mainNet, "WXF", 117 | PerformanceGoal -> "Speed" 118 | ] 119 | -------------------------------------------------------------------------------- /ImageGeneration/Anime/ComixGANs Photo2Comic Style Transfer/ComixGAN Comic Style Transfer Delta.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | (* ::Subchapter:: *) 4 | (*Import Weights*) 5 | 6 | 7 | SetDirectory@NotebookDirectory[]; 8 | Clear["Global`*"]; 9 | << DeepMath`; 10 | DeepMath`NetMerge; 11 | $name = "ComixGAN Comic Style Transfer Delta"; 12 | params = Import[$name <> ".h5", "Data"]; 13 | 14 | 15 | (* ::Subchapter:: *) 16 | (*Pre-defined Structure*) 17 | 18 | 19 | $NCHW = TransposeLayer[{1<->4, 2<->3, 3<->4}]; 20 | getName[s_] := TemplateApply["/model_weights/`1`/`1`", {s}]; 21 | getPad[n_] := PaddingLayer[{{0, 0}, {n, n}, {n, n}}, Padding -> "Reflected"]; 22 | getCW[name_, s_, p_, k_ : 1] := ConvolutionLayer[ 23 | "Weights" -> k * $NCHW@params[getName[name] <> "/kernel:0"], 24 | "Biases" -> None, "Stride" -> s, "PaddingSize" -> p 25 | ]; 26 | getCN[name_, s_, p_, k_ : 1] := ConvolutionLayer[ 27 | "Weights" -> k * $NCHW@params[getName[name] <> "/kernel:0"], 28 | "Biases" -> k * params[getName[name] <> "/bias:0"], 29 | "Stride" -> s, "PaddingSize" -> p 30 | ]; 31 | getDN[name_, s_, p_] := DeconvolutionLayer[ 32 | "Weights" -> $NCHW@params[getName[name] <> "/kernel:0"], 33 | "Biases" -> params[getName[name] <> "/bias:0"], 34 | "Stride" -> s, "PaddingSize" -> p 35 | ]; 36 | getIN[name_] := NormalizationLayer[ 37 | "Biases" -> params[getName[name] <> "/beta:0"], 38 | "Scaling" -> params[getName[name] <> "/gamma:0"], 39 | "Epsilon" -> 0.001 40 | ]; 41 | 42 | 43 | getBlock[i_] := GeneralUtilities`Scope[ 44 | path = NetChain@{ 45 | getCW["conv2d_" <> ToString[i], 1, 1], 46 | getIN["instance_normalization_" <> ToString[i - 6]], 47 | Ramp, 48 | getCW["conv2d_" <> ToString[i + 1], 1, 1], 49 | getIN["instance_normalization_" <> ToString[i - 5]] 50 | }; 51 | NetMerge[path, Plus, Expand -> True] 52 | ]; 53 | 54 | 55 | (* ::Subchapter:: *) 56 | (*Main*) 57 | 58 | 59 | mainNet = NetChain[{ 60 | { 61 | getCW["conv2d_8", 1, 3], 62 | getIN["instance_normalization_4"], 63 | Ramp 64 | }, 65 | { 66 | getCN["conv2d_9", 2, 1], 67 | getCW["conv2d_10", 1, 1], 68 | getIN["instance_normalization_5"], 69 | Ramp 70 | }, 71 | { 72 | getCN["conv2d_11", 2, 1], 73 | getCW["conv2d_12", 1, 1], 74 | getIN["instance_normalization_6"], 75 | Ramp 76 | }, 77 | NetChain@Table[getBlock[i], {i, 13, 27, 2}], 78 | { 79 | getDN["conv2d_transpose_1", 2, 0], 80 | PartLayer[{All, 2 ;; All, 2 ;; All}], 81 | getCW["conv2d_29", 1, 1], 82 | getIN["instance_normalization_23"], 83 | Ramp 84 | }, 85 | { 86 | getDN["conv2d_transpose_2", 2, 0], 87 
| getCW["conv2d_30", 1, 1], 88 | PartLayer[{All, 2 ;; All, 2 ;; All}], 89 | getIN["instance_normalization_24"], 90 | Ramp 91 | }, 92 | getCN["conv2d_31", 1, 3], 93 | LogisticSigmoid 94 | }, 95 | "Output" -> "Image" 96 | ] 97 | 98 | 99 | (* ::Subchapter:: *) 100 | (*Testing*) 101 | 102 | 103 | img = ExampleData[{"TestImage", "House"}] 104 | newNet = NetReplacePart[mainNet, "Input" -> NetEncoder[{"Image", ImageDimensions@img}]]; 105 | newNet[img, TargetDevice -> "GPU"] 106 | 107 | 108 | (* ::Subchapter:: *) 109 | (*Export Model*) 110 | 111 | 112 | Export[$name <> ".MAT", mainNet, "WXF", PerformanceGoal -> "Speed"] 113 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/Wide-ResNet on CIFAR10/WRN28-10 tested on CIFAR10 TestSet.md: -------------------------------------------------------------------------------- 1 | # WRN28-10 trained on CIFAR10 2 | ![Task](https://img.shields.io/badge/Task-Classifation-Orange.svg) 3 | ![Size](https://img.shields.io/badge/Size-145.98%20MB-blue.svg) 4 | ![Accuracy](https://img.shields.io/badge/Accuracy-97.119%25-brightgreen.svg) 5 | ![Speed](https://img.shields.io/badge/Speed-3.713%20ms-ff69b4.svg) 6 | 7 | Automatically generated on Mon 19 Nov 2018 12:49:42 8 | 9 | ## Network structure: 10 | - Network Size: **145.989 MB** 11 | - Parameters: **36 497 222** 12 | - Nodes Count: **95** 13 | - Speed: **3.713 ms/sample** 14 | - Layers: 15 | - AggregationLayer: **1** 16 | - BatchNormalizationLayer: **27** 17 | - ConvolutionLayer: **28** 18 | - ElementwiseLayer: **25** 19 | - LinearLayer: **1** 20 | - SoftmaxLayer: **1** 21 | - ThreadingLayer: **12** 22 | 23 | 24 | ## Accuracy Curve 25 | ![Classification Curve.png](https://i.loli.net/2018/11/19/5bf240f955722.png) 26 | 27 | ![High Precision Classification Curve.png](https://i.loli.net/2018/11/19/5bf240f9cf50e.png) 28 | 29 | ## Main Indicator 30 | - Top-1: **97.1199%** 31 | - Top-2: **99.1600%** 32 | - Top-3: **99.6900%** 33 | - Top-5: **99.9400%** 34 | - LogLikelihood: **-1246.3** 35 | - CrossEntropyLoss: **0.12463** 36 | - ProbabilityLoss: **0.00663905** 37 | - MeanProbability: **96.7439%** 38 | - GeometricMeanProbability: **88.2823%** 39 | - VarianceProbability: **0.0246335** 40 | - ScottPi: **0.968** 41 | - CohenKappa: **0.968** 42 | - RejectionRate: **0.00000%** 43 | 44 | ![Accuracy Rejection Curve.png](https://i.loli.net/2018/11/19/5bf240f9d0c64.png) 45 | 46 | ## Class Indicator 47 | | Class | Count | TPRate | TNRate | FPRate | FNRate | F1Score | 48 | |-------|-------|--------|--------|--------|--------|---------| 49 | | airplane | 1000 | 97.5000% | 99.6666% | 0.33333% | 2.50000% | 0.97256 | 50 | | automobile | 1000 | 98.7000% | 99.8666% | 0.13333% | 1.30000% | 0.98749 | 51 | | bird | 1000 | 96.8999% | 99.6777% | 0.32222% | 3.10000% | 0.96996 | 52 | | cat | 1000 | 92.5000% | 99.3222% | 0.67777% | 7.50000% | 0.93152 | 53 | | deer | 1000 | 98.3000% | 99.6222% | 0.37777% | 1.70000% | 0.97471 | 54 | | dog | 1000 | 93.7000% | 99.3888% | 0.61111% | 6.30000% | 0.94076 | 55 | | frog | 1000 | 98.5000% | 99.8555% | 0.14444% | 1.50000% | 0.98598 | 56 | | horse | 1000 | 98.4000% | 99.8555% | 0.14444% | 1.60000% | 0.98547 | 57 | | ship | 1000 | 98.6000% | 99.6777% | 0.32222% | 1.40000% | 0.97866 | 58 | | truck | 1000 | 98.1000% | 99.8666% | 0.13333% | 1.90000% | 0.98444 | 59 | 60 | ## Hard Class 61 | ![ConfusionMatrix.png](https://i.loli.net/2018/11/19/5bf240f9d0399.png) 62 | 63 | ## Evaluation Report 64 | | Index | TestID | Result | Time | MemoryChange | 65 | 
|-------|--------|--------|------|--------------| 66 | | 1 | Dependency Check | Success | 2.13078 s | +5.09124 MB | 67 | | 2 | Pre-define | Success | 0.00000 s | +0.00159 MB | 68 | | 3 | GPU Warm-Up | Success | 4.23514 s | +54.1124 MB | 69 | | 4 | Loading Model | Success | 1.82257 s | +146.422 MB | 70 | | 5 | Loading Data | Success | 0.35257 s | +42.3258 MB | 71 | | 6 | Benchmark Test | Success | 37.7018 s | +1.54736 MB | 72 | | 7 | Result Dump | Success | 0.10576 s | -0.08278 MB | 73 | | 8 | Analyzing | Success | 9.34498 s | +26.2562 MB | 74 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/ResNet on CIFAR10/ResnetV2-20 tested on CIFAR10 TestSet.md: -------------------------------------------------------------------------------- 1 | # ResnetV2-20 trained on CIFAR10 2 | ![Task](https://img.shields.io/badge/Task-Classifation-Orange.svg) 3 | ![Size](https://img.shields.io/badge/Size-1.0946%20MB-blue.svg) 4 | ![Accuracy](https://img.shields.io/badge/Accuracy-92.390%25-brightgreen.svg) 5 | ![Speed](https://img.shields.io/badge/Speed-0.210%20ms-ff69b4.svg) 6 | 7 | Automatically generated on Sun 18 Nov 2018 20:20:15 8 | 9 | ## Network structure: 10 | - Network Size: **1.09468 MB** 11 | - Parameters: **273 670** 12 | - Nodes Count: **72** 13 | - Speed: **0.210 ms/sample** 14 | - Layers: 15 | - AggregationLayer: **1** 16 | - BatchNormalizationLayer: **20** 17 | - ConvolutionLayer: **21** 18 | - ElementwiseLayer: **19** 19 | - LinearLayer: **1** 20 | - SoftmaxLayer: **1** 21 | - ThreadingLayer: **9** 22 | 23 | 24 | ## Accuracy Curve 25 | ![Classification Curve.png](https://i.loli.net/2018/11/17/5bf021480f2eb.png) 26 | 27 | ![High Precision Classification Curve.png](https://i.loli.net/2018/11/17/5bf02148da6f8.png) 28 | 29 | ## Main Indicator 30 | - Top-1: **92.3900%** 31 | - Top-2: **97.6900%** 32 | - Top-3: **99.0200%** 33 | - Top-5: **99.7900%** 34 | - LogLikelihood: **-2438.72** 35 | - CrossEntropyLoss: **0.243872** 36 | - ProbabilityLoss: **0.0338326** 37 | - MeanProbability: **90.4425%** 38 | - GeometricMeanProbability: **78.3588%** 39 | - VarianceProbability: **0.0555114** 40 | - ScottPi: **0.915443** 41 | - CohenKappa: **0.915444** 42 | - RejectionRate: **0.00000%** 43 | 44 | ![Accuracy Rejection Curve.png](https://i.loli.net/2018/11/17/5bf0214b8b2b2.png) 45 | 46 | ## Class Indicator 47 | | Class | Count | TPRate | TNRate | FPRate | FNRate | F1Score | 48 | |-------|-------|--------|--------|--------|--------|---------| 49 | | airplane | 1000 | 94.6000% | 98.8777% | 1.12222% | 5.40000% | 0.92427 | 50 | | automobile | 1000 | 96.5000% | 99.6333% | 0.36666% | 3.50000% | 0.96596 | 51 | | bird | 1000 | 89.8000% | 98.8888% | 1.11111% | 10.2000% | 0.89889 | 52 | | cat | 1000 | 83.3999% | 98.3777% | 1.62222% | 16.6000% | 0.84242 | 53 | | deer | 1000 | 95.1000% | 98.9777% | 1.02222% | 4.90000% | 0.93098 | 54 | | dog | 1000 | 87.2000% | 98.6666% | 1.33333% | 12.8000% | 0.87550 | 55 | | frog | 1000 | 91.7000% | 99.6222% | 0.37777% | 8.30000% | 0.94003 | 56 | | horse | 1000 | 93.8999% | 99.5777% | 0.42222% | 6.10000% | 0.94992 | 57 | | ship | 1000 | 96.6000% | 99.3666% | 0.63333% | 3.40000% | 0.95501 | 58 | | truck | 1000 | 95.1000% | 99.5555% | 0.44444% | 4.90000% | 0.95529 | 59 | 60 | ## Hard Class 61 | ![ConfusionMatrix.png](https://i.loli.net/2018/11/17/5bf0214b8cf8b.png) 62 | 63 | ## Evaluation Report 64 | | Index | TestID | Result | Time | MemoryChange | 65 | |-------|--------|--------|------|--------------| 66 | | 1 | Dependency Check 
| Success | 2.24699 s | +5.10072 MB | 67 | | 2 | Pre-define | Success | 0.00000 s | +0.00170 MB | 68 | | 3 | GPU Warm-Up | Success | 4.95176 s | +65.3663 MB | 69 | | 4 | Loading Model | Success | 0.03194 s | +1.45080 MB | 70 | | 5 | Loading Data | Success | 0.34408 s | +42.3258 MB | 71 | | 6 | Benchmark Test | Success | 2.58708 s | +0.60913 MB | 72 | | 7 | Result Dump | Success | 0.01900 s | -0.06256 MB | 73 | | 8 | Analyzing | Success | 8.74064 s | +25.9704 MB | 74 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/ResNet on CIFAR10/ResnetV2-56 tested on CIFAR10 TestSet.md: -------------------------------------------------------------------------------- 1 | # ResnetV2-56 trained on CIFAR10 2 | ![Task](https://img.shields.io/badge/Task-Classifation-Orange.svg) 3 | ![Size](https://img.shields.io/badge/Size-3.4386%20MB-blue.svg) 4 | ![Accuracy](https://img.shields.io/badge/Accuracy-94.460%25-brightgreen.svg) 5 | ![Speed](https://img.shields.io/badge/Speed-0.354%20ms-ff69b4.svg) 6 | 7 | Automatically generated on Sun 18 Nov 2018 20:25:59 8 | 9 | ## Network structure: 10 | - Network Size: **3.43862 MB** 11 | - Parameters: **859 654** 12 | - Nodes Count: **198** 13 | - Speed: **0.354 ms/sample** 14 | - Layers: 15 | - AggregationLayer: **1** 16 | - BatchNormalizationLayer: **56** 17 | - ConvolutionLayer: **57** 18 | - ElementwiseLayer: **55** 19 | - LinearLayer: **1** 20 | - SoftmaxLayer: **1** 21 | - ThreadingLayer: **27** 22 | 23 | 24 | ## Accuracy Curve 25 | ![Classification Curve.png](https://i.loli.net/2018/11/18/5bf15a6e7b995.png) 26 | 27 | ![High Precision Classification Curve.png](https://i.loli.net/2018/11/18/5bf15a69eb501.png) 28 | 29 | ## Main Indicator 30 | - Top-1: **94.4600%** 31 | - Top-2: **98.1100%** 32 | - Top-3: **99.2600%** 33 | - Top-5: **99.8900%** 34 | - LogLikelihood: **-2112.27** 35 | - CrossEntropyLoss: **0.211227** 36 | - ProbabilityLoss: **0.0172552** 37 | - MeanProbability: **93.4609%** 38 | - GeometricMeanProbability: **80.9590%** 39 | - VarianceProbability: **0.0440504** 40 | - ScottPi: **0.938444** 41 | - CohenKappa: **0.938444** 42 | - RejectionRate: **0.00000%** 43 | 44 | ![Accuracy Rejection Curve.png](https://i.loli.net/2018/11/18/5bf15a6e7d530.png) 45 | 46 | ## Class Indicator 47 | | Class | Count | TPRate | TNRate | FPRate | FNRate | F1Score | 48 | |-------|-------|--------|--------|--------|--------|---------| 49 | | airplane | 1000 | 95.7000% | 99.3000% | 0.70000% | 4.30000% | 0.94752 | 50 | | automobile | 1000 | 97.1000% | 99.7222% | 0.27777% | 2.90000% | 0.97294 | 51 | | bird | 1000 | 93.7000% | 99.0222% | 0.97777% | 6.30000% | 0.92543 | 52 | | cat | 1000 | 87.3000% | 98.7777% | 1.22222% | 12.7000% | 0.88048 | 53 | | deer | 1000 | 95.6000% | 99.4111% | 0.58888% | 4.39999% | 0.95171 | 54 | | dog | 1000 | 90.8000% | 99.0111% | 0.98888% | 9.20000% | 0.90936 | 55 | | frog | 1000 | 95.7000% | 99.7333% | 0.26666% | 4.30000% | 0.96617 | 56 | | horse | 1000 | 95.3999% | 99.6555% | 0.34444% | 4.60000% | 0.96120 | 57 | | ship | 1000 | 97.1000% | 99.5555% | 0.44444% | 2.90000% | 0.96568 | 58 | | truck | 1000 | 96.2000% | 99.6555% | 0.34444% | 3.80000% | 0.96537 | 59 | 60 | ## Hard Class 61 | ![ConfusionMatrix.png](https://i.loli.net/2018/11/18/5bf15a6f03734.png) 62 | 63 | ## Evaluation Report 64 | | Index | TestID | Result | Time | MemoryChange | 65 | |-------|--------|--------|------|--------------| 66 | | 1 | Dependency Check | Success | 2.19018 s | +5.10106 MB | 67 | | 2 | Pre-define | Success | 
0.00000 s | +0.00170 MB | 68 | | 3 | GPU Warm-Up | Success | 5.06346 s | +65.3663 MB | 69 | | 4 | Loading Model | Success | 0.21841 s | +4.35850 MB | 70 | | 5 | Loading Data | Success | 0.34707 s | +42.3258 MB | 71 | | 6 | Benchmark Test | Success | 4.03325 s | +0.92709 MB | 72 | | 7 | Result Dump | Success | 0.01592 s | -0.17551 MB | 73 | | 8 | Analyzing | Success | 8.69576 s | +26.0071 MB | 74 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/Wide-ResNet on CIFAR10/WRN16-10 tested on CIFAR10 TestSet.md: -------------------------------------------------------------------------------- 1 | # WRN16-10 trained on CIFAR10 2 | ![Task](https://img.shields.io/badge/Task-Classifation-Orange.svg) 3 | ![Size](https://img.shields.io/badge/Size-68.502%20MB-blue.svg) 4 | ![Accuracy](https://img.shields.io/badge/Accuracy-96.500%25-brightgreen.svg) 5 | ![Speed](https://img.shields.io/badge/Speed-1.616%20ms-ff69b4.svg) 6 | 7 | Automatically generated on Mon 19 Nov 2018 12:46:10 8 | 9 | ## Network structure: 10 | - Network Size: **68.5028 MB** 11 | - Parameters: **17 125 702** 12 | - Nodes Count: **53** 13 | - Speed: **1.616 ms/sample** 14 | - Layers: 15 | - AggregationLayer: **1** 16 | - BatchNormalizationLayer: **15** 17 | - ConvolutionLayer: **16** 18 | - ElementwiseLayer: **13** 19 | - LinearLayer: **1** 20 | - SoftmaxLayer: **1** 21 | - ThreadingLayer: **6** 22 | 23 | 24 | ## Accuracy Curve 25 | ![Classification Curve.png](https://i.loli.net/2018/11/19/5bf2402dc95e8.png) 26 | 27 | ![High Precision Classification Curve.png](https://i.loli.net/2018/11/19/5bf2402decafc.png) 28 | 29 | ## Main Indicator 30 | - Top-1: **96.5000%** 31 | - Top-2: **99.0099%** 32 | - Top-3: **99.6300%** 33 | - Top-5: **99.8900%** 34 | - LogLikelihood: **-1152.98** 35 | - CrossEntropyLoss: **0.115298** 36 | - ProbabilityLoss: **0.0171053** 37 | - MeanProbability: **95.3702%** 38 | - GeometricMeanProbability: **89.1100%** 39 | - VarianceProbability: **0.0274391** 40 | - ScottPi: **0.961111** 41 | - CohenKappa: **0.961111** 42 | - RejectionRate: **0.00000%** 43 | 44 | ![Accuracy Rejection Curve.png](https://i.loli.net/2018/11/19/5bf2402e13374.png) 45 | 46 | ## Class Indicator 47 | | Class | Count | TPRate | TNRate | FPRate | FNRate | F1Score | 48 | |-------|-------|--------|--------|--------|--------|---------| 49 | | airplane | 1000 | 97.8000% | 99.5111% | 0.48888% | 2.19999% | 0.96735 | 50 | | automobile | 1000 | 97.7000% | 99.8222% | 0.17777% | 2.30000% | 0.98043 | 51 | | bird | 1000 | 94.8000% | 99.5222% | 0.47777% | 5.20000% | 0.95228 | 52 | | cat | 1000 | 93.0000% | 99.1222% | 0.87777% | 7.00000% | 0.92583 | 53 | | deer | 1000 | 97.7000% | 99.6111% | 0.38888% | 2.30000% | 0.97117 | 54 | | dog | 1000 | 92.7000% | 99.3888% | 0.61111% | 7.30000% | 0.93541 | 55 | | frog | 1000 | 98.2000% | 99.7666% | 0.23333% | 1.79999% | 0.98052 | 56 | | horse | 1000 | 97.3999% | 99.9222% | 0.07777% | 2.60000% | 0.98334 | 57 | | ship | 1000 | 98.2000% | 99.6666% | 0.33333% | 1.79999% | 0.97614 | 58 | | truck | 1000 | 97.5000% | 99.7777% | 0.22222% | 2.50000% | 0.97744 | 59 | 60 | ## Hard Class 61 | ![ConfusionMatrix.png](https://i.loli.net/2018/11/19/5bf2402e14fdc.png) 62 | 63 | ## Evaluation Report 64 | | Index | TestID | Result | Time | MemoryChange | 65 | |-------|--------|--------|------|--------------| 66 | | 1 | Dependency Check | Success | 2.15999 s | +5.22214 MB | 67 | | 2 | Pre-define | Success | 0.00000 s | +0.00177 MB | 68 | | 3 | GPU Warm-Up | Success | 5.75713 s | 
+52.9652 MB | 69 | | 4 | Loading Model | Success | 1.24082 s | +68.7620 MB | 70 | | 5 | Loading Data | Success | 0.37262 s | +42.3238 MB | 71 | | 6 | Benchmark Test | Success | 16.7268 s | +1.40838 MB | 72 | | 7 | Result Dump | Success | 0.09674 s | -0.04493 MB | 73 | | 8 | Analyzing | Success | 9.13358 s | +24.0519 MB | 74 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/Wide-ResNet on CIFAR10/WRN40-8 tested on CIFAR10 TestSet.md: -------------------------------------------------------------------------------- 1 | # WRN40-8 trained on CIFAR10 2 | ![Task](https://img.shields.io/badge/Task-Classifation-Orange.svg) 3 | ![Size](https://img.shields.io/badge/Size-143.07%20MB-blue.svg) 4 | ![Accuracy](https://img.shields.io/badge/Accuracy-97.170%25-brightgreen.svg) 5 | ![Speed](https://img.shields.io/badge/Speed-3.631%20ms-ff69b4.svg) 6 | 7 | Automatically generated on Mon 19 Nov 2018 12:52:01 8 | 9 | ## Network structure: 10 | - Network Size: **143.08 MB** 11 | - Parameters: **35 769 926** 12 | - Nodes Count: **137** 13 | - Speed: **3.631 ms/sample** 14 | - Layers: 15 | - AggregationLayer: **1** 16 | - BatchNormalizationLayer: **39** 17 | - ConvolutionLayer: **40** 18 | - ElementwiseLayer: **37** 19 | - LinearLayer: **1** 20 | - SoftmaxLayer: **1** 21 | - ThreadingLayer: **18** 22 | 23 | 24 | ## Accuracy Curve 25 | ![Classification Curve.png](https://i.loli.net/2018/11/19/5bf24181d8906.png) 26 | 27 | ![High Precision Classification Curve.png](https://i.loli.net/2018/11/19/5bf24182219a3.png) 28 | 29 | ## Main Indicator 30 | - Top-1: **97.1700%** 31 | - Top-2: **99.1500%** 32 | - Top-3: **99.6700%** 33 | - Top-5: **99.8900%** 34 | - LogLikelihood: **-1256.93** 35 | - CrossEntropyLoss: **0.125693** 36 | - ProbabilityLoss: **0.00630461** 37 | - MeanProbability: **96.8052%** 38 | - GeometricMeanProbability: **88.1885%** 39 | - VarianceProbability: **0.0241112** 40 | - ScottPi: **0.968555** 41 | - CohenKappa: **0.968556** 42 | - RejectionRate: **0.00000%** 43 | 44 | ![Accuracy Rejection Curve.png](https://i.loli.net/2018/11/19/5bf24182612a1.png) 45 | 46 | ## Class Indicator 47 | | Class | Count | TPRate | TNRate | FPRate | FNRate | F1Score | 48 | |-------|-------|--------|--------|--------|--------|---------| 49 | | airplane | 1000 | 98.3000% | 99.6666% | 0.33333% | 1.70000% | 0.97665 | 50 | | automobile | 1000 | 97.8999% | 99.8888% | 0.11111% | 2.10000% | 0.98441 | 51 | | bird | 1000 | 95.6000% | 99.7333% | 0.26666% | 4.39999% | 0.96565 | 52 | | cat | 1000 | 94.3000% | 99.2777% | 0.72222% | 5.70000% | 0.93924 | 53 | | deer | 1000 | 97.8999% | 99.5666% | 0.43333% | 2.10000% | 0.97026 | 54 | | dog | 1000 | 93.8999% | 99.4777% | 0.52222% | 6.10000% | 0.94561 | 55 | | frog | 1000 | 98.3000% | 99.8555% | 0.14444% | 1.70000% | 0.98496 | 56 | | horse | 1000 | 98.2000% | 99.9000% | 0.10000% | 1.79999% | 0.98643 | 57 | | ship | 1000 | 99.1000% | 99.7000% | 0.30000% | 0.89999% | 0.98216 | 58 | | truck | 1000 | 98.2000% | 99.7888% | 0.21111% | 1.79999% | 0.98150 | 59 | 60 | ## Hard Class 61 | ![ConfusionMatrix.png](https://i.loli.net/2018/11/19/5bf2418262dda.png) 62 | 63 | ## Evaluation Report 64 | | Index | TestID | Result | Time | MemoryChange | 65 | |-------|--------|--------|------|--------------| 66 | | 1 | Dependency Check | Success | 2.27033 s | +5.10068 MB | 67 | | 2 | Pre-define | Success | 0.00000 s | +0.00159 MB | 68 | | 3 | GPU Warm-Up | Success | 4.71256 s | +65.3660 MB | 69 | | 4 | Loading Model | Success | 1.77886 s | +143.682 MB | 
70 | | 5 | Loading Data | Success | 0.34904 s | +42.3258 MB | 71 | | 6 | Benchmark Test | Success | 36.8359 s | +0.77343 MB | 72 | | 7 | Result Dump | Success | 0.07583 s | -0.12036 MB | 73 | | 8 | Analyzing | Success | 8.56566 s | +26.2763 MB | 74 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/ResNet on CIFAR10/ResnetV2-110 tested on CIFAR10 TestSet.md: -------------------------------------------------------------------------------- 1 | # ResnetV2-110 trained on CIFAR10 2 | ![Task](https://img.shields.io/badge/Task-Classifation-Orange.svg) 3 | ![Size](https://img.shields.io/badge/Size-6.9545%20MB-blue.svg) 4 | ![Accuracy](https://img.shields.io/badge/Accuracy-95.270%25-brightgreen.svg) 5 | ![Speed](https://img.shields.io/badge/Speed-0.532%20ms-ff69b4.svg) 6 | 7 | Automatically generated on Sun 18 Nov 2018 20:31:59 8 | 9 | ## Network structure: 10 | - Network Size: **6.95452 MB** 11 | - Parameters: **1 738 630** 12 | - Nodes Count: **387** 13 | - Speed: **0.532 ms/sample** 14 | - Layers: 15 | - AggregationLayer: **1** 16 | - BatchNormalizationLayer: **110** 17 | - ConvolutionLayer: **111** 18 | - ElementwiseLayer: **109** 19 | - LinearLayer: **1** 20 | - SoftmaxLayer: **1** 21 | - ThreadingLayer: **54** 22 | 23 | 24 | ## Accuracy Curve 25 | ![Classification Curve.png](https://i.loli.net/2018/11/18/5bf15f5518d08.png) 26 | 27 | ![High Precision Classification Curve.png](https://i.loli.net/2018/11/18/5bf15f555eaa4.png) 28 | 29 | ## Main Indicator 30 | - Top-1: **95.2700%** 31 | - Top-2: **98.5300%** 32 | - Top-3: **99.5100%** 33 | - Top-5: **99.8900%** 34 | - LogLikelihood: **-1911.91** 35 | - CrossEntropyLoss: **0.191191** 36 | - ProbabilityLoss: **0.0111677** 37 | - MeanProbability: **94.6614%** 38 | - GeometricMeanProbability: **82.5974%** 39 | - VarianceProbability: **0.0387726** 40 | - ScottPi: **0.947444** 41 | - CohenKappa: **0.947444** 42 | - RejectionRate: **0.00000%** 43 | 44 | ![Accuracy Rejection Curve.png](https://i.loli.net/2018/11/18/5bf15f55a304a.png) 45 | 46 | ## Class Indicator 47 | | Class | Count | TPRate | TNRate | FPRate | FNRate | F1Score | 48 | |-------|-------|--------|--------|--------|--------|---------| 49 | | airplane | 1000 | 95.8000% | 99.3444% | 0.65555% | 4.20000% | 0.94992 | 50 | | automobile | 1000 | 98.2000% | 99.7444% | 0.25555% | 1.79999% | 0.97955 | 51 | | bird | 1000 | 92.0000% | 99.3777% | 0.62222% | 8.00000% | 0.93117 | 52 | | cat | 1000 | 90.7000% | 98.8000% | 1.20000% | 9.30000% | 0.90024 | 53 | | deer | 1000 | 97.1000% | 99.3666% | 0.63333% | 2.90000% | 0.95759 | 54 | | dog | 1000 | 92.0000% | 99.1111% | 0.88888% | 8.00000% | 0.92000 | 55 | | frog | 1000 | 96.3999% | 99.8111% | 0.18888% | 3.59999% | 0.97324 | 56 | | horse | 1000 | 96.3999% | 99.8111% | 0.18888% | 3.59999% | 0.97324 | 57 | | ship | 1000 | 97.7000% | 99.6222% | 0.37777% | 2.30000% | 0.97165 | 58 | | truck | 1000 | 96.3999% | 99.7555% | 0.24444% | 3.59999% | 0.97079 | 59 | 60 | ## Hard Class 61 | ![ConfusionMatrix.png](https://i.loli.net/2018/11/18/5bf15f570503b.png) 62 | 63 | ## Evaluation Report 64 | | Index | TestID | Result | Time | MemoryChange | 65 | |-------|--------|--------|------|--------------| 66 | | 1 | Dependency Check | Success | 2.23602 s | +0.03504 MB | 67 | | 2 | Pre-define | Success | 0.00099 s | +0.00159 MB | 68 | | 3 | GPU Warm-Up | Success | 4.35339 s | +14.6607 MB | 69 | | 4 | Loading Model | Success | 0.25036 s | +8.71920 MB | 70 | | 5 | Loading Data | Success | 0.34408 s | +42.3246 MB | 71 | | 
6 | Benchmark Test | Success | 5.79950 s | +4.05528 MB | 72 | | 7 | Result Dump | Success | 0.02194 s | -0.34230 MB | 73 | | 8 | Analyzing | Success | 8.34576 s | +26.2853 MB | 74 | -------------------------------------------------------------------------------- /ImageGeneration/Anime/ComixGANs Photo2Comic Style Transfer/ComixGAN Comic Style Transfer Gamma.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | (* ::Subchapter:: *) 4 | (*Import Weights*) 5 | 6 | 7 | SetDirectory@NotebookDirectory[]; 8 | Clear["Global`*"]; 9 | << DeepMath`; 10 | DeepMath`NetMerge; 11 | $name = "ComixGAN Comic Style Transfer Gamma"; 12 | params = Import[$name <> ".h5", "Data"]; 13 | 14 | 15 | (* ::Subchapter:: *) 16 | (*Pre-defined Structure*) 17 | 18 | 19 | $NCHW = TransposeLayer[{1<->4, 2<->3, 3<->4}]; 20 | getName[s_] := TemplateApply["/model_weights/`1`/`1`", {s}]; 21 | getPad[n_] := PaddingLayer[{{0, 0}, {n, n}, {n, n}}, Padding -> "Reflected"]; 22 | getCW[name_, s_, p_, k_ : 1] := ConvolutionLayer[ 23 | "Weights" -> k * $NCHW@params[getName[name] <> "/kernel:0"], 24 | "Biases" -> None, "Stride" -> s, "PaddingSize" -> p 25 | ]; 26 | getCN[name_, s_, p_, k_ : 1] := ConvolutionLayer[ 27 | "Weights" -> k * $NCHW@params[getName[name] <> "/kernel:0"], 28 | "Biases" -> k * params[getName[name] <> "/bias:0"], 29 | "Stride" -> s, "PaddingSize" -> p 30 | ]; 31 | getDN[name_, s_, p_] := DeconvolutionLayer[ 32 | "Weights" -> $NCHW@params[getName[name] <> "/kernel:0"], 33 | "Biases" -> params[getName[name] <> "/bias:0"], 34 | "Stride" -> s, "PaddingSize" -> p 35 | ]; 36 | getIN[name_] := NormalizationLayer[ 37 | "Biases" -> params[getName[name] <> "/beta:0"], 38 | "Scaling" -> params[getName[name] <> "/gamma:0"], 39 | "Epsilon" -> 0.001 40 | ]; 41 | 42 | 43 | getBlock[i_] := GeneralUtilities`Scope[ 44 | path = NetChain@{ 45 | getCW["conv2d_" <> ToString[i], 1, 1], 46 | getIN["instance_normalization_" <> ToString[i - 6]], 47 | Ramp, 48 | getCW["conv2d_" <> ToString[i + 1], 1, 1], 49 | getIN["instance_normalization_" <> ToString[i - 5]] 50 | }; 51 | NetMerge[path, Plus, Expand -> True] 52 | ]; 53 | 54 | 55 | (* ::Subchapter:: *) 56 | (*Main*) 57 | 58 | 59 | mainNet = NetChain[{ 60 | { 61 | getCW["conv2d_8", 1, 3], 62 | getIN["instance_normalization_4"], 63 | Ramp 64 | }, 65 | { 66 | getCN["conv2d_9", 2, 1], 67 | getCW["conv2d_10", 1, 1], 68 | getIN["instance_normalization_5"], 69 | Ramp 70 | }, 71 | { 72 | getCN["conv2d_11", 2, 1], 73 | getCW["conv2d_12", 1, 1], 74 | getIN["instance_normalization_6"], 75 | Ramp 76 | }, 77 | NetChain@Table[getBlock[i], {i, 13, 27, 2}], 78 | { 79 | getDN["conv2d_transpose_1", 2, 0], 80 | PartLayer[{All, 2 ;; All, 2 ;; All}], 81 | getCW["conv2d_29", 1, 1], 82 | getIN["instance_normalization_23"], 83 | Ramp 84 | }, 85 | { 86 | getDN["conv2d_transpose_2", 2, 0], 87 | getCW["conv2d_30", 1, 1], 88 | PartLayer[{All, 2 ;; All, 2 ;; All}], 89 | getIN["instance_normalization_24"], 90 | Ramp 91 | }, 92 | getCN["conv2d_31", 1, 3], 93 | LogisticSigmoid 94 | }, 95 | "Input" -> NetEncoder[{"Image", 512}], 96 | "Output" -> "Image" 97 | ] 98 | 99 | 100 | (* ::Subchapter:: *) 101 | (*Testing*) 102 | 103 | 104 | img = ExampleData[{"TestImage", "House"}] 105 | newNet = NetReplacePart[mainNet, "Input" -> NetEncoder[{"Image", ImageDimensions@img}]]; 106 | newNet[img, TargetDevice -> "GPU"] 107 | 108 | 109 | (* ::Subchapter:: *) 110 | (*Export Model*) 111 | 112 | 113 | Export[$name <> ".MAT", mainNet, "WXF", PerformanceGoal -> "Speed"] 114 | 
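(* Usage sketch added for illustration (not part of the original script; the file name follows from the Export call above, and stylizeAny is a hypothetical helper): reload the WXF-encoded bundle and restyle an image of arbitrary size by swapping in a matching encoder, as the Testing section does. *)
reloaded = Import["ComixGAN Comic Style Transfer Gamma.MAT", "WXF"];
stylizeAny[img_Image] := NetReplacePart[
    reloaded,
    "Input" -> NetEncoder[{"Image", ImageDimensions@img}]
][img]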
-------------------------------------------------------------------------------- /ImageEnhancement/Debluring/SRN Deblur on GOPRO/SRN Deblur trained on GOPRO.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | << NeuralNetworks` 5 | << MXNetLink` 6 | << DeepMath` 7 | DateString[] 8 | 9 | 10 | (* ::Subitem:: *) 11 | (*Fri 16 Nov 2018 00:26:35*) 12 | 13 | 14 | (* ::Subchapter:: *) 15 | (*Import Weights*) 16 | 17 | 18 | raw = Import["color.wxf"]; 19 | params[name_String] := Block[ 20 | {$NCHW, prefix, input}, 21 | $NCHW = TransposeLayer[{1<->4, 2<->3, 3<->4}]; 22 | prefix = "g_net/"; 23 | input = Normal@raw[prefix <> name]; 24 | Switch[ 25 | Length@Dimensions@input, 26 | 1, RawArray["Real32", input], 27 | 4, RawArray["Real32", $NCHW[input]], 28 | _, RawArray["Real32", input] 29 | ] 30 | ] 31 | 32 | 33 | (* ::Subchapter:: *) 34 | (*Encoder & Decoder*) 35 | 36 | 37 | (* ::Subchapter:: *) 38 | (*Pre-defined Structure*) 39 | 40 | 41 | getCN[name_String, p_, s_, ops___] := ConvolutionLayer[ 42 | "Weights" -> params[name <> "/weights"], 43 | "Biases" -> params[name <> "/biases"], 44 | "PaddingSize" -> p, "Stride" -> s, ops 45 | ]; 46 | getDN[name_String, p_, s_, ops___] := DeconvolutionLayer[ 47 | "Weights" -> params[name <> "/weights"], 48 | "Biases" -> params[name <> "/biases"], 49 | "PaddingSize" -> p, "Stride" -> s, ops 50 | ]; 51 | getEncBlock[i_, j_] := Block[ 52 | {path}, 53 | path = NetChain@{ 54 | getCN["enc" <> ToString[i] <> "_" <> ToString[j] <> "/conv1", 2, 1], 55 | ElementwiseLayer["ReLU"], 56 | getCN["enc" <> ToString[i] <> "_" <> ToString[j] <> "/conv2", 2, 1] 57 | }; 58 | NetMerge[path, Expand -> All] 59 | ]; 60 | getEnc[i_, stride_ : 1] := Flatten[{ 61 | getCN["enc" <> ToString[i] <> "_1", 2, stride], 62 | ElementwiseLayer["ReLU"], 63 | Table[getEncBlock[i, j], {j, 2, 4}] 64 | }] // NetChain; 65 | getDecBlock[i_, j_] := Block[ 66 | {path}, 67 | path = NetChain@{ 68 | getCN["dec" <> ToString[i] <> "_" <> ToString[j] <> "/conv1", 2, 1], 69 | ElementwiseLayer["ReLU"], 70 | getCN["dec" <> ToString[i] <> "_" <> ToString[j] <> "/conv2", 2, 1] 71 | }; 72 | NetMerge[path, Expand -> All] 73 | ]; 74 | getDec[i_] := Flatten[{ 75 | Table[getDecBlock[i, j], {j, 3, 1, -1}], 76 | getDN["dec" <> ToString[i - 1] <> "_4", 1, 2], 77 | ElementwiseLayer["ReLU"] 78 | }] // NetChain; 79 | 80 | 81 | (* ::Subchapter:: *) 82 | (*Main*) 83 | 84 | 85 | mainNet = NetGraph[{ 86 | "Double" -> CatenateLayer[], 87 | "Enc_1" -> getEnc[1, 1], 88 | "Enc_2" -> getEnc[2, 2], 89 | "Enc_3" -> getEnc[3, 2], 90 | "Dec_3" -> getDec[3], 91 | "Add_3" -> ThreadingLayer[Plus], 92 | "Dec_2" -> getDec[2], 93 | "Add_2" -> ThreadingLayer[Plus], 94 | "Dec_1" -> Flatten@{ 95 | Table[getDecBlock[1, j], {j, 3, 1, -1}], 96 | getCN["dec1_0", 2, 1] 97 | } 98 | }, { 99 | {NetPort["Input"], NetPort["Input"]} -> "Double", 100 | "Double" -> "Enc_1" -> "Enc_2" -> "Enc_3" -> "Dec_3", 101 | {"Enc_2", "Dec_3"} -> "Add_3" -> "Dec_2", 102 | {"Enc_1", "Dec_2"} -> "Add_2" -> "Dec_1" 103 | }] 104 | 105 | 106 | (* ::Subchapter:: *) 107 | (*Test*) 108 | 109 | 110 | evalNet[img_] := NetChain[ 111 | {mainNet}, 112 | "Input" -> NetEncoder[{"Image", ImageDimensions@img}], 113 | "Output" -> NetDecoder["Image"] 114 | ][img, TargetDevice -> "GPU"] 115 | 116 | 117 | (* ::Subchapter:: *) 118 | (*Export Model*) 119 | 120 | 121 | Export["SRN Deblur trained on GOPRO.WXF", mainNet] 122 | -------------------------------------------------------------------------------- 
/ImageRecognition/Classifation/ResNet on ImageNet/Resnet18-V1b trained on ImageNet.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | << NeuralNetworks` 5 | << MXNetLink` 6 | << DeepMath` 7 | DateString[] 8 | 9 | 10 | (* ::Subitem:: *) 11 | (*Tue 11 Dec 2018 00:48:52*) 12 | 13 | 14 | (* ::Subchapter:: *) 15 | (*Import Weights*) 16 | 17 | 18 | params = NDArrayImport["imagenet_resnet18_v1b-0000.params"]; 19 | 20 | 21 | (* ::Subchapter:: *) 22 | (*Encoder & Decoder*) 23 | 24 | 25 | mShift = {0.485, 0.456, 0.406}; 26 | vShift = {0.229, 0.224, 0.225}^2; 27 | encoder = NetEncoder[{"Image", 224, "MeanImage" -> mShift, "VarianceImage" -> vShift}] 28 | decoder = NetExtract[NetModel["ResNet-50 Trained on ImageNet Competition Data"], "Output"] 29 | 30 | 31 | (* ::Subchapter::Closed:: *) 32 | (*Pre-defined Structure*) 33 | 34 | 35 | getCN[name_String, p_ : 1, s_ : 1] := ConvolutionLayer[ 36 | "Weights" -> params["arg:resnetv1b_" <> name <> "_weight"], 37 | "Biases" -> None, "PaddingSize" -> p, "Stride" -> s 38 | ]; 39 | getBN[name_String] := BatchNormalizationLayer[ 40 | "Epsilon" -> 1*^-5, 41 | "Beta" -> params["arg:resnetv1b_" <> name <> "_beta"], 42 | "Gamma" -> params["arg:resnetv1b_" <> name <> "_gamma"], 43 | "MovingMean" -> params["aux:resnetv1b_" <> name <> "_running_mean"], 44 | "MovingVariance" -> params["aux:resnetv1b_" <> name <> "_running_var"] 45 | ]; 46 | getBlock2[num_, j_ : 2] := Block[ 47 | {i = ToString@num, path1, path2}, 48 | path1 = NetChain2Graph@NetChain[{ 49 | getCN["layers" <> i <> "_conv0", 1, j], 50 | getBN["layers" <> i <> "_batchnorm0"], 51 | ElementwiseLayer["ReLU"], 52 | getCN["layers" <> i <> "_conv1", 1, 1], 53 | getBN["layers" <> i <> "_batchnorm1"] 54 | }]; 55 | path2 = NetChain2Graph@NetChain[{ 56 | getCN["down" <> i <> "_conv0", 0, j], 57 | getBN["down" <> i <> "_batchnorm0"] 58 | }]; 59 | NetFlatten@NetGraph[{NetMerge[{path1, path2}], Ramp}, {1 -> 2}] 60 | ]; 61 | getBlock3[num_, j_] := Block[ 62 | {i = ToString@num, path}, 63 | path = NetChain[{ 64 | getCN["layers" <> i <> "_conv" <> ToString[j], 1, 1], 65 | getBN["layers" <> i <> "_batchnorm" <> ToString[j]], 66 | ElementwiseLayer["ReLU"], 67 | getCN["layers" <> i <> "_conv" <> ToString[j + 1], 1, 1], 68 | getBN["layers" <> i <> "_batchnorm" <> ToString[j + 1]], 69 | ElementwiseLayer["ReLU"] 70 | }]; 71 | NetFlatten@NetGraph[{NetMerge@path, Ramp}, {1 -> 2}] 72 | ]; 73 | 74 | 75 | (* ::Subchapter:: *) 76 | (*Main*) 77 | 78 | 79 | extractor = NetChain[{ 80 | { 81 | getCN["conv0", 3, 2], 82 | getBN["batchnorm0"], 83 | ElementwiseLayer["ReLU"] 84 | }, 85 | PoolingLayer[{3, 3}, "Stride" -> 2, "PaddingSize" -> 1], 86 | Table[getBlock3[1, i], {i, 0, 2, 2}], 87 | getBlock2[2, 2], 88 | Table[getBlock3[2, i], {i, 2, 2, 2}], 89 | getBlock2[3, 2], 90 | Table[getBlock3[3, i], {i, 2, 2, 2}], 91 | getBlock2[4, 2], 92 | Table[getBlock3[4, i], {i, 2, 2, 2}], 93 | AggregationLayer[Mean] 94 | }]; 95 | classifier = LinearLayer[1000, 96 | "Weights" -> params["arg:resnetv1b_dense0_weight"], 97 | "Biases" -> params["arg:resnetv1b_dense0_bias"] 98 | ]; 99 | mainNet = NetChain[{ 100 | "Extractor" -> extractor, 101 | "Classifier" -> classifier, 102 | "Predictor" -> SoftmaxLayer[] 103 | }, 104 | "Input" -> encoder, 105 | "Output" -> decoder 106 | ] 107 | 108 | 109 | (* ::Subchapter:: *) 110 | (*Export Model*) 111 | 112 | 113 | Export["Resnet18-V1b trained on ImageNet.WXF", mainNet] 114 | 
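(* Sanity-check sketch added for illustration (not in the original script; "Test.jpg" stands for any local test image): query the converted net for its five most probable classes before shipping the WXF file. *)
probe = Import["Test.jpg"];
TakeLargest[mainNet[probe, "Probabilities"], 5] // Dataset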
-------------------------------------------------------------------------------- /ImageRecognition/Classifation/ResNet on ImageNet/Resnet34-V1b trained on ImageNet.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | << NeuralNetworks` 5 | << MXNetLink` 6 | << DeepMath` 7 | DateString[] 8 | 9 | 10 | (* ::Subitem:: *) 11 | (*Tue 11 Dec 2018 00:44:07*) 12 | 13 | 14 | (* ::Subchapter:: *) 15 | (*Import Weights*) 16 | 17 | 18 | params = NDArrayImport["imagenet_resnet34_v1b-0000.params"]; 19 | 20 | 21 | (* ::Subchapter:: *) 22 | (*Encoder & Decoder*) 23 | 24 | 25 | mShift = {0.485, 0.456, 0.406}; 26 | vShift = {0.229, 0.224, 0.225}^2; 27 | encoder = NetEncoder[{"Image", 224, "MeanImage" -> mShift, "VarianceImage" -> vShift}] 28 | decoder = NetExtract[NetModel["ResNet-50 Trained on ImageNet Competition Data"], "Output"] 29 | 30 | 31 | (* ::Subchapter::Closed:: *) 32 | (*Pre-defined Structure*) 33 | 34 | 35 | getCN[name_String, p_ : 1, s_ : 1] := ConvolutionLayer[ 36 | "Weights" -> params["arg:resnetv1b_" <> name <> "_weight"], 37 | "Biases" -> None, "PaddingSize" -> p, "Stride" -> s 38 | ]; 39 | getBN[name_String] := BatchNormalizationLayer[ 40 | "Epsilon" -> 1*^-5, 41 | "Beta" -> params["arg:resnetv1b_" <> name <> "_beta"], 42 | "Gamma" -> params["arg:resnetv1b_" <> name <> "_gamma"], 43 | "MovingMean" -> params["aux:resnetv1b_" <> name <> "_running_mean"], 44 | "MovingVariance" -> params["aux:resnetv1b_" <> name <> "_running_var"] 45 | ]; 46 | getBlock2[num_, j_ : 2] := Block[ 47 | {i = ToString@num, path1, path2}, 48 | path1 = NetChain2Graph@NetChain[{ 49 | getCN["layers" <> i <> "_conv0", 1, j], 50 | getBN["layers" <> i <> "_batchnorm0"], 51 | ElementwiseLayer["ReLU"], 52 | getCN["layers" <> i <> "_conv1", 1, 1], 53 | getBN["layers" <> i <> "_batchnorm1"] 54 | }]; 55 | path2 = NetChain2Graph@NetChain[{ 56 | getCN["down" <> i <> "_conv0", 0, j], 57 | getBN["down" <> i <> "_batchnorm0"] 58 | }]; 59 | NetFlatten@NetGraph[{NetMerge[{path1, path2}], Ramp}, {1 -> 2}] 60 | ]; 61 | getBlock3[num_, j_] := Block[ 62 | {i = ToString@num, path}, 63 | path = NetChain[{ 64 | getCN["layers" <> i <> "_conv" <> ToString[j], 1, 1], 65 | getBN["layers" <> i <> "_batchnorm" <> ToString[j]], 66 | ElementwiseLayer["ReLU"], 67 | getCN["layers" <> i <> "_conv" <> ToString[j + 1], 1, 1], 68 | getBN["layers" <> i <> "_batchnorm" <> ToString[j + 1]], 69 | ElementwiseLayer["ReLU"] 70 | }]; 71 | NetFlatten@NetGraph[{NetMerge@path, Ramp}, {1 -> 2}] 72 | ]; 73 | 74 | 75 | (* ::Subchapter:: *) 76 | (*Main*) 77 | 78 | 79 | extractor = NetChain[{ 80 | { 81 | getCN["conv0", 3, 2], 82 | getBN["batchnorm0"], 83 | ElementwiseLayer["ReLU"] 84 | }, 85 | PoolingLayer[{3, 3}, "Stride" -> 2, "PaddingSize" -> 1], 86 | Table[getBlock3[1, i], {i, 0, 4, 2}], 87 | getBlock2[2, 2], 88 | Table[getBlock3[2, i], {i, 2, 6, 2}], 89 | getBlock2[3, 2], 90 | Table[getBlock3[3, i], {i, 2, 10, 2}], 91 | getBlock2[4, 2], 92 | Table[getBlock3[4, i], {i, 2, 4, 2}], 93 | AggregationLayer[Mean] 94 | }]; 95 | classifier = LinearLayer[1000, 96 | "Weights" -> params["arg:resnetv1b_dense0_weight"], 97 | "Biases" -> params["arg:resnetv1b_dense0_bias"] 98 | ]; 99 | mainNet = NetChain[{ 100 | "Extractor" -> extractor, 101 | "Classifier" -> classifier, 102 | "Predictor" -> SoftmaxLayer[] 103 | }, 104 | "Input" -> encoder, 105 | "Output" -> decoder 106 | ] 107 | 108 | 109 | (* ::Subchapter:: *) 110 | (*Export Model*) 111 | 112 | 113 | Export["Resnet34-V1b trained on 
ImageNet.WXF", mainNet] 114 | -------------------------------------------------------------------------------- /ImageRecognition/Anime/ResNets trained on Danbooru2018/ResNet-20 trained on Danbooru2018.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | (* ::Subchapter:: *) 4 | (*Import Weights*) 5 | 6 | 7 | SetDirectory@NotebookDirectory[]; 8 | Clear["`*"]; 9 | << DeepMath`; 10 | DeepMath`NetMerge; 11 | 12 | 13 | params = Import["resnet18.pth.wxf"]; 14 | 15 | 16 | (* ::Subchapter:: *) 17 | (*Pre-defined Structure*) 18 | 19 | 20 | $NCHW = TransposeLayer[{1<->4, 2<->3, 3<->4}]; 21 | getCN[name_, s_, p_] := ConvolutionLayer[ 22 | "Weights" -> params[name <> ".weight"], 23 | "Biases" -> None, 24 | "PaddingSize" -> p, "Stride" -> s 25 | ]; 26 | getBN[name_] := BatchNormalizationLayer[ 27 | "Biases" -> params[name <> ".bias"], 28 | "Scaling" -> params[name <> ".weight"], 29 | "MovingMean" -> params[name <> ".running_mean"], 30 | "MovingVariance" -> params[name <> ".running_var"], 31 | "Epsilon" -> 0.00001, 32 | "Momentum" -> 0.9 33 | ]; 34 | getLinear[name_, out_] := LinearLayer[ 35 | out, 36 | "Weights" -> params[name <> ".weight"], 37 | "Biases" -> params[name <> ".bias"] 38 | ]; 39 | 40 | 41 | getBlock[name_] := GeneralUtilities`Scope[ 42 | path = NetChain@{ 43 | getCN[name <> ".conv1", 1, 1], 44 | getBN[name <> ".bn1"], 45 | Ramp, 46 | getCN[name <> ".conv2", 1, 1], 47 | getBN[name <> ".bn2"] 48 | }; 49 | NetFlatten@NetChain@{NetMerge[path, Plus], Ramp} 50 | ]; 51 | getBlock2[name_] := GeneralUtilities`Scope[ 52 | left = NetChain@{ 53 | getCN[name <> ".conv1", 2, 1], 54 | getBN[name <> ".bn1"], 55 | Ramp, 56 | getCN[name <> ".conv2", 1, 1], 57 | getBN[name <> ".bn2"] 58 | }; 59 | right = NetChain@{ 60 | getCN[name <> ".downsample.0", 2, 0], 61 | getBN[name <> ".downsample.1"] 62 | }; 63 | NetFlatten@NetChain@{NetMerge[{left, right}, Plus], Ramp} 64 | ]; 65 | 66 | 67 | (* ::Subchapter:: *) 68 | (*Main*) 69 | 70 | 71 | encoder = NetEncoder[{ 72 | "Image", 320, 73 | "MeanImage" -> {0.713739812374115, 0.6627991795539856, 0.6518916487693787}, 74 | "VarianceImage" -> {0.2969885468482971, 0.3017076551914215, 0.2979130446910858}^2 75 | }] 76 | decoder = NetDecoder[{"Class", Import["class_names_100.ckpt.json"]}] 77 | mainNet = NetChain[ 78 | { 79 | { 80 | getCN["0.0", 2, 3], 81 | getBN["0.1"], 82 | Ramp 83 | }, 84 | PoolingLayer[{3, 3}, 2, "PaddingSize" -> 1, "Function" -> Max], 85 | { 86 | getBlock["0.4.0"], 87 | getBlock["0.4.1"] 88 | }, 89 | getBlock2["0.5.0"], 90 | { 91 | getBlock["0.5.1"] 92 | }, 93 | getBlock2["0.6.0"], 94 | { 95 | getBlock["0.6.1"] 96 | }, 97 | getBlock2["0.7.0"], 98 | { 99 | getBlock["0.7.1"] 100 | }, 101 | NetMerge[ 102 | {AggregationLayer[Max], AggregationLayer[Mean]}, 103 | Join, 104 | Expand -> True 105 | ], 106 | { 107 | getBN["1.2"], 108 | getLinear["1.4", 512], 109 | Ramp 110 | }, 111 | { 112 | getBN["1.6"], 113 | getLinear["1.8", 100] 114 | }, 115 | LogisticSigmoid 116 | }, 117 | "Input" -> encoder, 118 | "Output" -> decoder 119 | ] 120 | 121 | 122 | (* ::Subchapter:: *) 123 | (*Testing*) 124 | 125 | 126 | image = Import["Test.jpg"] 127 | mainNet = NetReplacePart[mainNet, {"Input" -> encoder, "Output" -> decoder}]; 128 | result = mainNet[image, "Probabilities"]; 129 | Take[ReverseSort@Select[result, # > 0.3&], UpTo[10]] // Dataset 130 | 131 | 132 | NetInformation[mainNet, "LayerTypeCounts"] // ReverseSort // Dataset 133 | 134 | 135 | (* ::Subchapter:: *) 136 | (*Export Model*) 137 | 138 | 139 | export = <| 
140 | "Main" -> mainNet, 141 | "Encoder" -> encoder, 142 | "Decoder" -> decoder 143 | |>; 144 | Export["ResNet-20 trained on Danbooru2018.MAT", export, "WXF", PerformanceGoal -> "Speed"] 145 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/DenseNet on ImageNet/DenseNet161 trained on ImageNet.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | Needs["MXNetLink`"] 5 | Needs["NeuralNetworks`"] 6 | DateString[] 7 | 8 | 9 | (* ::Subitem:: *) 10 | (*Wed 17 Oct 2018 12:40:47*) 11 | 12 | 13 | (* ::Subchapter:: *) 14 | (*Import Weights*) 15 | 16 | 17 | params = NDArrayImport["imagenet_densenet161-0000.params"]; 18 | 19 | 20 | (* ::Subchapter:: *) 21 | (*Encoder & Decoder*) 22 | 23 | 24 | mShift = {0.485, 0.456, 0.406}; 25 | vShift = {0.229, 0.224, 0.225}^2; 26 | encoder = NetEncoder[{"Image", 224, "MeanImage" -> mShift, "VarianceImage" -> vShift}] 27 | decoder = NetExtract[NetModel["ResNet-50 Trained on ImageNet Competition Data"], "Output"] 28 | 29 | 30 | (* ::Subchapter::Closed:: *) 31 | (*Pre-defined Structure*) 32 | 33 | 34 | getBN[i_, j_] := BatchNormalizationLayer[ 35 | "Epsilon" -> 1*^-5, 36 | "Beta" -> params["arg:densenet1_stage" <> i <> "_batchnorm" <> j <> "_beta"], 37 | "Gamma" -> params["arg:densenet1_stage" <> i <> "_batchnorm" <> j <> "_gamma"], 38 | "MovingMean" -> params["aux:densenet1_stage" <> i <> "_batchnorm" <> j <> "_running_mean"], 39 | "MovingVariance" -> params["aux:densenet1_stage" <> i <> "_batchnorm" <> j <> "_running_var"] 40 | ]; 41 | getBN2[j_] := BatchNormalizationLayer[ 42 | "Epsilon" -> 1*^-5, 43 | "Beta" -> params["arg:densenet1_batchnorm" <> j <> "_beta"], 44 | "Gamma" -> params["arg:densenet1_batchnorm" <> j <> "_gamma"], 45 | "MovingMean" -> params["aux:densenet1_batchnorm" <> j <> "_running_mean"], 46 | "MovingVariance" -> params["aux:densenet1_batchnorm" <> j <> "_running_var"] 47 | ]; 48 | getCN[i_, j_, p_ : 1, s_ : 1] := ConvolutionLayer[ 49 | "Weights" -> params["arg:densenet1_stage" <> i <> "_conv" <> j <> "_weight"], 50 | "Biases" -> None, "PaddingSize" -> p, "Stride" -> s 51 | ]; 52 | getCN2[j_, p_ : 1, s_ : 1] := ConvolutionLayer[ 53 | "Weights" -> params["arg:densenet1_conv" <> j <> "_weight"], 54 | "Biases" -> None, "PaddingSize" -> p, "Stride" -> s 55 | ]; 56 | getBlock[i_, j_] := NetGraph[{ 57 | getBN[ToString[i], ToString[j]], Ramp, getCN[ToString[i], ToString[j], 0, 1], 58 | getBN[ToString[i], ToString[j + 1]], Ramp, getCN[ToString[i], ToString[j + 1]], 59 | CatenateLayer[] 60 | }, { 61 | NetPort["Input"] -> 7, 62 | NetPort["Input"] -> 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 63 | }]; 64 | getBlock2[i_] := NetChain@{ 65 | getBN2[ToString@i], Ramp, getCN2[ToString@i, 0, 1], 66 | PoolingLayer[{2, 2}, "Stride" -> 2, "Function" -> Mean] 67 | }; 68 | $getBlock2 = NetChain@{ 69 | getBN2["4"], Ramp, 70 | PoolingLayer[{7, 7}, "Stride" -> 7, "Function" -> Mean] 71 | }; 72 | 73 | 74 | (* ::Subchapter:: *) 75 | (*Main*) 76 | 77 | 78 | extractor = NetChain[{ 79 | getCN2["0", 3, 2], 80 | getBN2["0"], 81 | ElementwiseLayer["ReLU"], 82 | PoolingLayer[{3, 3}, "Stride" -> 2, "PaddingSize" -> 1], 83 | NetChain@Table[getBlock[1, i], {i, 0, 10, 2}], 84 | getBlock2[1], 85 | NetChain@Table[getBlock[2, i], {i, 0, 22, 2}], 86 | getBlock2[2], 87 | NetChain@Table[getBlock[3, i], {i, 0, 70, 2}], 88 | getBlock2[3], 89 | NetChain@Table[getBlock[4, i], {i, 0, 46, 2}], 90 | $getBlock2 91 | }]; 92 | classifier = 
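(* Dense head: maps the pooled DenseNet-161 features onto the 1000 ImageNet classes. *)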
LinearLayer[1000, 93 | "Weights" -> params["arg:densenet1_dense0_weight"], 94 | "Biases" -> params["arg:densenet1_dense0_bias"] 95 | ]; 96 | mainNet = NetChain[{ 97 | "Extractor" -> extractor, 98 | "Classifier" -> classifier, 99 | "Predictor" -> SoftmaxLayer[] 100 | }, 101 | "Input" -> encoder, 102 | "Output" -> decoder 103 | ] 104 | 105 | 106 | (* ::Subchapter:: *) 107 | (*Export Model*) 108 | 109 | 110 | Export["DenseNet161 trained on ImageNet.WXF", mainNet] 111 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/DenseNet on ImageNet/DenseNet169 trained on ImageNet.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | Needs["MXNetLink`"] 5 | Needs["NeuralNetworks`"] 6 | DateString[] 7 | 8 | 9 | (* ::Subitem:: *) 10 | (*Wed 17 Oct 2018 12:38:48*) 11 | 12 | 13 | (* ::Subchapter:: *) 14 | (*Import Weights*) 15 | 16 | 17 | params = NDArrayImport["imagenet_densenet169-0000.params"]; 18 | 19 | 20 | (* ::Subchapter:: *) 21 | (*Encoder & Decoder*) 22 | 23 | 24 | mShift = {0.485, 0.456, 0.406}; 25 | vShift = {0.229, 0.224, 0.225}^2; 26 | encoder = NetEncoder[{"Image", 224, "MeanImage" -> mShift, "VarianceImage" -> vShift}] 27 | decoder = NetExtract[NetModel["ResNet-50 Trained on ImageNet Competition Data"], "Output"] 28 | 29 | 30 | (* ::Subchapter::Closed:: *) 31 | (*Pre-defined Structure*) 32 | 33 | 34 | getBN[i_, j_] := BatchNormalizationLayer[ 35 | "Epsilon" -> 1*^-5, 36 | "Beta" -> params["arg:densenet2_stage" <> i <> "_batchnorm" <> j <> "_beta"], 37 | "Gamma" -> params["arg:densenet2_stage" <> i <> "_batchnorm" <> j <> "_gamma"], 38 | "MovingMean" -> params["aux:densenet2_stage" <> i <> "_batchnorm" <> j <> "_running_mean"], 39 | "MovingVariance" -> params["aux:densenet2_stage" <> i <> "_batchnorm" <> j <> "_running_var"] 40 | ]; 41 | getBN2[j_] := BatchNormalizationLayer[ 42 | "Epsilon" -> 1*^-5, 43 | "Beta" -> params["arg:densenet2_batchnorm" <> j <> "_beta"], 44 | "Gamma" -> params["arg:densenet2_batchnorm" <> j <> "_gamma"], 45 | "MovingMean" -> params["aux:densenet2_batchnorm" <> j <> "_running_mean"], 46 | "MovingVariance" -> params["aux:densenet2_batchnorm" <> j <> "_running_var"] 47 | ]; 48 | getCN[i_, j_, p_ : 1, s_ : 1] := ConvolutionLayer[ 49 | "Weights" -> params["arg:densenet2_stage" <> i <> "_conv" <> j <> "_weight"], 50 | "Biases" -> None, "PaddingSize" -> p, "Stride" -> s 51 | ]; 52 | getCN2[j_, p_ : 1, s_ : 1] := ConvolutionLayer[ 53 | "Weights" -> params["arg:densenet2_conv" <> j <> "_weight"], 54 | "Biases" -> None, "PaddingSize" -> p, "Stride" -> s 55 | ]; 56 | getBlock[i_, j_] := NetGraph[{ 57 | getBN[ToString[i], ToString[j]], Ramp, getCN[ToString[i], ToString[j], 0, 1], 58 | getBN[ToString[i], ToString[j + 1]], Ramp, getCN[ToString[i], ToString[j + 1]], 59 | CatenateLayer[] 60 | }, { 61 | NetPort["Input"] -> 7, 62 | NetPort["Input"] -> 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 63 | }]; 64 | getBlock2[i_] := NetChain@{ 65 | getBN2[ToString@i], Ramp, getCN2[ToString@i, 0, 1], 66 | PoolingLayer[{2, 2}, "Stride" -> 2, "Function" -> Mean] 67 | }; 68 | $getBlock2 = NetChain@{ 69 | getBN2["4"], Ramp, 70 | PoolingLayer[{7, 7}, "Stride" -> 7, "Function" -> Mean] 71 | }; 72 | 73 | 74 | (* ::Subchapter:: *) 75 | (*Main*) 76 | 77 | 78 | extractor = NetChain[{ 79 | getCN2["0", 3, 2], 80 | getBN2["0"], 81 | ElementwiseLayer["ReLU"], 82 | PoolingLayer[{3, 3}, "Stride" -> 2, "PaddingSize" -> 1], 83 | NetChain@Table[getBlock[1, i], {i, 
0, 10, 2}], 84 | getBlock2[1], 85 | NetChain@Table[getBlock[2, i], {i, 0, 22, 2}], 86 | getBlock2[2], 87 | NetChain@Table[getBlock[3, i], {i, 0, 62, 2}], 88 | getBlock2[3], 89 | NetChain@Table[getBlock[4, i], {i, 0, 62, 2}], 90 | $getBlock2 91 | }]; 92 | classifier = LinearLayer[1000, 93 | "Weights" -> params["arg:densenet2_dense0_weight"], 94 | "Biases" -> params["arg:densenet2_dense0_bias"] 95 | ]; 96 | mainNet = NetChain[{ 97 | "Extractor" -> extractor, 98 | "Classifier" -> classifier, 99 | "Predictor" -> SoftmaxLayer[] 100 | }, 101 | "Input" -> encoder, 102 | "Output" -> decoder 103 | ] 104 | 105 | 106 | (* ::Subchapter:: *) 107 | (*Export Model*) 108 | 109 | 110 | Export["DenseNet169 trained on ImageNet.WXF", mainNet] 111 | -------------------------------------------------------------------------------- /ImageRecognition/Classifation/DenseNet on ImageNet/DenseNet201 trained on ImageNet.m: -------------------------------------------------------------------------------- 1 | (* ::Package:: *) 2 | 3 | SetDirectory@NotebookDirectory[]; 4 | Needs["MXNetLink`"] 5 | Needs["NeuralNetworks`"] 6 | DateString[] 7 | 8 | 9 | (* ::Subitem:: *) 10 | (*Wed 17 Oct 2018 19:42:31*) 11 | 12 | 13 | (* ::Subchapter:: *) 14 | (*Import Weights*) 15 | 16 | 17 | params = NDArrayImport["imagenet_densenet201-0000.params"]; 18 | 19 | 20 | (* ::Subchapter:: *) 21 | (*Encoder & Decoder*) 22 | 23 | 24 | mShift = {0.485, 0.456, 0.406}; 25 | vShift = {0.229, 0.224, 0.225}^2; 26 | encoder = NetEncoder[{"Image", 224, "MeanImage" -> mShift, "VarianceImage" -> vShift}] 27 | decoder = NetExtract[NetModel["ResNet-50 Trained on ImageNet Competition Data"], "Output"] 28 | 29 | 30 | (* ::Subchapter::Closed:: *) 31 | (*Pre-defined Structure*) 32 | 33 | 34 | getBN[i_, j_] := BatchNormalizationLayer[ 35 | "Epsilon" -> 1*^-5, 36 | "Beta" -> params["arg:densenet3_stage" <> i <> "_batchnorm" <> j <> "_beta"], 37 | "Gamma" -> params["arg:densenet3_stage" <> i <> "_batchnorm" <> j <> "_gamma"], 38 | "MovingMean" -> params["aux:densenet3_stage" <> i <> "_batchnorm" <> j <> "_running_mean"], 39 | "MovingVariance" -> params["aux:densenet3_stage" <> i <> "_batchnorm" <> j <> "_running_var"] 40 | ]; 41 | getBN2[j_] := BatchNormalizationLayer[ 42 | "Epsilon" -> 1*^-5, 43 | "Beta" -> params["arg:densenet3_batchnorm" <> j <> "_beta"], 44 | "Gamma" -> params["arg:densenet3_batchnorm" <> j <> "_gamma"], 45 | "MovingMean" -> params["aux:densenet3_batchnorm" <> j <> "_running_mean"], 46 | "MovingVariance" -> params["aux:densenet3_batchnorm" <> j <> "_running_var"] 47 | ]; 48 | getCN[i_, j_, p_ : 1, s_ : 1] := ConvolutionLayer[ 49 | "Weights" -> params["arg:densenet3_stage" <> i <> "_conv" <> j <> "_weight"], 50 | "Biases" -> None, "PaddingSize" -> p, "Stride" -> s 51 | ]; 52 | getCN2[j_, p_ : 1, s_ : 1] := ConvolutionLayer[ 53 | "Weights" -> params["arg:densenet3_conv" <> j <> "_weight"], 54 | "Biases" -> None, "PaddingSize" -> p, "Stride" -> s 55 | ]; 56 | getBlock[i_, j_] := NetGraph[{ 57 | getBN[ToString[i], ToString[j]], Ramp, getCN[ToString[i], ToString[j], 0, 1], 58 | getBN[ToString[i], ToString[j + 1]], Ramp, getCN[ToString[i], ToString[j + 1]], 59 | CatenateLayer[] 60 | }, { 61 | NetPort["Input"] -> 7, 62 | NetPort["Input"] -> 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 63 | }]; 64 | getBlock2[i_] := NetChain@{ 65 | getBN2[ToString@i], Ramp, getCN2[ToString@i, 0, 1], 66 | PoolingLayer[{2, 2}, "Stride" -> 2, "Function" -> Mean] 67 | }; 68 | $getBlock2 = NetChain@{ 69 | getBN2["4"], Ramp, 70 | PoolingLayer[{7, 7}, "Stride" -> 7, "Function" -> Mean] 71 | 
}; 72 | 73 | 74 | (* ::Subchapter:: *) 75 | (*Main*) 76 | 77 | 78 | extractor = NetChain[{ 79 | getCN2["0", 3, 2], 80 | getBN2["0"], 81 | ElementwiseLayer["ReLU"], 82 | PoolingLayer[{3, 3}, "Stride" -> 2, "PaddingSize" -> 1], 83 | NetChain@Table[getBlock[1, i], {i, 0, 10, 2}], 84 | getBlock2[1], 85 | NetChain@Table[getBlock[2, i], {i, 0, 22, 2}], 86 | getBlock2[2], 87 | NetChain@Table[getBlock[3, i], {i, 0, 94, 2}], 88 | getBlock2[3], 89 | NetChain@Table[getBlock[4, i], {i, 0, 62, 2}], 90 | $getBlock2 91 | }]; 92 | classifier = LinearLayer[1000, 93 | "Weights" -> params["arg:densenet3_dense0_weight"], 94 | "Biases" -> params["arg:densenet3_dense0_bias"] 95 | ]; 96 | mainNet = NetChain[{ 97 | "Extractor" -> extractor, 98 | "Classifier" -> classifier, 99 | "Predictor" -> SoftmaxLayer[] 100 | }, 101 | "Input" -> encoder, 102 | "Output" -> decoder 103 | ] 104 | 105 | 106 | (* ::Subchapter:: *) 107 | (*Export Model*) 108 | 109 | 110 | Export["DenseNet201 trained on ImageNet.WXF", mainNet] 111 | --------------------------------------------------------------------------------
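
Usage note: the scripts above only export the converted models. As a minimal consumption sketch, assuming the exported WXF files sit in the working directory and "Test.jpg" stands in for any test image, they can be loaded back and evaluated like this:

SetDirectory@NotebookDirectory[];

(* The DenseNet scripts export a ready-to-run NetChain, so Import returns the net directly. *)
denseNet = Import["DenseNet161 trained on ImageNet.WXF", "WXF"];
denseNet[Import["Test.jpg"], {"TopProbabilities", 5}]

(* The Danbooru2018 script exports an Association of net, encoder and decoder;
   reattach the ports before evaluation, mirroring its Testing section.
   The 0.3 threshold is the same multi-label cutoff used there. *)
bundle = Import["ResNet-20 trained on Danbooru2018.WXF", "WXF"];
tagNet = NetReplacePart[bundle["Main"],
  {"Input" -> bundle["Encoder"], "Output" -> bundle["Decoder"]}];
ReverseSort@Select[tagNet[Import["Test.jpg"], "Probabilities"], # > 0.3 &]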