├── .gitignore
├── LICENSE
├── README.md
├── assests
│   ├── comparison.PNG
│   ├── teaser.png
│   └── transfer.png
└── switch_norm.py

/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2018 Junho Kim (1993.01.12)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Switchable_Normalization-Tensorflow
Simple TensorFlow implementation of [Switchable Normalization](https://arxiv.org/abs/1806.10779)

## Summary
![summary](./assests/teaser.png)

## Code
```python
def switch_norm(x, scope='switch_norm'):
    with tf.variable_scope(scope):
        ch = x.shape[-1]
        eps = 1e-5

        # Statistics of batch norm (over N, H, W), instance norm (over H, W)
        # and layer norm (over H, W, C), kept broadcastable against x
        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], keep_dims=True)
        ins_mean, ins_var = tf.nn.moments(x, [1, 2], keep_dims=True)
        layer_mean, layer_var = tf.nn.moments(x, [1, 2, 3], keep_dims=True)

        gamma = tf.get_variable("gamma", [ch], initializer=tf.constant_initializer(1.0))
        beta = tf.get_variable("beta", [ch], initializer=tf.constant_initializer(0.0))

        # Learnable importance weights over the three normalizers
        mean_weight = tf.nn.softmax(tf.get_variable("mean_weight", [3], initializer=tf.constant_initializer(1.0)))
        var_weight = tf.nn.softmax(tf.get_variable("var_weight", [3], initializer=tf.constant_initializer(1.0)))

        mean = mean_weight[0] * batch_mean + mean_weight[1] * ins_mean + mean_weight[2] * layer_mean
        var = var_weight[0] * batch_var + var_weight[1] * ins_var + var_weight[2] * layer_var

        x = (x - mean) / (tf.sqrt(var + eps))
        x = x * gamma + beta

        return x
```

## Usage
```python
with tf.variable_scope('network'):
    x = conv(x, scope='conv_0')
    x = switch_norm(x, scope='switch_norm_0')
    x = relu(x)
```

## Results
### Comparison
![compare](./assests/comparison.PNG)

### Style Transfer
![transfer](./assests/transfer.png)

## Related works
* [Batch_Instance_Normalization](https://github.com/taki0112/Batch_Instance_Normalization-Tensorflow)

## Author
Junho Kim
--------------------------------------------------------------------------------
/assests/comparison.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/taki0112/Switchable_Normalization-Tensorflow/c96f9c8e7811c6f0fb389222493b86e502110730/assests/comparison.PNG
--------------------------------------------------------------------------------
/assests/teaser.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/taki0112/Switchable_Normalization-Tensorflow/c96f9c8e7811c6f0fb389222493b86e502110730/assests/teaser.png
--------------------------------------------------------------------------------
/assests/transfer.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/taki0112/Switchable_Normalization-Tensorflow/c96f9c8e7811c6f0fb389222493b86e502110730/assests/transfer.png
--------------------------------------------------------------------------------
/switch_norm.py:
--------------------------------------------------------------------------------
import tensorflow as tf


def switch_norm(x, scope='switch_norm'):
    with tf.variable_scope(scope):
        ch = x.shape[-1]
        eps = 1e-5

        # Statistics of batch norm (over N, H, W), instance norm (over H, W)
        # and layer norm (over H, W, C), kept broadcastable against x
        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], keep_dims=True)
        ins_mean, ins_var = tf.nn.moments(x, [1, 2], keep_dims=True)
        layer_mean, layer_var = tf.nn.moments(x, [1, 2, 3], keep_dims=True)

        gamma = tf.get_variable("gamma", [ch], initializer=tf.constant_initializer(1.0))
        beta = tf.get_variable("beta", [ch], initializer=tf.constant_initializer(0.0))

        # Learnable importance weights over the three normalizers
        mean_weight = tf.nn.softmax(tf.get_variable("mean_weight", [3], initializer=tf.constant_initializer(1.0)))
        var_weight = tf.nn.softmax(tf.get_variable("var_weight", [3], initializer=tf.constant_initializer(1.0)))

        mean = mean_weight[0] * batch_mean + mean_weight[1] * ins_mean + mean_weight[2] * layer_mean
        var = var_weight[0] * batch_var + var_weight[1] * ins_var + var_weight[2] * layer_var

        x = (x - mean) / (tf.sqrt(var + eps))
        x = x * gamma + beta

        return x
--------------------------------------------------------------------------------
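
A note on usage: the README's snippet relies on `conv` and `relu` helpers that are not defined in this repository, and `switch_norm` targets the TensorFlow 1.x graph API (`tf.variable_scope`, `tf.get_variable`, `keep_dims`). A minimal, self-contained smoke test under those assumptions might look like the sketch below; the `conv` wrapper is a hypothetical stand-in, not the author's implementation.

```python
import numpy as np
import tensorflow as tf  # TensorFlow 1.x graph-mode API is assumed

from switch_norm import switch_norm


def conv(x, channels=64, kernel=3, stride=1, scope='conv'):
    # Hypothetical stand-in for the `conv` helper used in the README;
    # any op producing an NHWC tensor can feed switch_norm.
    with tf.variable_scope(scope):
        return tf.layers.conv2d(x, filters=channels, kernel_size=kernel,
                                strides=stride, padding='same')


x = tf.placeholder(tf.float32, [None, 32, 32, 3])

with tf.variable_scope('network'):
    h = conv(x, scope='conv_0')
    h = switch_norm(h, scope='switch_norm_0')
    h = tf.nn.relu(h)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out = sess.run(h, feed_dict={x: np.random.rand(4, 32, 32, 3).astype(np.float32)})
    print(out.shape)  # (4, 32, 32, 64)
```

Because `mean_weight` and `var_weight` pass through a softmax, the batch, instance, and layer statistics are blended with weights that sum to one, starting from an even 1/3 split at initialization.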