├── .gitignore
├── Demo.ipynb
├── LICENSE
├── README.md
├── invariance_examples
│   ├── indexs.npy
│   ├── l0
│   │   ├── automated.npy
│   │   └── automated_labels.npy
│   └── linf
│       ├── automated_eps03.npy
│       ├── automated_eps03_labels.npy
│       ├── automated_eps04.npy
│       ├── automated_eps04_labels.npy
│       ├── manual_eps04.npy
│       ├── manual_eps04_labels.npy
│       └── manual_indexs.npy
├── l0
│   ├── README.md
│   ├── gan
│   │   ├── acgan_mnist.py
│   │   ├── setup.sh
│   │   └── tflib
│   │       ├── __init__.py
│   │       ├── cifar10.py
│   │       ├── inception_score.py
│   │       ├── mnist.py
│   │       ├── ops
│   │       │   ├── __init__.py
│   │       │   ├── batchnorm.py
│   │       │   ├── cond_batchnorm.py
│   │       │   ├── conv1d.py
│   │       │   ├── conv2d.py
│   │       │   ├── deconv2d.py
│   │       │   ├── layernorm.py
│   │       │   └── linear.py
│   │       ├── plot.py
│   │       ├── save_images.py
│   │       └── small_imagenet.py
│   └── invariant_l0_attack.py
└── linf
    ├── Invariant_Linf_Attack.ipynb
    ├── README.md
    ├── X_test_100.npy
    ├── adv_examples.npy
    ├── all_grids_nns.npy
    ├── all_nns.npy
    └── all_y_nns.npy
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 | .pytest_cache/
49 |
50 | # Translations
51 | *.mo
52 | *.pot
53 |
54 | # Django stuff:
55 | *.log
56 | local_settings.py
57 | db.sqlite3
58 |
59 | # Flask stuff:
60 | instance/
61 | .webassets-cache
62 |
63 | # Scrapy stuff:
64 | .scrapy
65 |
66 | # Sphinx documentation
67 | docs/_build/
68 |
69 | # PyBuilder
70 | target/
71 |
72 | # Jupyter Notebook
73 | .ipynb_checkpoints
74 |
75 | # pyenv
76 | .python-version
77 |
78 | # celery beat schedule file
79 | celerybeat-schedule
80 |
81 | # SageMath parsed files
82 | *.sage.py
83 |
84 | # Environments
85 | .env
86 | .venv
87 | env/
88 | venv/
89 | ENV/
90 | env.bak/
91 | venv.bak/
92 |
93 | # Spyder project settings
94 | .spyderproject
95 | .spyproject
96 |
97 | # Rope project settings
98 | .ropeproject
99 |
100 | # mkdocs documentation
101 | /site
102 |
103 | # mypy
104 | .mypy_cache/
105 |
106 | *.DS_Store
107 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2019 ftramer
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Excessive-Invariance
2 | Source code for the paper:
3 |
4 | **Fundamental Tradeoffs between Invariance and Sensitivity to Adversarial Perturbations**
5 | *Florian Tramèr, Jens Behrmann, Nicholas Carlini, Nicolas Papernot and Jörn-Henrik Jacobsen*
6 | https://arxiv.org/abs/2002.04599
7 |
8 | The `Demo` notebook shows how to load pre-computed invariance examples for the l0 and linf norms.
9 | The folders `l0` and `linf` contain code for generating such invariance examples.
10 |
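11 | ## Loading the pre-computed examples
12 | 
13 | For quick experimentation outside the notebook, a minimal loading sketch is shown below (the paths come from this repository's `invariance_examples` folder; the exact array shapes and dtypes are assumptions, and `Demo.ipynb` remains the authoritative reference):
14 | 
15 | ```python
16 | import numpy as np
17 | 
18 | # Indices of the MNIST test points the examples were generated from.
19 | indices = np.load("invariance_examples/indexs.npy")
20 | 
21 | # l0 invariance examples and the labels assigned to them by human annotators.
22 | x_l0 = np.load("invariance_examples/l0/automated.npy")
23 | y_l0 = np.load("invariance_examples/l0/automated_labels.npy")
24 | 
25 | # linf invariance examples (eps = 0.3) and their human-assigned labels.
26 | x_linf = np.load("invariance_examples/linf/automated_eps03.npy")
27 | y_linf = np.load("invariance_examples/linf/automated_eps03_labels.npy")
28 | 
29 | print(indices.shape, x_l0.shape, y_l0.shape, x_linf.shape)
30 | ```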
--------------------------------------------------------------------------------
/invariance_examples/indexs.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ftramer/Excessive-Invariance/780cab97b95f3a2a50c8b2cd14f2256ce8062c41/invariance_examples/indexs.npy
--------------------------------------------------------------------------------
/invariance_examples/l0/automated.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ftramer/Excessive-Invariance/780cab97b95f3a2a50c8b2cd14f2256ce8062c41/invariance_examples/l0/automated.npy
--------------------------------------------------------------------------------
/invariance_examples/l0/automated_labels.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ftramer/Excessive-Invariance/780cab97b95f3a2a50c8b2cd14f2256ce8062c41/invariance_examples/l0/automated_labels.npy
--------------------------------------------------------------------------------
/invariance_examples/linf/automated_eps03.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ftramer/Excessive-Invariance/780cab97b95f3a2a50c8b2cd14f2256ce8062c41/invariance_examples/linf/automated_eps03.npy
--------------------------------------------------------------------------------
/invariance_examples/linf/automated_eps03_labels.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ftramer/Excessive-Invariance/780cab97b95f3a2a50c8b2cd14f2256ce8062c41/invariance_examples/linf/automated_eps03_labels.npy
--------------------------------------------------------------------------------
/invariance_examples/linf/automated_eps04.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ftramer/Excessive-Invariance/780cab97b95f3a2a50c8b2cd14f2256ce8062c41/invariance_examples/linf/automated_eps04.npy
--------------------------------------------------------------------------------
/invariance_examples/linf/automated_eps04_labels.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ftramer/Excessive-Invariance/780cab97b95f3a2a50c8b2cd14f2256ce8062c41/invariance_examples/linf/automated_eps04_labels.npy
--------------------------------------------------------------------------------
/invariance_examples/linf/manual_eps04.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ftramer/Excessive-Invariance/780cab97b95f3a2a50c8b2cd14f2256ce8062c41/invariance_examples/linf/manual_eps04.npy
--------------------------------------------------------------------------------
/invariance_examples/linf/manual_eps04_labels.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ftramer/Excessive-Invariance/780cab97b95f3a2a50c8b2cd14f2256ce8062c41/invariance_examples/linf/manual_eps04_labels.npy
--------------------------------------------------------------------------------
/invariance_examples/linf/manual_indexs.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ftramer/Excessive-Invariance/780cab97b95f3a2a50c8b2cd14f2256ce8062c41/invariance_examples/linf/manual_indexs.npy
--------------------------------------------------------------------------------
/l0/README.md:
--------------------------------------------------------------------------------
1 | The code in this folder is for generating and evaluating invariance-based adversarial examples under the 0-norm on MNIST.
2 | The `automate.py` file can be used to automatically generate adversarial examples for 100 random MNIST test points
3 | (the indices of these 100 points are specified in the notebook).
4 | Pre-generated examples are in `invariance_examples/l0/automated.npy`. The human-assigned labels for these examples are in
5 | `invariance_examples/l0/automated_labels.npy`.
6 |
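7 | Below is a minimal inspection sketch, assuming (i) the examples are aligned with the indices in `invariance_examples/indexs.npy`, (ii) pixel values are stored in [0, 1], and (iii) it is run from the repository root; the 0.5 threshold mirrors the mask computation in `invariant_l0_attack.py`:
8 | 
9 | ```python
10 | import numpy as np
11 | from keras.datasets import mnist
12 | 
13 | # Original MNIST test set (the same source used by the attack code).
14 | (_, _), (x_test, y_test) = mnist.load_data()
15 | x_test = x_test.astype(np.float32) / 255.0
16 | 
17 | idx = np.load("invariance_examples/indexs.npy").astype(int)
18 | x_adv = np.load("invariance_examples/l0/automated.npy")
19 | y_human = np.load("invariance_examples/l0/automated_labels.npy")
20 | 
21 | # Pixels changed by more than 0.5, as in the mask used by invariant_l0_attack.py.
22 | changed = np.abs(x_adv.reshape(len(x_adv), -1) - x_test[idx].reshape(len(idx), -1)) > 0.5
23 | print("median l0 distance:", np.median(changed.sum(axis=1)))
24 | 
25 | # Fraction of points where the human-assigned label differs from the original one.
26 | print("human label changed on %.1f%% of points" % (100 * (y_human != y_test[idx]).mean()))
27 | ```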
--------------------------------------------------------------------------------
/l0/gan/acgan_mnist.py:
--------------------------------------------------------------------------------
1 | import os, sys
2 | sys.path.append(os.getcwd())
3 |
4 | import time
5 |
6 | import matplotlib
7 | matplotlib.use('Agg')
8 | import matplotlib.pyplot as plt
9 | import numpy as np
10 | import sklearn.datasets
11 | import tensorflow as tf
12 |
13 | import tflib as lib
14 | import tflib.ops.linear
15 | import tflib.ops.conv2d
16 | import tflib.ops.batchnorm
17 | import tflib.ops.deconv2d
18 | import tflib.save_images
19 | import tflib.mnist
20 | import tflib.plot
21 |
22 | MODE = 'wgan-gp' # dcgan, wgan, or wgan-gp
23 | DIM = 128 # Model dimensionality (was 64)
24 | BATCH_SIZE = 100 # Batch size
25 | CRITIC_ITERS = 5 # For WGAN and WGAN-GP, number of critic iters per gen iter
26 | LAMBDA = 10 # Gradient penalty lambda hyperparameter
27 | ITERS = 2000000 # How many generator iterations to train for
28 | OUTPUT_DIM = 784 # Number of pixels in MNIST (28*28)
29 |
30 | NOISY = False
31 |
32 | lib.print_model_settings(locals().copy())
33 |
34 | def LeakyReLU(x, alpha=0.2):
35 | return tf.maximum(alpha*x, x)
36 |
37 | def ReLULayer(name, n_in, n_out, inputs):
38 | output = lib.ops.linear.Linear(
39 | name+'.Linear',
40 | n_in,
41 | n_out,
42 | inputs,
43 | initialization='he'
44 | )
45 | return tf.nn.relu(output)
46 |
47 | def LeakyReLULayer(name, n_in, n_out, inputs):
48 | output = lib.ops.linear.Linear(
49 | name+'.Linear',
50 | n_in,
51 | n_out,
52 | inputs,
53 | initialization='he'
54 | )
55 | return LeakyReLU(output)
56 |
57 | def Generator(n_samples, noise=None):
58 | label = None
59 | if noise is None:
60 | label = tf.random_uniform([n_samples],0,10,dtype=tf.int32)
61 | label = tf.one_hot(label, 10)
62 | noise = tf.random_normal([n_samples, 64])
63 | noise = tf.concat([label, noise], axis=1)
64 |
65 | output = lib.ops.linear.Linear('Generator.Input', 64+10, 4*4*4*DIM, noise)
66 | output = tf.nn.relu(output)
67 | output = tf.reshape(output, [-1, 4*DIM, 4, 4])
68 |
69 | output = lib.ops.deconv2d.Deconv2D('Generator.2', 4*DIM, 2*DIM, 5, output)
70 | output = tf.nn.relu(output)
71 |
72 | output = output[:,:,:7,:7]
73 |
74 | output = lib.ops.deconv2d.Deconv2D('Generator.3', 2*DIM, DIM, 5, output)
75 | output = tf.nn.relu(output)
76 |
77 | output = lib.ops.deconv2d.Deconv2D('Generator.5', DIM, 1, 5, output)
78 |
79 | if NOISY:
80 | output += tf.random_normal((n_samples,1,28,28), stddev=.1)
81 |
82 | output = tf.nn.sigmoid(output)
83 |
84 |
85 | return tf.reshape(output, [-1, OUTPUT_DIM]), label
86 |
87 | def Discriminator(inputs):
88 | output = tf.reshape(inputs, [-1, 1, 28, 28])
89 |
90 | output = lib.ops.conv2d.Conv2D('Discriminator.1',1,DIM,5,output,stride=2)
91 | output = LeakyReLU(output)
92 |
93 | output = lib.ops.conv2d.Conv2D('Discriminator.2', DIM, 2*DIM, 5, output, stride=2)
94 | output = LeakyReLU(output)
95 |
96 | output = lib.ops.conv2d.Conv2D('Discriminator.3', 2*DIM, 4*DIM, 5, output, stride=2)
97 | output = LeakyReLU(output)
98 |
99 | output = tf.reshape(output, [-1, 4*4*4*DIM])
100 |
101 | preds = lib.ops.linear.Linear('Discriminator.Output', 4*4*4*DIM, 10, output)
102 | output = lib.ops.linear.Linear('Discriminator.Output', 4*4*4*DIM, 1, output)
103 |
104 | return tf.reshape(output, [-1]), preds
105 |
106 | if __name__ == "__main__":
107 | real_data = tf.placeholder(tf.float32, shape=[BATCH_SIZE, OUTPUT_DIM])
108 | labels_real = tf.placeholder(tf.int32, shape=[BATCH_SIZE])
109 | fake_data, labels_fake = Generator(BATCH_SIZE)
110 |
111 | disc_real, preds_real = Discriminator(real_data)
112 | disc_fake, preds_fake = Discriminator(fake_data)
113 |
114 | gen_params = lib.params_with_name('Generator')
115 | disc_params = lib.params_with_name('Discriminator')
116 |
117 | if True:
118 | classifier_cost_real = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels_real,
119 | logits=preds_real)
120 | classifier_cost_fake = tf.nn.softmax_cross_entropy_with_logits(labels=labels_fake,
121 | logits=preds_fake)
122 | classifier_cost = classifier_cost_real + classifier_cost_fake
123 | gen_cost = -tf.reduce_mean(disc_fake) + classifier_cost
124 | disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real) + classifier_cost
125 |
126 | alpha = tf.random_uniform(
127 | shape=[BATCH_SIZE,1],
128 | minval=0.,
129 | maxval=1.
130 | )
131 | differences = fake_data - real_data
132 | interpolates = real_data + (alpha*differences)
133 | gradients = tf.gradients(Discriminator(interpolates), [interpolates])[0]
134 | slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
135 | gradient_penalty = tf.reduce_mean((slopes-1.)**2)
136 | disc_cost += LAMBDA*gradient_penalty
137 |
138 | gen_train_op = tf.train.AdamOptimizer(
139 | learning_rate=1e-4,
140 | beta1=0.5,
141 | beta2=0.9
142 | ).minimize(gen_cost, var_list=gen_params)
143 | disc_train_op = tf.train.AdamOptimizer(
144 | learning_rate=1e-4,
145 | beta1=0.5,
146 | beta2=0.9
147 | ).minimize(disc_cost, var_list=disc_params)
148 |
149 | clip_disc_weights = None
150 |
151 | # For saving samples
152 | fixed_noise = np.random.normal(size=(128, 74))
153 | fixed_noise[:,:10] = 0
154 | for i in range(128):
155 | fixed_noise[i,i%10] = 1
156 | fixed_noise = tf.constant(fixed_noise.astype('float32'))
157 | fixed_noise_samples, _ = Generator(128, noise=fixed_noise)
158 | def generate_image(frame, true_dist):
159 | samples = session.run(fixed_noise_samples)
160 | lib.save_images.save_images(
161 | samples.reshape((128, 28, 28)),
162 | ("noisy-" if NOISY else "")+'mnist_acgan_samples_{0:09d}.png'.format(frame)
163 | )
164 |
165 | # Dataset iterator
166 | train_gen, dev_gen, test_gen = lib.mnist.load(BATCH_SIZE, BATCH_SIZE)
167 | def inf_train_gen():
168 | while True:
169 | for images,targets in train_gen():
170 | yield images,targets
171 |
172 | saver = tf.train.Saver()
173 |
174 | # Train loop
175 | with tf.Session() as session:
176 |
177 | session.run(tf.initialize_all_variables())
178 |
179 | gen = inf_train_gen()
180 |
181 | for iteration in range(ITERS):
182 | start_time = time.time()
183 |
184 | if iteration > 0:
185 | _ = session.run(gen_train_op)
186 |
187 | if MODE == 'dcgan':
188 | disc_iters = 1
189 | else:
190 | disc_iters = CRITIC_ITERS
191 | for i in range(disc_iters):
192 | _data,_targets = next(gen)
193 | _disc_cost, _ = session.run(
194 | [disc_cost, disc_train_op],
195 | feed_dict={real_data: _data,
196 | labels_real: _targets}
197 | )
198 | if clip_disc_weights is not None:
199 | _ = session.run(clip_disc_weights)
200 |
201 | lib.plot.plot('train disc cost', _disc_cost)
202 | lib.plot.plot('time', time.time() - start_time)
203 |
204 | # Calculate dev loss and generate samples every 100 iters
205 | if iteration % 100 == 99:
206 | dev_disc_costs = []
207 | for images,targets in dev_gen():
208 | _dev_disc_cost, _creal, _cfake = session.run(
209 | (disc_cost, classifier_cost_real, classifier_cost_fake),
210 | feed_dict={real_data: images,
211 | labels_real: targets}
212 | )
213 | dev_disc_costs.append(_dev_disc_cost)
214 | lib.plot.plot('dev disc cost', np.mean(dev_disc_costs))
215 | lib.plot.plot('dev classreal cost', np.mean(_creal))
216 | lib.plot.plot('dev classfake cost', np.mean(_cfake))
217 |
218 | generate_image(iteration, _data)
219 | saver.save(session, 'model/mnist-acgan-2'+("-noisy" if NOISY else ""))
220 |
221 | # Write logs every 100 iters
222 | if (iteration < 5) or (iteration % 100 == 99):
223 | lib.plot.flush()
224 |
225 | lib.plot.tick()
226 |
227 |
--------------------------------------------------------------------------------
/l0/gan/setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | cd "$(dirname "$0")" # cd to directory of this script
4 |
5 | # $1 is filename
6 | # $2 is expected sha
7 | check_sha1() {
8 | computed=$(sha1sum "$1" 2>/dev/null | awk '{print $1}') || return 1
9 | if [ "$computed" == "$2" ]; then
10 | return 0;
11 | else
12 | return 1;
13 | fi
14 | }
15 |
16 | # $1 is URL
17 | # $2 is extracted file name
18 | # $3 is the checksum
19 | fetch() {
20 | f=${1##*/}
21 | if check_sha1 $f $3; then
22 | echo "$2 already downloaded"
23 | return
24 | fi
25 | echo "downloading $1"
26 | wget -q $1 -O $f
27 | if check_sha1 $f $3; then
28 | echo "downloaded $2"
29 | else
30 | echo "HASH MISMATCH, SHA1($f) != $3"
31 | return
32 | fi
33 |
34 | tar xzf $f
35 | }
36 |
37 | fetch https://github.com/anishathalye/obfuscated-gradients/releases/download/v0/defensegan_data.tgz data 00a0eeeae06896ae51aa705985572e06e7119c61
38 |
--------------------------------------------------------------------------------
/l0/gan/tflib/__init__.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tensorflow as tf
3 |
4 | import locale
5 |
6 | locale.setlocale(locale.LC_ALL, '')
7 |
8 | _params = {}
9 | _param_aliases = {}
10 | def param(name, *args, **kwargs):
11 | """
12 | A wrapper for `tf.Variable` which enables parameter sharing in models.
13 |
14 | Creates and returns TensorFlow variables similarly to `tf.Variable`,
15 | except that if you try to create a param with the same name as a
16 | previously-created one, `param(...)` will just return the old one instead of
17 | making a new one.
18 |
19 | This constructor also adds a `param` attribute to the variables it
20 | creates, so that you can easily search a graph for all params.
21 | """
22 |
23 | if name not in _params:
24 | kwargs['name'] = name
25 | param = tf.Variable(*args, **kwargs)
26 | param.param = True
27 | _params[name] = param
28 | result = _params[name]
29 | i = 0
30 | while result in _param_aliases:
31 | # print 'following alias {}: {} to {}'.format(i, result, _param_aliases[result])
32 | i += 1
33 | result = _param_aliases[result]
34 | return result
35 |
36 | def params_with_name(name):
37 | return [p for n,p in list(_params.items()) if name in n]
38 |
39 | def delete_all_params():
40 | _params.clear()
41 |
42 | def alias_params(replace_dict):
43 | for old,new in list(replace_dict.items()):
44 | # print "aliasing {} to {}".format(old,new)
45 | _param_aliases[old] = new
46 |
47 | def delete_param_aliases():
48 | _param_aliases.clear()
49 |
50 | # def search(node, critereon):
51 | # """
52 | # Traverse the Theano graph starting at `node` and return a list of all nodes
53 | # which match the `critereon` function. When optimizing a cost function, you
54 | # can use this to get a list of all of the trainable params in the graph, like
55 | # so:
56 |
57 | # `lib.search(cost, lambda x: hasattr(x, "param"))`
58 | # """
59 |
60 | # def _search(node, critereon, visited):
61 | # if node in visited:
62 | # return []
63 | # visited.add(node)
64 |
65 | # results = []
66 | # if isinstance(node, T.Apply):
67 | # for inp in node.inputs:
68 | # results += _search(inp, critereon, visited)
69 | # else: # Variable node
70 | # if critereon(node):
71 | # results.append(node)
72 | # if node.owner is not None:
73 | # results += _search(node.owner, critereon, visited)
74 | # return results
75 |
76 | # return _search(node, critereon, set())
77 |
78 | # def print_params_info(params):
79 | # """Print information about the parameters in the given param set."""
80 |
81 | # params = sorted(params, key=lambda p: p.name)
82 | # values = [p.get_value(borrow=True) for p in params]
83 | # shapes = [p.shape for p in values]
84 | # print "Params for cost:"
85 | # for param, value, shape in zip(params, values, shapes):
86 | # print "\t{0} ({1})".format(
87 | # param.name,
88 | # ",".join([str(x) for x in shape])
89 | # )
90 |
91 | # total_param_count = 0
92 | # for shape in shapes:
93 | # param_count = 1
94 | # for dim in shape:
95 | # param_count *= dim
96 | # total_param_count += param_count
97 | # print "Total parameter count: {0}".format(
98 | # locale.format("%d", total_param_count, grouping=True)
99 | # )
100 |
101 | def print_model_settings(locals_):
102 | print("Uppercase local vars:")
103 | all_vars = [(k,v) for (k,v) in list(locals_.items()) if (k.isupper() and k!='T' and k!='SETTINGS' and k!='ALL_SETTINGS')]
104 | all_vars = sorted(all_vars, key=lambda x: x[0])
105 | for var_name, var_value in all_vars:
106 | print("\t{}: {}".format(var_name, var_value))
107 |
108 |
109 | def print_model_settings_dict(settings):
110 | print("Settings dict:")
111 | all_vars = [(k,v) for (k,v) in list(settings.items())]
112 | all_vars = sorted(all_vars, key=lambda x: x[0])
113 | for var_name, var_value in all_vars:
114 | print("\t{}: {}".format(var_name, var_value))
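115 | 
116 | # Minimal usage sketch for param() above (illustrative names; not part of the
117 | # original library): calling param() twice with the same name returns the same
118 | # underlying tf.Variable, which is why acgan_mnist.py can apply Discriminator()
119 | # to both real and fake data while sharing a single set of weights.
120 | #
121 | #   w1 = param('Example.W', np.zeros((3, 3), dtype='float32'))
122 | #   w2 = param('Example.W', np.ones((3, 3), dtype='float32'))  # returns w1; new value ignored
123 | #   assert w1 is w2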
--------------------------------------------------------------------------------
/l0/gan/tflib/cifar10.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | import os
4 | import urllib.request, urllib.parse, urllib.error
5 | import gzip
6 | import pickle as pickle
7 |
8 | def unpickle(file):
9 | fo = open(file, 'rb')
10 | dict = pickle.load(fo, encoding='latin1')
11 | fo.close()
12 | return dict['data'], dict['labels']
13 |
14 | def cifar_generator(filenames, batch_size, data_dir):
15 | all_data = []
16 | all_labels = []
17 | for filename in filenames:
18 | data, labels = unpickle(data_dir + '/' + filename)
19 | all_data.append(data)
20 | all_labels.append(labels)
21 |
22 | images = np.concatenate(all_data, axis=0)
23 | labels = np.concatenate(all_labels, axis=0)
24 |
25 | def get_epoch():
26 | rng_state = np.random.get_state()
27 | np.random.shuffle(images)
28 | np.random.set_state(rng_state)
29 | np.random.shuffle(labels)
30 |
31 | for i in range(len(images) // batch_size):
32 | yield (images[i*batch_size:(i+1)*batch_size], labels[i*batch_size:(i+1)*batch_size])
33 |
34 | return get_epoch
35 |
36 |
37 | def load(batch_size, data_dir):
38 | return (
39 | cifar_generator(['data_batch_1','data_batch_2','data_batch_3','data_batch_4','data_batch_5'], batch_size, data_dir),
40 | cifar_generator(['test_batch'], batch_size, data_dir)
41 | )
42 |
--------------------------------------------------------------------------------
/l0/gan/tflib/inception_score.py:
--------------------------------------------------------------------------------
1 | # From https://github.com/openai/improved-gan/blob/master/inception_score/model.py
2 | # Code derived from tensorflow/tensorflow/models/image/imagenet/classify_image.py
3 |
4 |
5 |
6 |
7 | import os.path
8 | import sys
9 | import tarfile
10 |
11 | import numpy as np
12 | from six.moves import urllib
13 | import tensorflow as tf
14 | import glob
15 | import scipy.misc
16 | import math
17 | import sys
18 |
19 | MODEL_DIR = '/tmp/imagenet'
20 | DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
21 | softmax = None
22 |
23 | # Call this function with a list of images. Each element should be a
24 | # numpy array with values ranging from 0 to 255.
25 | def get_inception_score(images, splits=10):
26 | assert(type(images) == list)
27 | assert(type(images[0]) == np.ndarray)
28 | assert(len(images[0].shape) == 3)
29 | assert(np.max(images[0]) > 10)
30 | assert(np.min(images[0]) >= 0.0)
31 | inps = []
32 | for img in images:
33 | img = img.astype(np.float32)
34 | inps.append(np.expand_dims(img, 0))
35 | bs = 100
36 | with tf.Session() as sess:
37 | preds = []
38 | n_batches = int(math.ceil(float(len(inps)) / float(bs)))
39 | for i in range(n_batches):
40 | # sys.stdout.write(".")
41 | # sys.stdout.flush()
42 | inp = inps[(i * bs):min((i + 1) * bs, len(inps))]
43 | inp = np.concatenate(inp, 0)
44 | pred = sess.run(softmax, {'ExpandDims:0': inp})
45 | preds.append(pred)
46 | preds = np.concatenate(preds, 0)
47 | scores = []
48 | for i in range(splits):
49 | part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
50 | kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
51 | kl = np.mean(np.sum(kl, 1))
52 | scores.append(np.exp(kl))
53 | return np.mean(scores), np.std(scores)
54 |
55 | # This function is called automatically.
56 | def _init_inception():
57 | global softmax
58 | if not os.path.exists(MODEL_DIR):
59 | os.makedirs(MODEL_DIR)
60 | filename = DATA_URL.split('/')[-1]
61 | filepath = os.path.join(MODEL_DIR, filename)
62 | if not os.path.exists(filepath):
63 | def _progress(count, block_size, total_size):
64 | sys.stdout.write('\r>> Downloading %s %.1f%%' % (
65 | filename, float(count * block_size) / float(total_size) * 100.0))
66 | sys.stdout.flush()
67 | filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
68 | print()
69 | statinfo = os.stat(filepath)
70 | print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
71 | tarfile.open(filepath, 'r:gz').extractall(MODEL_DIR)
72 | with tf.gfile.FastGFile(os.path.join(
73 | MODEL_DIR, 'classify_image_graph_def.pb'), 'rb') as f:
74 | graph_def = tf.GraphDef()
75 | graph_def.ParseFromString(f.read())
76 | _ = tf.import_graph_def(graph_def, name='')
77 | # Works with an arbitrary minibatch size.
78 | with tf.Session() as sess:
79 | pool3 = sess.graph.get_tensor_by_name('pool_3:0')
80 | ops = pool3.graph.get_operations()
81 | for op_idx, op in enumerate(ops):
82 | for o in op.outputs:
83 | shape = o.get_shape()
84 | shape = [s.value for s in shape]
85 | new_shape = []
86 | for j, s in enumerate(shape):
87 | if s == 1 and j == 0:
88 | new_shape.append(None)
89 | else:
90 | new_shape.append(s)
91 | o._shape = tf.TensorShape(new_shape)
92 | w = sess.graph.get_operation_by_name("softmax/logits/MatMul").inputs[1]
93 | logits = tf.matmul(tf.squeeze(pool3), w)
94 | softmax = tf.nn.softmax(logits)
95 |
96 | if softmax is None:
97 | _init_inception()
98 |
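99 | # Minimal usage sketch (illustrative only). Per the asserts in
100 | # get_inception_score above, `images` must be a Python list of HxWx3 numpy
101 | # arrays with values in [0, 255]:
102 | #
103 | #   fake_images = [np.random.randint(0, 256, (64, 64, 3)).astype(np.float32)
104 | #                  for _ in range(100)]
105 | #   mean, std = get_inception_score(fake_images, splits=10)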
--------------------------------------------------------------------------------
/l0/gan/tflib/mnist.py:
--------------------------------------------------------------------------------
1 | import numpy
2 |
3 | import os
4 | import urllib.request, urllib.parse, urllib.error
5 | import gzip
6 | import pickle as pickle
7 |
8 | def mnist_generator(data, batch_size, n_labelled, limit=None):
9 | images, targets = data
10 |
11 | rng_state = numpy.random.get_state()
12 | numpy.random.shuffle(images)
13 | numpy.random.set_state(rng_state)
14 | numpy.random.shuffle(targets)
15 | if limit is not None:
16 | print("WARNING ONLY FIRST {} MNIST DIGITS".format(limit))
17 | images = images.astype('float32')[:limit]
18 | targets = targets.astype('int32')[:limit]
19 | if n_labelled is not None:
20 | labelled = numpy.zeros(len(images), dtype='int32')
21 | labelled[:n_labelled] = 1
22 |
23 | def get_epoch():
24 | rng_state = numpy.random.get_state()
25 | numpy.random.shuffle(images)
26 | numpy.random.set_state(rng_state)
27 | numpy.random.shuffle(targets)
28 |
29 | if n_labelled is not None:
30 | numpy.random.set_state(rng_state)
31 | numpy.random.shuffle(labelled)
32 |
33 | image_batches = images.reshape(-1, batch_size, 784)
34 | target_batches = targets.reshape(-1, batch_size)
35 |
36 | if n_labelled is not None:
37 | labelled_batches = labelled.reshape(-1, batch_size)
38 |
39 | for i in range(len(image_batches)):
40 | yield (numpy.copy(image_batches[i]), numpy.copy(target_batches[i]), numpy.copy(labelled))
41 |
42 | else:
43 |
44 | for i in range(len(image_batches)):
45 | yield (numpy.copy(image_batches[i]), numpy.copy(target_batches[i]))
46 |
47 | return get_epoch
48 |
49 | def load(batch_size, test_batch_size, n_labelled=None):
50 | filepath = '/tmp/mnist.pkl.gz'
51 | url = 'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'
52 |
53 | if not os.path.isfile(filepath):
54 | print("Couldn't find MNIST dataset in /tmp, downloading...")
55 | urllib.request.urlretrieve(url, filepath)
56 |
57 | with gzip.open('/tmp/mnist.pkl.gz', 'rb') as f:
58 | train_data, dev_data, test_data = pickle.load(f, encoding='latin1')
59 |
60 | return (
61 | mnist_generator(train_data, batch_size, n_labelled),
62 | mnist_generator(dev_data, test_batch_size, n_labelled),
63 | mnist_generator(test_data, test_batch_size, n_labelled)
64 | )
65 |
--------------------------------------------------------------------------------
/l0/gan/tflib/ops/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ftramer/Excessive-Invariance/780cab97b95f3a2a50c8b2cd14f2256ce8062c41/l0/gan/tflib/ops/__init__.py
--------------------------------------------------------------------------------
/l0/gan/tflib/ops/batchnorm.py:
--------------------------------------------------------------------------------
1 | import tflib as lib
2 |
3 | import numpy as np
4 | import tensorflow as tf
5 |
6 | def Batchnorm(name, axes, inputs, is_training=None, stats_iter=None, update_moving_stats=True, fused=True):
7 | if ((axes == [0,2,3]) or (axes == [0,2])) and fused==True:
8 | if axes==[0,2]:
9 | inputs = tf.expand_dims(inputs, 3)
10 | # Old (working but pretty slow) implementation:
11 | ##########
12 |
13 | # inputs = tf.transpose(inputs, [0,2,3,1])
14 |
15 | # mean, var = tf.nn.moments(inputs, [0,1,2], keep_dims=False)
16 | # offset = lib.param(name+'.offset', np.zeros(mean.get_shape()[-1], dtype='float32'))
17 | # scale = lib.param(name+'.scale', np.ones(var.get_shape()[-1], dtype='float32'))
18 | # result = tf.nn.batch_normalization(inputs, mean, var, offset, scale, 1e-4)
19 |
20 | # return tf.transpose(result, [0,3,1,2])
21 |
22 | # New (super fast but untested) implementation:
23 | offset = lib.param(name+'.offset', np.zeros(inputs.get_shape()[1], dtype='float32'))
24 | scale = lib.param(name+'.scale', np.ones(inputs.get_shape()[1], dtype='float32'))
25 |
26 | moving_mean = lib.param(name+'.moving_mean', np.zeros(inputs.get_shape()[1], dtype='float32'), trainable=False)
27 | moving_variance = lib.param(name+'.moving_variance', np.ones(inputs.get_shape()[1], dtype='float32'), trainable=False)
28 |
29 | def _fused_batch_norm_training():
30 | return tf.nn.fused_batch_norm(inputs, scale, offset, epsilon=1e-5, data_format='NCHW')
31 | def _fused_batch_norm_inference():
32 | # Version which blends in the current item's statistics
33 | batch_size = tf.cast(tf.shape(inputs)[0], 'float32')
34 | mean, var = tf.nn.moments(inputs, [2,3], keep_dims=True)
35 | mean = ((1./batch_size)*mean) + (((batch_size-1.)/batch_size)*moving_mean)[None,:,None,None]
36 | var = ((1./batch_size)*var) + (((batch_size-1.)/batch_size)*moving_variance)[None,:,None,None]
37 | return tf.nn.batch_normalization(inputs, mean, var, offset[None,:,None,None], scale[None,:,None,None], 1e-5), mean, var
38 |
39 | # Standard version
40 | # return tf.nn.fused_batch_norm(
41 | # inputs,
42 | # scale,
43 | # offset,
44 | # epsilon=1e-2,
45 | # mean=moving_mean,
46 | # variance=moving_variance,
47 | # is_training=False,
48 | # data_format='NCHW'
49 | # )
50 |
51 | if is_training is None:
52 | outputs, batch_mean, batch_var = _fused_batch_norm_training()
53 | else:
54 | outputs, batch_mean, batch_var = tf.cond(is_training,
55 | _fused_batch_norm_training,
56 | _fused_batch_norm_inference)
57 | if update_moving_stats:
58 | no_updates = lambda: outputs
59 | def _force_updates():
60 | """Internal function forces updates moving_vars if is_training."""
61 | float_stats_iter = tf.cast(stats_iter, tf.float32)
62 |
63 | update_moving_mean = tf.assign(moving_mean, ((float_stats_iter/(float_stats_iter+1))*moving_mean) + ((1/(float_stats_iter+1))*batch_mean))
64 | update_moving_variance = tf.assign(moving_variance, ((float_stats_iter/(float_stats_iter+1))*moving_variance) + ((1/(float_stats_iter+1))*batch_var))
65 |
66 | with tf.control_dependencies([update_moving_mean, update_moving_variance]):
67 | return tf.identity(outputs)
68 | outputs = tf.cond(is_training, _force_updates, no_updates)
69 |
70 | if axes == [0,2]:
71 | return outputs[:,:,:,0] # collapse last dim
72 | else:
73 | return outputs
74 | else:
75 | # raise Exception('old BN')
76 | # TODO we can probably use nn.fused_batch_norm here too for speedup
77 | mean, var = tf.nn.moments(inputs, axes, keep_dims=True)
78 | shape = mean.get_shape().as_list()
79 | if 0 not in axes:
80 | print("WARNING ({}): didn't find 0 in axes, but not using separate BN params for each item in batch".format(name))
81 | shape[0] = 1
82 | offset = lib.param(name+'.offset', np.zeros(shape, dtype='float32'))
83 | scale = lib.param(name+'.scale', np.ones(shape, dtype='float32'))
84 | result = tf.nn.batch_normalization(inputs, mean, var, offset, scale, 1e-5)
85 |
86 |
87 | return result
88 |
--------------------------------------------------------------------------------
/l0/gan/tflib/ops/cond_batchnorm.py:
--------------------------------------------------------------------------------
1 | import tflib as lib
2 |
3 | import numpy as np
4 | import tensorflow as tf
5 |
6 | def Batchnorm(name, axes, inputs, is_training=None, stats_iter=None, update_moving_stats=True, fused=True, labels=None, n_labels=None):
7 | """conditional batchnorm (dumoulin et al 2016) for BCHW conv filtermaps"""
8 | if axes != [0,2,3]:
9 | raise Exception('unsupported')
10 | mean, var = tf.nn.moments(inputs, axes, keep_dims=True)
11 | shape = mean.get_shape().as_list() # shape is [1,n,1,1]
12 | offset_m = lib.param(name+'.offset', np.zeros([n_labels,shape[1]], dtype='float32'))
13 | scale_m = lib.param(name+'.scale', np.ones([n_labels,shape[1]], dtype='float32'))
14 | offset = tf.nn.embedding_lookup(offset_m, labels)
15 | scale = tf.nn.embedding_lookup(scale_m, labels)
16 | result = tf.nn.batch_normalization(inputs, mean, var, offset[:,:,None,None], scale[:,:,None,None], 1e-5)
17 | return result
--------------------------------------------------------------------------------
/l0/gan/tflib/ops/conv1d.py:
--------------------------------------------------------------------------------
1 | import tflib as lib
2 |
3 | import numpy as np
4 | import tensorflow as tf
5 |
6 | _default_weightnorm = False
7 | def enable_default_weightnorm():
8 | global _default_weightnorm
9 | _default_weightnorm = True
10 |
11 | def Conv1D(name, input_dim, output_dim, filter_size, inputs, he_init=True, mask_type=None, stride=1, weightnorm=None, biases=True, gain=1.):
12 | """
13 | inputs: tensor of shape (batch size, num channels, width)
14 | mask_type: one of None, 'a', 'b'
15 |
16 | returns: tensor of shape (batch size, num channels, width)
17 | """
18 | with tf.name_scope(name) as scope:
19 |
20 | if mask_type is not None:
21 | mask_type, mask_n_channels = mask_type
22 |
23 | mask = np.ones(
24 | (filter_size, input_dim, output_dim),
25 | dtype='float32'
26 | )
27 | center = filter_size // 2
28 |
29 | # Mask out future locations
30 | # filter shape is (width, input channels, output channels)
31 | mask[center+1:, :, :] = 0.
32 |
33 | # Mask out future channels
34 | for i in range(mask_n_channels):
35 | for j in range(mask_n_channels):
36 | if (mask_type=='a' and i >= j) or (mask_type=='b' and i > j):
37 | mask[
38 | center,
39 | i::mask_n_channels,
40 | j::mask_n_channels
41 | ] = 0.
42 |
43 |
44 | def uniform(stdev, size):
45 | return np.random.uniform(
46 | low=-stdev * np.sqrt(3),
47 | high=stdev * np.sqrt(3),
48 | size=size
49 | ).astype('float32')
50 |
51 | fan_in = input_dim * filter_size
52 | fan_out = output_dim * filter_size / stride
53 |
54 | if mask_type is not None: # only approximately correct
55 | fan_in /= 2.
56 | fan_out /= 2.
57 |
58 | if he_init:
59 | filters_stdev = np.sqrt(4./(fan_in+fan_out))
60 | else: # Normalized init (Glorot & Bengio)
61 | filters_stdev = np.sqrt(2./(fan_in+fan_out))
62 |
63 | filter_values = uniform(
64 | filters_stdev,
65 | (filter_size, input_dim, output_dim)
66 | )
67 | # print "WARNING IGNORING GAIN"
68 | filter_values *= gain
69 |
70 | filters = lib.param(name+'.Filters', filter_values)
71 |
72 | if weightnorm==None:
73 | weightnorm = _default_weightnorm
74 | if weightnorm:
75 | norm_values = np.sqrt(np.sum(np.square(filter_values), axis=(0,1)))
76 | target_norms = lib.param(
77 | name + '.g',
78 | norm_values
79 | )
80 | with tf.name_scope('weightnorm') as scope:
81 | norms = tf.sqrt(tf.reduce_sum(tf.square(filters), reduction_indices=[0,1]))
82 | filters = filters * (target_norms / norms)
83 |
84 | if mask_type is not None:
85 | with tf.name_scope('filter_mask'):
86 | filters = filters * mask
87 |
88 | result = tf.nn.conv1d(
89 | value=inputs,
90 | filters=filters,
91 | stride=stride,
92 | padding='SAME',
93 | data_format='NCHW'
94 | )
95 |
96 | if biases:
97 | _biases = lib.param(
98 | name+'.Biases',
99 | np.zeros([output_dim], dtype='float32')
100 | )
101 |
102 | # result = result + _biases
103 |
104 | result = tf.expand_dims(result, 3)
105 | result = tf.nn.bias_add(result, _biases, data_format='NCHW')
106 | result = tf.squeeze(result)
107 |
108 | return result
109 |
--------------------------------------------------------------------------------
/l0/gan/tflib/ops/conv2d.py:
--------------------------------------------------------------------------------
1 | import tflib as lib
2 |
3 | import numpy as np
4 | import tensorflow as tf
5 |
6 | _default_weightnorm = False
7 | def enable_default_weightnorm():
8 | global _default_weightnorm
9 | _default_weightnorm = True
10 |
11 | _weights_stdev = None
12 | def set_weights_stdev(weights_stdev):
13 | global _weights_stdev
14 | _weights_stdev = weights_stdev
15 |
16 | def unset_weights_stdev():
17 | global _weights_stdev
18 | _weights_stdev = None
19 |
20 | def Conv2D(name, input_dim, output_dim, filter_size, inputs, he_init=True, mask_type=None, stride=1, weightnorm=None, biases=True, gain=1.):
21 | """
22 | inputs: tensor of shape (batch size, num channels, height, width)
23 | mask_type: one of None, 'a', 'b'
24 |
25 | returns: tensor of shape (batch size, num channels, height, width)
26 | """
27 | with tf.name_scope(name) as scope:
28 |
29 | if mask_type is not None:
30 | mask_type, mask_n_channels = mask_type
31 |
32 | mask = np.ones(
33 | (filter_size, filter_size, input_dim, output_dim),
34 | dtype='float32'
35 | )
36 | center = filter_size // 2
37 |
38 | # Mask out future locations
39 | # filter shape is (height, width, input channels, output channels)
40 | mask[center+1:, :, :, :] = 0.
41 | mask[center, center+1:, :, :] = 0.
42 |
43 | # Mask out future channels
44 | for i in range(mask_n_channels):
45 | for j in range(mask_n_channels):
46 | if (mask_type=='a' and i >= j) or (mask_type=='b' and i > j):
47 | mask[
48 | center,
49 | center,
50 | i::mask_n_channels,
51 | j::mask_n_channels
52 | ] = 0.
53 |
54 |
55 | def uniform(stdev, size):
56 | return np.random.uniform(
57 | low=-stdev * np.sqrt(3),
58 | high=stdev * np.sqrt(3),
59 | size=size
60 | ).astype('float32')
61 |
62 | fan_in = input_dim * filter_size**2
63 | fan_out = output_dim * filter_size**2 / (stride**2)
64 |
65 | if mask_type is not None: # only approximately correct
66 | fan_in /= 2.
67 | fan_out /= 2.
68 |
69 | if he_init:
70 | filters_stdev = np.sqrt(4./(fan_in+fan_out))
71 | else: # Normalized init (Glorot & Bengio)
72 | filters_stdev = np.sqrt(2./(fan_in+fan_out))
73 |
74 | if _weights_stdev is not None:
75 | filter_values = uniform(
76 | _weights_stdev,
77 | (filter_size, filter_size, input_dim, output_dim)
78 | )
79 | else:
80 | filter_values = uniform(
81 | filters_stdev,
82 | (filter_size, filter_size, input_dim, output_dim)
83 | )
84 |
85 | # print "WARNING IGNORING GAIN"
86 | filter_values *= gain
87 |
88 | filters = lib.param(name+'.Filters', filter_values)
89 |
90 | if weightnorm==None:
91 | weightnorm = _default_weightnorm
92 | if weightnorm:
93 | norm_values = np.sqrt(np.sum(np.square(filter_values), axis=(0,1,2)))
94 | target_norms = lib.param(
95 | name + '.g',
96 | norm_values
97 | )
98 | with tf.name_scope('weightnorm') as scope:
99 | norms = tf.sqrt(tf.reduce_sum(tf.square(filters), reduction_indices=[0,1,2]))
100 | filters = filters * (target_norms / norms)
101 |
102 | if mask_type is not None:
103 | with tf.name_scope('filter_mask'):
104 | filters = filters * mask
105 |
106 | result = tf.nn.conv2d(
107 | input=inputs,
108 | filter=filters,
109 | strides=[1, 1, stride, stride],
110 | padding='SAME',
111 | data_format='NCHW'
112 | )
113 |
114 | if biases:
115 | _biases = lib.param(
116 | name+'.Biases',
117 | np.zeros(output_dim, dtype='float32')
118 | )
119 |
120 | result = tf.nn.bias_add(result, _biases, data_format='NCHW')
121 |
122 |
123 | return result
124 |
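125 | # Minimal usage sketch (mirrors the Discriminator in acgan_mnist.py; names are
126 | # illustrative): a strided 5x5 convolution from 1 to 64 channels on NCHW inputs.
127 | #
128 | #   images_nchw = tf.placeholder(tf.float32, [None, 1, 28, 28])
129 | #   out = Conv2D('Example.Conv', 1, 64, 5, images_nchw, stride=2)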
--------------------------------------------------------------------------------
/l0/gan/tflib/ops/deconv2d.py:
--------------------------------------------------------------------------------
1 | import tflib as lib
2 |
3 | import numpy as np
4 | import tensorflow as tf
5 |
6 | _default_weightnorm = False
7 | def enable_default_weightnorm():
8 | global _default_weightnorm
9 | _default_weightnorm = True
10 |
11 | _weights_stdev = None
12 | def set_weights_stdev(weights_stdev):
13 | global _weights_stdev
14 | _weights_stdev = weights_stdev
15 |
16 | def unset_weights_stdev():
17 | global _weights_stdev
18 | _weights_stdev = None
19 |
20 | def Deconv2D(
21 | name,
22 | input_dim,
23 | output_dim,
24 | filter_size,
25 | inputs,
26 | he_init=True,
27 | weightnorm=None,
28 | biases=True,
29 | gain=1.,
30 | mask_type=None,
31 | ):
32 | """
33 | inputs: tensor of shape (batch size, input_dim, height, width)
34 | returns: tensor of shape (batch size, output_dim, 2*height, 2*width)
35 | """
36 | with tf.name_scope(name) as scope:
37 |
38 | if mask_type != None:
39 | raise Exception('Unsupported configuration')
40 |
41 | def uniform(stdev, size):
42 | return np.random.uniform(
43 | low=-stdev * np.sqrt(3),
44 | high=stdev * np.sqrt(3),
45 | size=size
46 | ).astype('float32')
47 |
48 | stride = 2
49 | fan_in = input_dim * filter_size**2 / (stride**2)
50 | fan_out = output_dim * filter_size**2
51 |
52 | if he_init:
53 | filters_stdev = np.sqrt(4./(fan_in+fan_out))
54 | else: # Normalized init (Glorot & Bengio)
55 | filters_stdev = np.sqrt(2./(fan_in+fan_out))
56 |
57 |
58 | if _weights_stdev is not None:
59 | filter_values = uniform(
60 | _weights_stdev,
61 | (filter_size, filter_size, output_dim, input_dim)
62 | )
63 | else:
64 | filter_values = uniform(
65 | filters_stdev,
66 | (filter_size, filter_size, output_dim, input_dim)
67 | )
68 |
69 | filter_values *= gain
70 |
71 | filters = lib.param(
72 | name+'.Filters',
73 | filter_values
74 | )
75 |
76 | if weightnorm==None:
77 | weightnorm = _default_weightnorm
78 | if weightnorm:
79 | norm_values = np.sqrt(np.sum(np.square(filter_values), axis=(0,1,3)))
80 | target_norms = lib.param(
81 | name + '.g',
82 | norm_values
83 | )
84 | with tf.name_scope('weightnorm') as scope:
85 | norms = tf.sqrt(tf.reduce_sum(tf.square(filters), reduction_indices=[0,1,3]))
86 | filters = filters * tf.expand_dims(target_norms / norms, 1)
87 |
88 |
89 | inputs = tf.transpose(inputs, [0,2,3,1], name='NCHW_to_NHWC')
90 |
91 | input_shape = tf.shape(inputs)
92 | try: # tf pre-1.0 (top) vs 1.0 (bottom)
93 | output_shape = tf.pack([input_shape[0], 2*input_shape[1], 2*input_shape[2], output_dim])
94 | except Exception as e:
95 | output_shape = tf.stack([input_shape[0], 2*input_shape[1], 2*input_shape[2], output_dim])
96 |
97 | result = tf.nn.conv2d_transpose(
98 | value=inputs,
99 | filter=filters,
100 | output_shape=output_shape,
101 | strides=[1, 2, 2, 1],
102 | padding='SAME'
103 | )
104 |
105 | if biases:
106 | _biases = lib.param(
107 | name+'.Biases',
108 | np.zeros(output_dim, dtype='float32')
109 | )
110 | result = tf.nn.bias_add(result, _biases)
111 |
112 | result = tf.transpose(result, [0,3,1,2], name='NHWC_to_NCHW')
113 |
114 |
115 | return result
116 |
--------------------------------------------------------------------------------
/l0/gan/tflib/ops/layernorm.py:
--------------------------------------------------------------------------------
1 | import tflib as lib
2 |
3 | import numpy as np
4 | import tensorflow as tf
5 |
6 | def Layernorm(name, norm_axes, inputs):
7 | mean, var = tf.nn.moments(inputs, norm_axes, keep_dims=True)
8 |
9 | # Assume the 'neurons' axis is the first of norm_axes. This is the case for fully-connected and BCHW conv layers.
10 | n_neurons = inputs.get_shape().as_list()[norm_axes[0]]
11 |
12 | offset = lib.param(name+'.offset', np.zeros(n_neurons, dtype='float32'))
13 | scale = lib.param(name+'.scale', np.ones(n_neurons, dtype='float32'))
14 |
15 | # Add broadcasting dims to offset and scale (e.g. BCHW conv data)
16 | offset = tf.reshape(offset, [-1] + [1 for i in range(len(norm_axes)-1)])
17 | scale = tf.reshape(scale, [-1] + [1 for i in range(len(norm_axes)-1)])
18 |
19 | result = tf.nn.batch_normalization(inputs, mean, var, offset, scale, 1e-5)
20 |
21 | return result
--------------------------------------------------------------------------------
/l0/gan/tflib/ops/linear.py:
--------------------------------------------------------------------------------
1 | import tflib as lib
2 |
3 | import numpy as np
4 | import tensorflow as tf
5 |
6 | _default_weightnorm = False
7 | def enable_default_weightnorm():
8 | global _default_weightnorm
9 | _default_weightnorm = True
10 |
11 | def disable_default_weightnorm():
12 | global _default_weightnorm
13 | _default_weightnorm = False
14 |
15 | _weights_stdev = None
16 | def set_weights_stdev(weights_stdev):
17 | global _weights_stdev
18 | _weights_stdev = weights_stdev
19 |
20 | def unset_weights_stdev():
21 | global _weights_stdev
22 | _weights_stdev = None
23 |
24 | def Linear(
25 | name,
26 | input_dim,
27 | output_dim,
28 | inputs,
29 | biases=True,
30 | initialization=None,
31 | weightnorm=None,
32 | gain=1.
33 | ):
34 | """
35 | initialization: None, `lecun`, 'glorot', `he`, 'glorot_he', `orthogonal`, `("uniform", range)`
36 | """
37 | with tf.name_scope(name) as scope:
38 |
39 | def uniform(stdev, size):
40 | if _weights_stdev is not None:
41 | stdev = _weights_stdev
42 | return np.random.uniform(
43 | low=-stdev * np.sqrt(3),
44 | high=stdev * np.sqrt(3),
45 | size=size
46 | ).astype('float32')
47 |
48 | if initialization == 'lecun':# and input_dim != output_dim):
49 | # disabling orth. init for now because it's too slow
50 | weight_values = uniform(
51 | np.sqrt(1./input_dim),
52 | (input_dim, output_dim)
53 | )
54 |
55 | elif initialization == 'glorot' or (initialization == None):
56 |
57 | weight_values = uniform(
58 | np.sqrt(2./(input_dim+output_dim)),
59 | (input_dim, output_dim)
60 | )
61 |
62 | elif initialization == 'he':
63 |
64 | weight_values = uniform(
65 | np.sqrt(2./input_dim),
66 | (input_dim, output_dim)
67 | )
68 |
69 | elif initialization == 'glorot_he':
70 |
71 | weight_values = uniform(
72 | np.sqrt(4./(input_dim+output_dim)),
73 | (input_dim, output_dim)
74 | )
75 |
76 | elif initialization == 'orthogonal' or \
77 | (initialization == None and input_dim == output_dim):
78 |
79 | # From lasagne
80 | def sample(shape):
81 | if len(shape) < 2:
82 | raise RuntimeError("Only shapes of length 2 or more are "
83 | "supported.")
84 | flat_shape = (shape[0], np.prod(shape[1:]))
85 | # TODO: why normal and not uniform?
86 | a = np.random.normal(0.0, 1.0, flat_shape)
87 | u, _, v = np.linalg.svd(a, full_matrices=False)
88 | # pick the one with the correct shape
89 | q = u if u.shape == flat_shape else v
90 | q = q.reshape(shape)
91 | return q.astype('float32')
92 | weight_values = sample((input_dim, output_dim))
93 |
94 | elif initialization[0] == 'uniform':
95 |
96 | weight_values = np.random.uniform(
97 | low=-initialization[1],
98 | high=initialization[1],
99 | size=(input_dim, output_dim)
100 | ).astype('float32')
101 |
102 | else:
103 |
104 | raise Exception('Invalid initialization!')
105 |
106 | weight_values *= gain
107 |
108 | weight = lib.param(
109 | name + '.W',
110 | weight_values
111 | )
112 |
113 | if weightnorm==None:
114 | weightnorm = _default_weightnorm
115 | if weightnorm:
116 | norm_values = np.sqrt(np.sum(np.square(weight_values), axis=0))
117 | # norm_values = np.linalg.norm(weight_values, axis=0)
118 |
119 | target_norms = lib.param(
120 | name + '.g',
121 | norm_values
122 | )
123 |
124 | with tf.name_scope('weightnorm') as scope:
125 | norms = tf.sqrt(tf.reduce_sum(tf.square(weight), reduction_indices=[0]))
126 | weight = weight * (target_norms / norms)
127 |
128 | # if 'Discriminator' in name:
129 | # print "WARNING weight constraint on {}".format(name)
130 | # weight = tf.nn.softsign(10.*weight)*.1
131 |
132 | if inputs.get_shape().ndims == 2:
133 | result = tf.matmul(inputs, weight)
134 | else:
135 | reshaped_inputs = tf.reshape(inputs, [-1, input_dim])
136 | result = tf.matmul(reshaped_inputs, weight)
137 | result = tf.reshape(result, tf.pack(tf.unpack(tf.shape(inputs))[:-1] + [output_dim]))
138 |
139 | if biases:
140 | result = tf.nn.bias_add(
141 | result,
142 | lib.param(
143 | name + '.b',
144 | np.zeros((output_dim,), dtype='float32')
145 | )
146 | )
147 |
148 | return result
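149 | 
150 | # Minimal usage sketch (mirrors ReLULayer in acgan_mnist.py; names are
151 | # illustrative): a fully-connected layer from 784 to 256 units with He init.
152 | #
153 | #   x = tf.placeholder(tf.float32, [None, 784])
154 | #   h = tf.nn.relu(Linear('Example.Linear', 784, 256, x, initialization='he'))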
--------------------------------------------------------------------------------
/l0/gan/tflib/plot.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | import matplotlib
4 | matplotlib.use('Agg')
5 | import matplotlib.pyplot as plt
6 |
7 | import collections
8 | import time
9 | import pickle as pickle
10 |
11 | _since_beginning = collections.defaultdict(lambda: {})
12 | _since_last_flush = collections.defaultdict(lambda: {})
13 |
14 | _iter = [0]
15 | def tick():
16 | _iter[0] += 1
17 |
18 | def plot(name, value):
19 | _since_last_flush[name][_iter[0]] = value
20 |
21 | def flush():
22 | prints = []
23 |
24 | for name, vals in list(_since_last_flush.items()):
25 | prints.append("{}\t{}".format(name, np.mean(list(vals.values()))))
26 | _since_beginning[name].update(vals)
27 |
28 | x_vals = np.sort(list(_since_beginning[name].keys()))
29 | y_vals = [_since_beginning[name][x] for x in x_vals]
30 |
31 | plt.clf()
32 | plt.plot(x_vals, y_vals)
33 | plt.xlabel('iteration')
34 | plt.ylabel(name)
35 | plt.savefig(name.replace(' ', '_')+'.jpg')
36 |
37 | print("iter {}\t{}".format(_iter[0], "\t".join(prints)))
38 | _since_last_flush.clear()
39 |
40 | with open('log.pkl', 'wb') as f:
41 | pickle.dump(dict(_since_beginning), f, pickle.HIGHEST_PROTOCOL)
--------------------------------------------------------------------------------
/l0/gan/tflib/save_images.py:
--------------------------------------------------------------------------------
1 | """
2 | Image grid saver, based on color_grid_vis from github.com/Newmu
3 | """
4 |
5 | import numpy as np
6 | import scipy.misc
7 | from scipy.misc import imsave
8 |
9 | def save_images(X, save_path):
10 | # [0, 1] -> [0,255]
11 | if isinstance(X.flatten()[0], np.floating):
12 | X = (255.99*X).astype('uint8')
13 |
14 | n_samples = X.shape[0]
15 | rows = int(np.sqrt(n_samples))
16 | while n_samples % rows != 0:
17 | rows -= 1
18 |
19 | nh, nw = rows, n_samples//rows
20 |
21 | if X.ndim == 2:
22 | X = np.reshape(X, (X.shape[0], int(np.sqrt(X.shape[1])), int(np.sqrt(X.shape[1]))))
23 |
24 | if X.ndim == 4:
25 | # BCHW -> BHWC
26 | X = X.transpose(0,2,3,1)
27 | h, w = X[0].shape[:2]
28 | img = np.zeros((h*nh, w*nw, 3))
29 | elif X.ndim == 3:
30 | h, w = X[0].shape[:2]
31 | img = np.zeros((h*nh, w*nw))
32 |
33 | for n, x in enumerate(X):
34 | j = n//nw
35 | i = n%nw
36 | img[j*h:j*h+h, i*w:i*w+w] = x
37 |
38 | imsave(save_path, img)
39 |
--------------------------------------------------------------------------------
/l0/gan/tflib/small_imagenet.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import scipy.misc
3 | import time
4 |
5 | def make_generator(path, n_files, batch_size):
6 | epoch_count = [1]
7 | def get_epoch():
8 | images = np.zeros((batch_size, 3, 64, 64), dtype='int32')
9 | files = list(range(n_files))
10 | random_state = np.random.RandomState(epoch_count[0])
11 | random_state.shuffle(files)
12 | epoch_count[0] += 1
13 | for n, i in enumerate(files):
14 | image = scipy.misc.imread("{}/{}.png".format(path, str(i+1).zfill(len(str(n_files)))))
15 | images[n % batch_size] = image.transpose(2,0,1)
16 | if n > 0 and n % batch_size == 0:
17 | yield (images,)
18 | return get_epoch
19 |
20 | def load(batch_size, data_dir='/home/ishaan/data/imagenet64'):
21 | return (
22 | make_generator(data_dir+'/train_64x64', 1281149, batch_size),
23 | make_generator(data_dir+'/valid_64x64', 49999, batch_size)
24 | )
25 |
26 | if __name__ == '__main__':
27 | train_gen, valid_gen = load(64)
28 | t0 = time.time()
29 | for i, batch in enumerate(train_gen(), start=1):
30 | print("{}\t{}".format(str(time.time() - t0), batch[0][0,0,0,0]))
31 | if i == 1000:
32 | break
33 | t0 = time.time()
--------------------------------------------------------------------------------
/l0/invariant_l0_attack.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import random
3 | import time
4 | import numpy as np
5 | from keras.datasets import mnist
6 | import sys
7 | import os
8 | import itertools
9 | import sklearn.cluster
10 | import scipy.misc
11 |
12 | import keras
13 | from keras.models import Sequential
14 | from keras.layers import Dense, Dropout, Flatten, Activation
15 | from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
16 | from keras.preprocessing.image import ImageDataGenerator
17 |
18 | DTYPE = tf.float32
19 |
20 | def make_model(filters=64, s1=5, s2=5, s3=3,
21 | d1=0, d2=0, fc=256,
22 | lr=1e-3, decay=1e-3):
23 | model = Sequential()
24 | model.add(Conv2D(filters, kernel_size=(s1, s1),
25 | activation='relu',
26 | input_shape=(28, 28, 1)))
27 | model.add(MaxPooling2D(pool_size=(2, 2)))
28 | model.add(Conv2D(filters*2, (s2, s2), activation='relu'))
29 | model.add(BatchNormalization())
30 | model.add(Conv2D(filters*2, (s3, s3), activation='relu'))
31 | model.add(BatchNormalization())
32 | model.add(MaxPooling2D(pool_size=(2, 2)))
33 | model.add(Dropout(d1))
34 | model.add(Flatten())
35 | model.add(Dense(fc, activation='relu'))
36 | model.add(Dropout(d2))
37 | model.add(Dense(10))
38 |
39 | opt = keras.optimizers.Adam(lr, decay=decay)
40 |
41 | model.compile(loss=keras.losses.categorical_crossentropy,
42 | optimizer=opt,
43 | metrics=['accuracy'])
44 |
45 | final = Sequential()
46 | final.add(model)
47 | final.add(Activation('softmax'))
48 | final.compile(loss=keras.losses.categorical_crossentropy,
49 | optimizer=opt,
50 | metrics=['accuracy'])
51 |
52 |
53 | return model, final
54 |
55 |
56 | def train_model(model, x_train, y_train, batch_size=256,
57 | epochs=20):
58 | model.fit(x_train, keras.utils.to_categorical(y_train, 10),
59 | batch_size=batch_size,
60 | epochs=epochs,
61 | shuffle=True,
62 | verbose=2,
63 | )
64 |
65 | return model
66 |
67 |
68 | def show(img):
69 | img = img
70 | remap = " .*#" + "#" * 100
71 | img = (img.flatten()) * 3
72 | print("START")
73 | for i in range(28):
74 | print("".join([remap[int(round(x))] for x in img[i * 28:i * 28 + 28]]))
75 |
76 | def compute_mat(angle, sx, sy, ax, ay, tx, ty, da, db):
77 | mat = np.eye(3)
78 | mat = np.dot(mat, [[1,ax,0],
79 | [ay,1,0],
80 | [0, 0, 1]])
81 | mat = np.dot(mat, [[sx,0,0],
82 | [0,sy,0],
83 | [0, 0, 1]])
84 | mat = np.dot(mat, [[1,0,tx],
85 | [0,1,ty],
86 | [0, 0, 1]])
87 | mat = np.dot(mat, [[np.cos(angle), -np.sin(angle), 0],
88 | [np.sin(angle), np.cos(angle), 0],
89 | [0, 0, 1]])
90 |
91 | inv = np.linalg.inv(mat)
92 | return mat, inv
93 |
94 | def cluster(mask):
95 | dbscan = sklearn.cluster.DBSCAN(2, min_samples=5)
96 | points = [(i,j) for i in range(28) for j in range(28) if mask[0,i,j,0]]
97 | points = np.array(points)
98 | dbscan.fit(points)
99 | flat = points[:,0]*28+points[:,1]
100 | labels = dbscan.labels_
101 |
102 | arr = np.zeros((28*28))
103 | arr[flat] = -1
104 |
105 | for i in range(max(labels)+1):
106 | arr[flat[labels==i]] = 1+i
107 | arr = arr.reshape((28,28))
108 | return arr
109 |
110 | def improve_transform():
111 | sys.path.append("gan/")
112 | from gan.acgan_mnist import Generator
113 |
114 | zin = tf.placeholder(tf.float32, [None, 74])
115 | x_target = tf.placeholder(tf.float32, [None, 28, 28, 1])
116 |
117 | generated_images, _ = Generator(None, zin)
118 | generated_images = tf.reshape(generated_images, [-1, 28, 28, 1])
119 |
120 | similarity_loss = tf.reduce_sum(np.abs(generated_images - x_target),axis=(1,2,3))
121 | z_loss = 0.01*tf.reduce_sum(zin[:,10:]**2, axis=1)
122 | total_loss = similarity_loss + z_loss
123 | grads = tf.gradients(similarity_loss, [zin])[0]
124 |
125 | sess = tf.Session()
126 |
127 | touse = [x for x in tf.trainable_variables() if 'Generator' in x.name]
128 | saver = tf.train.Saver(touse)
129 | saver.restore(sess, 'gan/model/mnist-acgan-2')
130 |
131 | keras.backend.set_learning_phase(False)
132 |
133 | def score(image, label):
134 | #show(image)
135 | zs = np.random.normal(0, 1, size=(128, 74))
136 | zs[:,:10] = 0
137 | zs[:,label] = 1
138 |
139 | for _ in range(30):
140 | #print("generate")
141 | ell, l_sim, l_z, nimg, delta = sess.run((total_loss, similarity_loss,
142 | z_loss, generated_images,grads),
143 | {zin: zs,
144 | x_target: image[np.newaxis,:,:,:]})
145 | #print(l_sim)
146 | #show(nimg)
147 | zs[:,10:] -= delta[:,10:]*.01
148 |
149 | return np.min(ell)
150 |
151 | transformation_matrix = tf.placeholder(tf.float32, [8])
152 | xs = tf.placeholder(DTYPE, [None, 28, 28, 1])
153 | transformed = tf.contrib.image.transform(xs, transformation_matrix,
154 | 'BILINEAR')
155 |
156 | uids = list(set([int(x.split("_")[1]) for x in os.listdir("best") if 'best_' in x and "_10000" in x]))
157 |
158 | num = [max([int(x.split("_")[2][:-4]) for x in os.listdir("best") if str(uids[i]) in x and 'idx' not in x and 'tran' not in x]) for i in range(4)]
159 |
160 |
161 |
162 | arr = []
163 | for fileid, filecount in zip(uids, num):
164 | best = np.load("best/best_%d_%d.npy"%(fileid,filecount))
165 | best_idx = np.array(np.load("best/best_%d_%d_idx.npy"%(fileid,filecount)), dtype=np.int32)
166 | best_transforms = np.load("best/best_%d_transforms_%d.npy"%(fileid,filecount))
167 |
168 | mask = (abs(best-x_test[use_idx]) > .5)
169 | delta = np.sum(mask,axis=(1,2,3))
170 | arr.append(delta)
171 | print(delta)
172 | print(np.median(delta))
173 | arr = np.min(arr,axis=0)
174 |
175 | fout = open("/tmp/out.html","w")
176 |
177 | def write(txt, img, lab, delta, doinv=False, do=True):
178 | if do:
179 | if len(img.shape) == 4:
180 | img = img[0]
181 | if doinv:
182 | timg = sess.run(transformed, {xs: img[np.newaxis,:,:,:],
183 | transformation_matrix: inv.flatten()[:-1]})[0]
184 | else:
185 | timg = img
186 |
187 | s = score(timg, lab)
188 | else:
189 | s = 0
190 |
191 | print(lab, type(lab))
192 | print(delta, type(delta))
193 | fout.write('