├── LICENSE
├── README.md
├── TF1
│   ├── inception_score_tf1.py
│   └── inception_score_tpu_tf1.py
├── inception_score.py
└── inception_score_tpu.py

/LICENSE:
--------------------------------------------------------------------------------
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."
      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.
      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.
      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
Inception Score
=====================================

TensorFlow implementation of the "Inception Score" (IS) for evaluating generative models, with the bug raised in [https://github.com/openai/improved-gan/issues/29](https://github.com/openai/improved-gan/issues/29) fixed.

## Major Dependencies
- `tensorflow==1.14` or (`tensorflow==1.15` and `tensorflow-gan==1.0.0.dev0`) or (`tensorflow>=2` and `tensorflow-gan>=2.0.0`)

## Features
- Fast, easy-to-use and memory-efficient, written in a way that is similar to the original implementation
- No prior knowledge about TensorFlow is necessary if you are using CPUs or GPUs
- Makes use of [TF-GAN](https://github.com/tensorflow/gan)
- Downloads InceptionV1 automatically
- Compatible with both Python 2 and Python 3

## Usage
- If you are working with GPUs, use `inception_score.py`; if you are working with TPUs, use `inception_score_tpu.py` and pass a TensorFlow `Session` and a [TPUStrategy](https://www.tensorflow.org/api_docs/python/tf/distribute/experimental/TPUStrategy) as additional arguments.
- Call `get_inception_score(images, splits=10)`, where `images` is a numpy array with values ranging from 0 to 255 and shape in the form `[N, 3, HEIGHT, WIDTH]`, where `N`, `HEIGHT` and `WIDTH` can be arbitrary. A `dtype` of `np.uint8` is recommended to save CPU memory.
- A smaller `BATCH_SIZE` reduces GPU/TPU memory usage, but at the cost of a slight slowdown.
- If you want to compute a general "Classifier Score" with probabilities `preds` from another classifier, call `preds2score(preds, splits=10)`. `preds` can be a numpy array of arbitrary shape `[N, num_classes]`. A minimal sketch of both calls is shown below.
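The snippet below is a minimal GPU-side sketch of the calls described above. The random `images` array is only a stand-in for real generator samples, and the snippet assumes it is run from the repository root so that `inception_score.py` is importable (importing the module builds the graph, opens a session, and fetches the InceptionV1 classifier on first use):

```python
import numpy as np
from inception_score import get_inception_score, preds2score

# Stand-in for generator samples: 5000 random RGB images in NCHW layout with values in [0, 255].
images = np.random.randint(0, 256, size=(5000, 3, 32, 32), dtype=np.uint8)

mean, std = get_inception_score(images, splits=10)
print('Inception Score: %.2f +/- %.2f' % (mean, std))

# Alternatively, score an arbitrary [N, num_classes] array of class probabilities.
probs = np.full((5000, 10), 0.1, dtype=np.float32)  # a uniform classifier scores exactly 1.0
mean, std = preds2score(probs, splits=10)
```

For TPUs the call is the same, except that `get_inception_score` in `inception_score_tpu.py` additionally takes `session=...` and `strategy=...`, as demonstrated in the Colab notebooks below.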
## Examples
GPU: [![Example In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1hgJJI5wuILxcHsmrkZMkHJtk6uDlKOwr?usp=sharing)

TPU and TF1: [![Example In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1F0fXOKlzIkOSEAdIRa9oyacW34SUX2_v?usp=sharing)

TPU and TF2: [![Example In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1Cb8erVc-v6zCG-cLfOWCIjFZPl5zQ4jl?usp=sharing)

## Links
- The Inception Score was proposed in the paper [Improved Techniques for Training GANs](https://arxiv.org/abs/1606.03498)
- Code for the [Fréchet Inception Distance](https://github.com/tsc2017/Frechet-Inception-Distance)

--------------------------------------------------------------------------------
/TF1/inception_score_tf1.py:
--------------------------------------------------------------------------------
'''
From https://github.com/tsc2017/Inception-Score
Code derived from https://github.com/openai/improved-gan/blob/master/inception_score/model.py and https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/gan/python/eval/python/classifier_metrics_impl.py

Usage:
    Call get_inception_score(images, splits=10)
Args:
    images: A numpy array with values ranging from 0 to 255 and shape in the form [N, 3, HEIGHT, WIDTH] where N, HEIGHT and WIDTH can be arbitrary. A dtype of np.uint8 is recommended to save CPU memory.
    splits: The number of splits of the images, default is 10.
Returns:
    Mean and standard deviation of the Inception Score across the splits.
'''

import tensorflow as tf
import os
import functools
import numpy as np
import time
from tensorflow.python.ops import array_ops

# For TF < 1.15 use the bundled tf.contrib.gan; newer versions need the separate tensorflow-gan package.
if float('.'.join(tf.__version__.split('.')[:2])) < 1.15:
    tfgan = tf.contrib.gan
else:
    import tensorflow_gan as tfgan

session = tf.compat.v1.InteractiveSession()

# A smaller BATCH_SIZE reduces GPU memory usage, but at the cost of a slight slowdown.
BATCH_SIZE = 64
INCEPTION_URL = 'http://download.tensorflow.org/models/frozen_inception_v1_2015_12_05.tar.gz'
INCEPTION_FROZEN_GRAPH = 'inceptionv1_for_inception_score.pb'

# Run images through Inception.
inception_images = tf.compat.v1.placeholder(tf.float32, [None, 3, None, None])

def inception_logits(images=inception_images, num_splits=1):
    # Convert NCHW -> NHWC and resize to the 299x299 input expected by InceptionV1.
    images = tf.transpose(images, [0, 2, 3, 1])
    size = 299
    images = tf.compat.v1.image.resize_bilinear(images, [size, size])
    generated_images_list = array_ops.split(images, num_or_size_splits=num_splits)
    logits = tf.map_fn(
        fn=functools.partial(
            tfgan.eval.run_inception,
            default_graph_def_fn=functools.partial(
                tfgan.eval.get_graph_def_from_url_tarball,
                INCEPTION_URL,
                INCEPTION_FROZEN_GRAPH,
                os.path.basename(INCEPTION_URL)),
            output_tensor='logits:0'),
        elems=array_ops.stack(generated_images_list),
        parallel_iterations=8,
        back_prop=False,
        swap_memory=True,
        name='RunClassifier')
    logits = array_ops.concat(array_ops.unstack(logits), 0)
    return logits

logits = inception_logits()

def get_inception_probs(inps):
    n_batches = int(np.ceil(float(inps.shape[0]) / BATCH_SIZE))
    preds = np.zeros([inps.shape[0], 1000], dtype=np.float32)
    for i in range(n_batches):
        inp = inps[i * BATCH_SIZE:(i + 1) * BATCH_SIZE] / 255. * 2 - 1
        preds[i * BATCH_SIZE : i * BATCH_SIZE + min(BATCH_SIZE, inp.shape[0])] = session.run(logits, {inception_images: inp})[:, :1000]
    # Softmax over the 1000 ImageNet classes.
    preds = np.exp(preds) / np.sum(np.exp(preds), 1, keepdims=True)
    return preds

def preds2score(preds, splits=10):
    # Inception Score of each split: exp( mean_i KL( p(y|x_i) || p(y) ) ), with p(y) estimated from the split.
    scores = []
    for i in range(splits):
        part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
        kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
        kl = np.mean(np.sum(kl, 1))
        scores.append(np.exp(kl))
    return np.mean(scores), np.std(scores)

def get_inception_score(images, splits=10):
    assert(type(images) == np.ndarray)
    assert(len(images.shape) == 4)
    assert(images.shape[1] == 3)
    assert(np.min(images[0]) >= 0 and np.max(images[0]) > 10), 'Image values should be in the range [0, 255]'
    print('Calculating Inception Score with %i images in %i splits' % (images.shape[0], splits))
    start_time = time.time()
    preds = get_inception_probs(images)
    mean, std = preds2score(preds, splits)
    print('Inception Score calculation time: %f s' % (time.time() - start_time))
    return mean, std  # Reference values: 11.38 for 50000 CIFAR-10 training set images, or mean=11.31, std=0.10 if in 10 splits.

--------------------------------------------------------------------------------
/TF1/inception_score_tpu_tf1.py:
--------------------------------------------------------------------------------
'''
From https://github.com/tsc2017/Inception-Score
Code derived from https://github.com/openai/improved-gan/blob/master/inception_score/model.py and https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/gan/python/eval/python/classifier_metrics_impl.py

Usage:
    Call get_inception_score(images, splits=10, session=YOUR_SESSION, strategy=YOUR_TPUSTRATEGY)
Args:
    images: A numpy array with values ranging from 0 to 255 and shape in the form [N, 3, HEIGHT, WIDTH] where N, HEIGHT and WIDTH can be arbitrary. A dtype of np.uint8 is recommended to save CPU memory.
    splits: The number of splits of the images, default is 10.
Returns:
    Mean and standard deviation of the Inception Score across the splits.
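Example (an illustrative sketch, not part of this module; the resolver/session setup assumes a Colab-style TF1 TPU runtime and may differ in your environment):
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='grpc://' + os.environ['COLAB_TPU_ADDR'])
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.experimental.TPUStrategy(resolver)
    session = tf.compat.v1.Session(resolver.master())
    mean, std = get_inception_score(images, splits=10, session=session, strategy=strategy)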
'''

import tensorflow as tf
import os
import functools
import numpy as np
import time
from tensorflow.python.ops import array_ops

# For TF < 1.15 use the bundled tf.contrib.gan; newer versions need the separate tensorflow-gan package.
if float('.'.join(tf.__version__.split('.')[:2])) < 1.15:
    tfgan = tf.contrib.gan
else:
    import tensorflow_gan as tfgan

# A smaller BATCH_SIZE reduces TPU memory usage, but at the cost of a slight slowdown.
BATCH_SIZE = 8
INCEPTION_URL = 'http://download.tensorflow.org/models/frozen_inception_v1_2015_12_05_v4.tar.gz'
INCEPTION_FROZEN_GRAPH = 'inceptionv1_for_inception_score_tpu.pb'
FIRST_RUN = [1]

# Run images through Inception.
inception_images = [None]

def inception_logits(num_splits=1):
    images = inception_images[0]
    images = tf.transpose(images, [0, 2, 3, 1])
    size = 299
    images = tf.compat.v1.image.resize_bilinear(images, [size, size])
    generated_images_list = array_ops.split(images, num_or_size_splits=num_splits)
    logits = tf.map_fn(
        fn=functools.partial(
            tfgan.eval.run_inception,
            default_graph_def_fn=functools.partial(
                tfgan.eval.get_graph_def_from_url_tarball,
                INCEPTION_URL,
                INCEPTION_FROZEN_GRAPH,
                os.path.basename(INCEPTION_URL)),
            output_tensor='logits:0'),
        elems=array_ops.stack(generated_images_list),
        parallel_iterations=8,
        back_prop=False,
        swap_memory=True,
        name='RunClassifier')
    logits = array_ops.concat(array_ops.unstack(logits), 0)
    return logits

logits = [None]

def get_inception_probs(inps, session=None, strategy=None):
    if FIRST_RUN[0]:
        with session.graph.as_default():
            inception_images[0] = tf.compat.v1.placeholder(tf.float32, [None, 3, None, None], name='inception_images')
            print('Running Inception for the first time, compiling...')
            logits[0] = strategy.experimental_run(inception_logits).values[0]
        FIRST_RUN[0] = 0
    n_batches = int(np.ceil(float(inps.shape[0]) / BATCH_SIZE))
    preds = np.zeros([inps.shape[0], 1000], dtype=np.float32)
    for i in range(n_batches):
        inp = inps[i * BATCH_SIZE:(i + 1) * BATCH_SIZE] / 255. * 2 - 1
        preds[i * BATCH_SIZE : i * BATCH_SIZE + min(BATCH_SIZE, inp.shape[0])] = session.run(logits[0], {inception_images[0]: inp})[:, :1000]
    preds = np.exp(preds) / np.sum(np.exp(preds), 1, keepdims=True)
    return preds

def preds2score(preds, splits=10):
    scores = []
    for i in range(splits):
        part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
        kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
        kl = np.mean(np.sum(kl, 1))
        scores.append(np.exp(kl))
    return np.mean(scores), np.std(scores)

def get_inception_score(images, splits=10, session=None, strategy=None):
    assert(type(images) == np.ndarray)
    assert(len(images.shape) == 4)
    assert(images.shape[1] == 3)
    assert(np.min(images[0]) >= 0 and np.max(images[0]) > 10), 'Image values should be in the range [0, 255]'
    print('Calculating Inception Score with %i images in %i splits' % (images.shape[0], splits))
    start_time = time.time()
    preds = get_inception_probs(images, session, strategy)
    mean, std = preds2score(preds, splits)
    print('Inception Score calculation time: %f s' % (time.time() - start_time))
    return mean, std  # Reference values: 11.38 for 50000 CIFAR-10 training set images, or mean=11.31, std=0.10 if in 10 splits.
--------------------------------------------------------------------------------
/inception_score.py:
--------------------------------------------------------------------------------
'''
From https://github.com/tsc2017/Inception-Score
Code derived from https://github.com/openai/improved-gan/blob/master/inception_score/model.py and https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/gan/python/eval/python/classifier_metrics_impl.py

Usage:
    Call get_inception_score(images, splits=10)
Args:
    images: A numpy array with values ranging from 0 to 255 and shape in the form [N, 3, HEIGHT, WIDTH] where N, HEIGHT and WIDTH can be arbitrary. A dtype of np.uint8 is recommended to save CPU memory.
    splits: The number of splits of the images, default is 10.
Returns:
    Mean and standard deviation of the Inception Score across the splits.
'''

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import os
import functools
import numpy as np
import time
from tensorflow.python.ops import array_ops
# pip install tensorflow-gan
import tensorflow_gan as tfgan

session = tf.compat.v1.InteractiveSession()

# A smaller BATCH_SIZE reduces GPU memory usage, but at the cost of a slight slowdown.
BATCH_SIZE = 64
INCEPTION_TFHUB = 'https://tfhub.dev/tensorflow/tfgan/eval/inception/1'
INCEPTION_OUTPUT = 'logits'

# Run images through Inception.
inception_images = tf.compat.v1.placeholder(tf.float32, [None, 3, None, None], name='inception_images')

def inception_logits(images=inception_images, num_splits=1):
    images = tf.transpose(images, [0, 2, 3, 1])
    size = 299
    images = tf.compat.v1.image.resize_bilinear(images, [size, size])
    generated_images_list = array_ops.split(images, num_or_size_splits=num_splits)
    logits = tf.map_fn(
        fn=tfgan.eval.classifier_fn_from_tfhub(INCEPTION_TFHUB, INCEPTION_OUTPUT, True),
        elems=array_ops.stack(generated_images_list),
        parallel_iterations=8,
        back_prop=False,
        swap_memory=True,
        name='RunClassifier')
    logits = array_ops.concat(array_ops.unstack(logits), 0)
    return logits

logits = inception_logits()

def get_inception_probs(inps):
    session = tf.get_default_session()
    n_batches = int(np.ceil(float(inps.shape[0]) / BATCH_SIZE))
    preds = np.zeros([inps.shape[0], 1000], dtype=np.float32)
    for i in range(n_batches):
        inp = inps[i * BATCH_SIZE:(i + 1) * BATCH_SIZE] / 255. * 2 - 1
        preds[i * BATCH_SIZE : i * BATCH_SIZE + min(BATCH_SIZE, inp.shape[0])] = session.run(logits, {inception_images: inp})[:, :1000]
    preds = np.exp(preds) / np.sum(np.exp(preds), 1, keepdims=True)
    return preds

def preds2score(preds, splits=10):
    scores = []
    for i in range(splits):
        part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
        kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
        kl = np.mean(np.sum(kl, 1))
        scores.append(np.exp(kl))
    return np.mean(scores), np.std(scores)

def get_inception_score(images, splits=10):
    assert(type(images) == np.ndarray)
    assert(len(images.shape) == 4)
    assert(images.shape[1] == 3)
    assert(np.min(images[0]) >= 0 and np.max(images[0]) > 10), 'Image values should be in the range [0, 255]'
    print('Calculating Inception Score with %i images in %i splits' % (images.shape[0], splits))
    start_time = time.time()
    preds = get_inception_probs(images)
    mean, std = preds2score(preds, splits)
    print('Inception Score calculation time: %f s' % (time.time() - start_time))
    return mean, std  # Reference values: 11.38 for 50000 CIFAR-10 training set images, or mean=11.31, std=0.10 if in 10 splits.

--------------------------------------------------------------------------------
/inception_score_tpu.py:
--------------------------------------------------------------------------------
'''
From https://github.com/tsc2017/Inception-Score
Code derived from https://github.com/openai/improved-gan/blob/master/inception_score/model.py and https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/gan/python/eval/python/classifier_metrics_impl.py

Usage:
    Call get_inception_score(images, splits=10, session=YOUR_SESSION, strategy=YOUR_TPUSTRATEGY)
Args:
    images: A numpy array with values ranging from 0 to 255 and shape in the form [N, 3, HEIGHT, WIDTH] where N, HEIGHT and WIDTH can be arbitrary. A dtype of np.uint8 is recommended to save CPU memory.
    splits: The number of splits of the images, default is 10.
Returns:
    Mean and standard deviation of the Inception Score across the splits.
'''

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import os
import functools
import numpy as np
import time
from tensorflow.python.ops import array_ops
# pip install tensorflow-gan
import tensorflow_gan as tfgan

# A smaller BATCH_SIZE reduces TPU memory usage, but at the cost of a slight slowdown.
BATCH_SIZE = 1000
INCEPTION_TFHUB = 'https://tfhub.dev/tensorflow/tfgan/eval/inception/1'
INCEPTION_OUTPUT = 'logits'
FIRST_RUN = [True]

# Run images through Inception.
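# Module-level state for the lazily built graph: on the first call,
# get_inception_probs() creates the image placeholder, a distributed dataset
# iterator and the replicated logits op inside the caller's session graph;
# later calls only feed new batches through the already-compiled ops.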
inception_images = [None]
image_iterator_init = [None]
inception_size = 299
input_size = [32]

def inception_logits(images):
    images = tf.transpose(images, [0, 2, 3, 1])
    images = tf.compat.v1.image.resize_bilinear(images, [inception_size, inception_size])
    generated_images_list = array_ops.split(images, num_or_size_splits=1)
    logits = tf.map_fn(
        fn=tfgan.eval.classifier_fn_from_tfhub(INCEPTION_TFHUB, INCEPTION_OUTPUT, True),
        elems=array_ops.stack(generated_images_list),
        parallel_iterations=1,
        back_prop=False,
        swap_memory=True,
        name='RunClassifier')
    logits = array_ops.concat(array_ops.unstack(logits), 0)
    return logits

logits = [None]

def get_inception_probs(inps, session=None, strategy=None):
    if FIRST_RUN[0]:
        print('Running Inception for the first time, compiling...')
        with session.graph.as_default():
            inception_images[0] = tf.compat.v1.placeholder(tf.float32, [BATCH_SIZE, 3, input_size[0], input_size[0]], name='inception_images')
            image_dataset = tf.data.Dataset.from_tensor_slices((inception_images[0])).batch(BATCH_SIZE, drop_remainder=True)
            image_iterator = strategy.make_dataset_iterator(image_dataset)
            image_iterator_init[0] = image_iterator.initialize()
            logits[0] = tf.concat(strategy.experimental_run(inception_logits, image_iterator).values, 0)
        FIRST_RUN[0] = False
    n_batches = int(np.ceil(float(inps.shape[0]) / BATCH_SIZE))
    preds = np.zeros([inps.shape[0], 1000], dtype=np.float32)
    for i in range(n_batches):
        inp = inps[i * BATCH_SIZE:(i + 1) * BATCH_SIZE] / 255. * 2 - 1
        session.run(image_iterator_init[0], {inception_images[0]: inp})
        preds[i * BATCH_SIZE : i * BATCH_SIZE + min(BATCH_SIZE, inp.shape[0])] = session.run(logits[0])[:, :1000]
    preds = np.exp(preds) / np.sum(np.exp(preds), 1, keepdims=True)
    return preds

def preds2score(preds, splits=10):
    scores = []
    for i in range(splits):
        part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
        kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
        kl = np.mean(np.sum(kl, 1))
        scores.append(np.exp(kl))
    return np.mean(scores), np.std(scores)

def get_inception_score(images, splits=10, session=None, strategy=None):
    assert(type(images) == np.ndarray)
    assert(len(images.shape) == 4)
    assert(images.shape[1] == 3)
    assert(np.min(images[0]) >= 0 and np.max(images[0]) > 10), 'Image values should be in the range [0, 255]'
    input_size[0] = images.shape[3]
    print('Calculating Inception Score with %i images in %i splits' % (images.shape[0], splits))
    start_time = time.time()
    preds = get_inception_probs(images, session, strategy)
    mean, std = preds2score(preds, splits)
    print('Inception Score calculation time: %f s' % (time.time() - start_time))
    return mean, std  # Reference values: 11.38 for 50000 CIFAR-10 training set images, or mean=11.31, std=0.10 if in 10 splits.
--------------------------------------------------------------------------------