├── generative-tf
│   ├── __init__.py
│   ├── models
│   │   ├── __init__.py
│   │   ├── initialization.py
│   │   └── variational_autoencoder.py
│   └── train_mnist_vae.py
├── README.md
├── .gitignore
└── LICENSE

/generative-tf/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------

/generative-tf/models/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------

/README.md:
--------------------------------------------------------------------------------
# generative-tf
Generative Models with TensorFlow


#### Variational Autoencoder

> Kingma, D. P. & Welling, M. Auto-Encoding Variational Bayes.

```bash
python generative-tf/train_mnist_vae.py \
    --epochs 5000 \
    --print-every-N-iter 100 \
    --latent-dim 10 \
    --hidden-dim 500 \
    --batch-size 100 \
    --optimizer rmsprop
```

> Burda, Y., Grosse, R. & Salakhutdinov, R. Importance Weighted Autoencoders. 1–12 (2015).

Add importance weighting with:

```bash
--importance-weighting
```
--------------------------------------------------------------------------------

/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]

# C extensions
*.so

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover

# Translations
*.mo
*.pot

# Django stuff:
*.log

# Sphinx documentation
docs/_build/

# PyBuilder
target/

MNIST_data/
--------------------------------------------------------------------------------

/generative-tf/models/initialization.py:
--------------------------------------------------------------------------------
import tensorflow as tf
import numpy as np


def xavier_glorot_initialization(in_dim, out_dim, distribution='normal'):
    """
    Xavier Glorot and Yoshua Bengio (2010)
    Understanding the difficulty of training deep feedforward neural networks.
    International Conference on Artificial Intelligence and Statistics.
    """

    if distribution == 'uniform':
        # Uniform(-a, a) has variance a^2 / 3, so a = sqrt(6 / (in + out))
        # gives the Glorot target variance 2 / (in + out).
        extreme = np.sqrt(6.0 / (in_dim + out_dim))
        return tf.random_uniform(
            (in_dim, out_dim), minval=-extreme, maxval=extreme, dtype=tf.float32)
    elif distribution == 'normal' or distribution == 'gaussian':
        stddev = np.sqrt(2.0 / (in_dim + out_dim))
        return tf.random_normal(
            (in_dim, out_dim), stddev=stddev, dtype=tf.float32)
    else:
        raise ValueError(
            "{} is an unsupported distribution for Glorot initialization".format(distribution))


def he_initialization(in_dim, out_dim, activation, alpha=None):
    """
    Kaiming He et al. (2015)
    Delving deep into rectifiers: Surpassing human-level performance on
    ImageNet classification. arXiv preprint arXiv:1502.01852.
    """

    gain = None
    if activation == 'linear' or activation == 'sigmoid':
        gain = 1.0
    elif activation == 'relu':
        gain = np.sqrt(2.0)
    elif activation == 'leaky_relu' and alpha is not None:
        gain = np.sqrt(2.0 / (1 + alpha ** 2))

    if gain is None:
        raise ValueError(
            "{} is an unsupported activation for He initialization".format(activation))

    stddev = gain * np.sqrt(1.0 / in_dim)
    return tf.random_normal(
        (in_dim, out_dim), stddev=stddev, dtype=tf.float32)
--------------------------------------------------------------------------------
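Both helpers return tensors meant to serve as initial values for weight variables. A minimal usage sketch (not part of the repo; assumes TensorFlow 1.x and that `models` is importable from `generative-tf/`, mirroring how `variational_autoencoder.py` below consumes `xavier_glorot_initialization`):

```python
import tensorflow as tf

from models.initialization import xavier_glorot_initialization, he_initialization

# Weight matrices for a hypothetical 784 -> 500 -> 10 network.  The random
# tensors are evaluated once, when each tf.Variable is initialized.
W1 = tf.Variable(xavier_glorot_initialization(784, 500, distribution='uniform'))
W2 = tf.Variable(he_initialization(500, 10, activation='relu'))
```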
/generative-tf/train_mnist_vae.py:
--------------------------------------------------------------------------------
import argparse
import time

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

from models.variational_autoencoder import VariationalAutoencoder


mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)


def train_test_mnist_vae(epochs,
                         print_n,
                         hidden_dim=10,
                         latent_dim=10,
                         batch_size=100,
                         optimizer='adam',
                         importance_weighting=False,
                         test_full=False):

    input_dim = 784

    vae = VariationalAutoencoder(
        input_dim,
        latent_dim=latent_dim,
        hidden_dim=hidden_dim,
        batch_size=batch_size,
    )
    with vae.graph.as_default():
        # Build the objective ops once.  Calling _evidence_lower_bound()
        # inside the training loop would add new nodes to the graph on
        # every iteration.
        train_elbo = vae._evidence_lower_bound(
            importance_weighting=importance_weighting)
        eval_elbo = vae._evidence_lower_bound()

        if optimizer == 'adam':
            opt = tf.train.AdamOptimizer(1e-4).minimize(-train_elbo)
        elif optimizer == 'rmsprop':
            opt = tf.train.RMSPropOptimizer(
                learning_rate=0.001, decay=0.9).minimize(-train_elbo)
        else:
            raise ValueError("Optimizer '{}' is not supported".format(optimizer))

        # The init op must be created while vae.graph is the default graph;
        # otherwise it would initialize the variables of a different graph.
        init = tf.global_variables_initializer()

    with tf.Session(graph=vae.graph) as sess:
        sess.run(init)
        start_time = time.time()
        current_start_time = start_time

        for i in range(epochs):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(opt, feed_dict={vae.x: batch_xs})
            if i % print_n == 0:
                elbo = sess.run(eval_elbo, feed_dict={vae.x: batch_xs})
                if test_full:
                    # Average the ELBO over every full batch of the test set.
                    test_elbo = 0.0
                    n_test_batches = mnist.test.images.shape[0] // batch_size
                    for j in range(n_test_batches):
                        test_batch = mnist.test.images[
                            j * batch_size:(j + 1) * batch_size, :]
                        test_elbo += sess.run(eval_elbo,
                                              feed_dict={vae.x: test_batch})
                    test_elbo /= n_test_batches
                else:
                    test_elbo = sess.run(
                        eval_elbo,
                        feed_dict={vae.x: mnist.test.images[:batch_size, :]})
                current_time = time.time()
                time_elapsed = current_time - current_start_time
                time_per_iteration = (current_time - start_time) / (i + 1)
                current_start_time = current_time
                print("""
                At batch: {}
                batch ELBO: {}
                test batch ELBO: {}
                time elapsed: {:.2f}s
                time per iteration: {:.2f}s
                """.format(i, elbo, test_elbo, time_elapsed, time_per_iteration))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()

    parser.add_argument('--print-every-N-iter', dest='print_n', default=25, type=int)
    parser.add_argument('--epochs', dest='epochs', default=1000, type=int)
    parser.add_argument('--hidden-dim', dest='hidden_dim', default=10, type=int)
    parser.add_argument('--latent-dim', dest='latent_dim', default=10, type=int)
    parser.add_argument('--batch-size', dest='batch_size', default=100, type=int)

    parser.add_argument('--optimizer', dest='optimizer', default='adam', type=str)

    parser.add_argument('--importance-weighting', dest='importance_weighting',
                        action='store_true')

    args = parser.parse_args()
    train_test_mnist_vae(args.epochs,
                         args.print_n,
                         hidden_dim=args.hidden_dim,
                         latent_dim=args.latent_dim,
                         batch_size=args.batch_size,
                         optimizer=args.optimizer,
                         importance_weighting=args.importance_weighting)
--------------------------------------------------------------------------------
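The entry point can also be driven programmatically rather than through the CLI. A hypothetical sketch, assuming the working directory is `generative-tf/` (note that importing the module triggers the MNIST download at import time):

```python
from train_mnist_vae import train_test_mnist_vae

# Same knobs as the CLI flags above.  Each "epoch" here is one
# mini-batch update, not a full pass over the 55,000 training images.
train_test_mnist_vae(epochs=1000,
                     print_n=100,
                     hidden_dim=500,
                     latent_dim=10,
                     batch_size=100,
                     optimizer='rmsprop',
                     importance_weighting=True)
```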
/generative-tf/models/variational_autoencoder.py:
--------------------------------------------------------------------------------
import tensorflow as tf
import numpy as np

from models.initialization import xavier_glorot_initialization


class VariationalAutoencoder():
    def __init__(self,
                 input_dim,
                 latent_dim,
                 hidden_dim=10,
                 batch_size=100,
                 num_layers=0,  # currently unused; encoder/decoder have one hidden layer
                 activation_func=tf.nn.relu,
                 output_activation_func=tf.nn.sigmoid):

        self.graph = tf.Graph()
        self.activation_func = activation_func
        self.output_activation_func = output_activation_func
        self.input_dim = input_dim
        self.batch_size = batch_size

        with self.graph.as_default():
            ## Input x variable
            self.x = tf.placeholder(tf.float32, shape=(self.batch_size, input_dim))

            ## Dimension of the latent variables mean and log_variance
            self._latent_dim = latent_dim

            self._encoder_W = tf.Variable(xavier_glorot_initialization(input_dim, hidden_dim))
            self._encoder_bias = tf.Variable(tf.zeros([hidden_dim]))

            self._mean_encoder = tf.Variable(xavier_glorot_initialization(hidden_dim, latent_dim))
            self._mean_encoder_bias = tf.Variable(tf.zeros([latent_dim]))

            self._log_variance_encoder = tf.Variable(xavier_glorot_initialization(hidden_dim, latent_dim))
            self._log_variance_encoder_bias = tf.Variable(tf.zeros([latent_dim]))

            self._decoder_W = tf.Variable(xavier_glorot_initialization(latent_dim, hidden_dim))
            self._decoder_bias = tf.Variable(tf.zeros([hidden_dim]))

            self._mean_decoder = tf.Variable(xavier_glorot_initialization(hidden_dim, input_dim))
            self._mean_decoder_bias = tf.Variable(tf.zeros([input_dim]))

    def _generate(self,
                  z,
                  activation_func=tf.nn.softplus,
                  output_activation_func=tf.nn.sigmoid):
        with self.graph.as_default():

            # Compute the hidden state from latent variables
            h = activation_func(
                tf.matmul(z, self._decoder_W) + self._decoder_bias
            )

            # Compute the reconstruction from hidden state
            mean = output_activation_func(
                tf.matmul(h, self._mean_decoder) + self._mean_decoder_bias
            )

            # The decoder models Bernoulli outputs, so no variance is produced.
            log_variance = None

            return (mean, log_variance)

    def _encode(self, x):
        """
        Forward pass of the encoder (recognition network).

        Maps inputs x to the mean and log variance of the Gaussian
        approximate posterior q(z | x).
        """
        with self.graph.as_default():
            h = self.activation_func(
                tf.matmul(x, self._encoder_W) + self._encoder_bias
            )

            # The mean and log-variance heads are linear; squashing them with
            # the hidden activation (e.g. a ReLU) would constrain the posterior
            # mean to be non-negative and the variance to be at least one.
            latent_mean = tf.matmul(h, self._mean_encoder) + self._mean_encoder_bias

            latent_log_variance = (
                tf.matmul(h, self._log_variance_encoder)
                + self._log_variance_encoder_bias
            )

            return (latent_mean, latent_log_variance)

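    # Note on the estimator below (added for clarity): samples of z are drawn
    # with the reparameterization trick,
    #     z = mean + stddev * epsilon,    epsilon ~ N(0, I),
    # so that gradients can flow through mean and stddev, and the objective
    #     ELBO = E_q[log p(x | z)] - KL(q(z | x) || p(z))
    # is estimated with `monte_carlo_samples` draws of z per data point.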
    def _evidence_lower_bound(self,
                              monte_carlo_samples=5,
                              importance_weighting=False,
                              tol=1e-5):
        """
        Variational objective function

        ELBO = E_q[log p(x, z)] - E_q[log q(z | x)]
             = MC estimate of the log joint plus the entropy of q
        """

        with self.graph.as_default():
            x_resampled = tf.tile(self.x, tf.constant([monte_carlo_samples, 1]))

            # Forward pass of data into latent space
            mean_encoder, log_variance_encoder = self._encode(x_resampled)

            random_noise = tf.random_normal(
                (self.batch_size * monte_carlo_samples, self._latent_dim),
                0, 1, dtype=tf.float32)

            # Reparameterization trick of re-scaling/transforming random error
            std_dev = tf.sqrt(tf.exp(log_variance_encoder))
            z = mean_encoder + std_dev * random_noise

            # Reconstruction/decoding of latent space
            mean_decoder, _ = self._generate(z)

            # Bernoulli log-likelihood reconstruction term, log p(x | z)
            # TODO: other distribution types
            def bernoulli_log_joint(x):
                return tf.reduce_sum(
                    (x * tf.log(tol + mean_decoder))
                    + ((1 - x) * tf.log(tol + 1 - mean_decoder)),
                    1)

            log2pi = tf.log(2.0 * np.pi)

            def gaussian_likelihood(data, mean, log_variance):
                """Log-likelihood of data given ~ N(mean, exp(log_variance))

                Parameters
                ----------
                data :
                    Samples from Gaussian centered at mean
                mean :
                    Mean of the Gaussian distribution
                log_variance :
                    Log variance of the Gaussian distribution

                Returns
                -------
                log_likelihood : float

                """

                num_components = data.get_shape().as_list()[1]
                variance = tf.exp(log_variance)
                log_likelihood = (
                    -(log2pi * (num_components / 2.0))
                    - tf.reduce_sum(
                        (tf.square(data - mean) / (2 * variance)) + (log_variance / 2.0),
                        1)
                )

                return log_likelihood

            def standard_gaussian_likelihood(data):
                """Log-likelihood of data given ~ N(0, 1)

                Parameters
                ----------
                data :
                    Samples from Gaussian centered at 0

                Returns
                -------
                log_likelihood : float

                """

                num_components = data.get_shape().as_list()[1]
                log_likelihood = (
                    -(log2pi * (num_components / 2.0))
                    - tf.reduce_sum(tf.square(data) / 2.0, 1)
                )

                return log_likelihood

            log_p_given_z = bernoulli_log_joint(x_resampled)

            if importance_weighting:
                log_q_z = gaussian_likelihood(z, mean_encoder, log_variance_encoder)
                log_p_z = standard_gaussian_likelihood(z)

                regularization_term = log_p_z - log_q_z
            else:
                # Analytic KL(q(z | x) || p(z)) for Gaussian q and standard normal p
                p_z_q_z_kl_divergence = \
                    -0.5 * tf.reduce_sum(1
                                         + log_variance_encoder
                                         - tf.square(mean_encoder)
                                         - tf.exp(log_variance_encoder), 1)

                regularization_term = -p_z_q_z_kl_divergence

            # Group the Monte Carlo samples by data point.  tf.tile lays the
            # repetitions out as monte_carlo_samples copies of the batch, so
            # flat index s * batch_size + b belongs to data point b; reshaping
            # to [monte_carlo_samples, batch_size] preserves that pairing
            # (reshaping to [batch_size, monte_carlo_samples] would mix
            # samples from different data points).
            log_p_given_z_mc = tf.reshape(log_p_given_z,
                                          [monte_carlo_samples, self.batch_size])
            regularization_term_mc = tf.reshape(regularization_term,
                                                [monte_carlo_samples, self.batch_size])

            log_weights = log_p_given_z_mc + regularization_term_mc

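            # Added note: the importance-weighted objective needs
            #     log ( (1/k) * sum_i exp(log w_i) ),
            # which overflows if evaluated directly.  The log-sum-exp identity
            #     log ( (1/k) * sum_i exp(l_i) )
            #         = m + log ( (1/k) * sum_i exp(l_i - m) ),   m = max_i l_i,
            # keeps every exponent non-positive.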
            if importance_weighting:
                # Per-data-point maximum over the sample axis (axis 0),
                # used for the log-sum-exp trick.
                wmax = tf.reduce_max(log_weights, 0, keep_dims=True)

                # w_i = p(x, z_i) / q(z_i | x), log w_i = log p(x, z_i) - log q(z_i | x)
                # log ( (1/k) * sum_i exp(log w_i) )
                weights_iw = tf.log(tf.reduce_mean(tf.exp(log_weights - wmax), 0))
                objective = tf.reduce_mean(wmax) + tf.reduce_mean(weights_iw)
            else:
                objective = tf.reduce_mean(log_weights)

            return objective
--------------------------------------------------------------------------------
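For reference, the bound implemented by `_evidence_lower_bound` with `importance_weighting=True` is the estimator from Burda et al.; a sketch of the math (notation mine, not from the repo):

```latex
% w_i = p(x, z_i) / q(z_i | x),  z_i ~ q(z | x)
\mathcal{L}_k = \mathbb{E}_{z_1, \dots, z_k \sim q(z \mid x)}
    \left[ \log \frac{1}{k} \sum_{i=1}^{k} \frac{p(x, z_i)}{q(z_i \mid x)} \right]
    \le \log p(x)
% The bound tightens monotonically in k, and L_1 is the standard ELBO.
```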
/LICENSE:
--------------------------------------------------------------------------------
                              Apache License
                        Version 2.0, January 2004
                     http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

   "License" shall mean the terms and conditions for use, reproduction,
   and distribution as defined by Sections 1 through 9 of this document.

   "Licensor" shall mean the copyright owner or entity authorized by
   the copyright owner that is granting the License.

   "Legal Entity" shall mean the union of the acting entity and all
   other entities that control, are controlled by, or are under common
   control with that entity. For the purposes of this definition,
   "control" means (i) the power, direct or indirect, to cause the
   direction or management of such entity, whether by contract or
   otherwise, or (ii) ownership of fifty percent (50%) or more of the
   outstanding shares, or (iii) beneficial ownership of such entity.

   "You" (or "Your") shall mean an individual or Legal Entity
   exercising permissions granted by this License.

   "Source" form shall mean the preferred form for making modifications,
   including but not limited to software source code, documentation
   source, and configuration files.

   "Object" form shall mean any form resulting from mechanical
   transformation or translation of a Source form, including but
   not limited to compiled object code, generated documentation,
   and conversions to other media types.

   "Work" shall mean the work of authorship, whether in Source or
   Object form, made available under the License, as indicated by a
   copyright notice that is included in or attached to the work
   (an example is provided in the Appendix below).

   "Derivative Works" shall mean any work, whether in Source or Object
   form, that is based on (or derived from) the Work and for which the
   editorial revisions, annotations, elaborations, or other modifications
   represent, as a whole, an original work of authorship. For the purposes
   of this License, Derivative Works shall not include works that remain
   separable from, or merely link (or bind by name) to the interfaces of,
   the Work and Derivative Works thereof.

   "Contribution" shall mean any work of authorship, including
   the original version of the Work and any modifications or additions
   to that Work or Derivative Works thereof, that is intentionally
   submitted to Licensor for inclusion in the Work by the copyright owner
   or by an individual or Legal Entity authorized to submit on behalf of
   the copyright owner. For the purposes of this definition, "submitted"
   means any form of electronic, verbal, or written communication sent
   to the Licensor or its representatives, including but not limited to
   communication on electronic mailing lists, source code control systems,
   and issue tracking systems that are managed by, or on behalf of, the
   Licensor for the purpose of discussing and improving the Work, but
   excluding communication that is conspicuously marked or otherwise
   designated in writing by the copyright owner as "Not a Contribution."

   "Contributor" shall mean Licensor and any individual or Legal Entity
   on behalf of whom a Contribution has been received by Licensor and
   subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
   this License, each Contributor hereby grants to You a perpetual,
   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
   copyright license to reproduce, prepare Derivative Works of,
   publicly display, publicly perform, sublicense, and distribute the
   Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
   this License, each Contributor hereby grants to You a perpetual,
   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
   (except as stated in this section) patent license to make, have made,
   use, offer to sell, sell, import, and otherwise transfer the Work,
   where such license applies only to those patent claims licensable
   by such Contributor that are necessarily infringed by their
   Contribution(s) alone or by combination of their Contribution(s)
   with the Work to which such Contribution(s) was submitted. If You
   institute patent litigation against any entity (including a
   cross-claim or counterclaim in a lawsuit) alleging that the Work
   or a Contribution incorporated within the Work constitutes direct
   or contributory patent infringement, then any patent licenses
   granted to You under this License for that Work shall terminate
   as of the date such litigation is filed.

4. Redistribution.
   You may reproduce and distribute copies of the
   Work or Derivative Works thereof in any medium, with or without
   modifications, and in Source or Object form, provided that You
   meet the following conditions:

   (a) You must give any other recipients of the Work or
       Derivative Works a copy of this License; and

   (b) You must cause any modified files to carry prominent notices
       stating that You changed the files; and

   (c) You must retain, in the Source form of any Derivative Works
       that You distribute, all copyright, patent, trademark, and
       attribution notices from the Source form of the Work,
       excluding those notices that do not pertain to any part of
       the Derivative Works; and

   (d) If the Work includes a "NOTICE" text file as part of its
       distribution, then any Derivative Works that You distribute must
       include a readable copy of the attribution notices contained
       within such NOTICE file, excluding those notices that do not
       pertain to any part of the Derivative Works, in at least one
       of the following places: within a NOTICE text file distributed
       as part of the Derivative Works; within the Source form or
       documentation, if provided along with the Derivative Works; or,
       within a display generated by the Derivative Works, if and
       wherever such third-party notices normally appear. The contents
       of the NOTICE file are for informational purposes only and
       do not modify the License. You may add Your own attribution
       notices within Derivative Works that You distribute, alongside
       or as an addendum to the NOTICE text from the Work, provided
       that such additional attribution notices cannot be construed
       as modifying the License.

   You may add Your own copyright statement to Your modifications and
   may provide additional or different license terms and conditions
   for use, reproduction, or distribution of Your modifications, or
   for any such Derivative Works as a whole, provided Your use,
   reproduction, and distribution of the Work otherwise complies with
   the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
   any Contribution intentionally submitted for inclusion in the Work
   by You to the Licensor shall be under the terms and conditions of
   this License, without any additional terms or conditions.
   Notwithstanding the above, nothing herein shall supersede or modify
   the terms of any separate license agreement you may have executed
   with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
   names, trademarks, service marks, or product names of the Licensor,
   except as required for reasonable and customary use in describing the
   origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
   agreed to in writing, Licensor provides the Work (and each
   Contributor provides its Contributions) on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
   implied, including, without limitation, any warranties or conditions
   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
   PARTICULAR PURPOSE.
   You are solely responsible for determining the
   appropriateness of using or redistributing the Work and assume any
   risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
   whether in tort (including negligence), contract, or otherwise,
   unless required by applicable law (such as deliberate and grossly
   negligent acts) or agreed to in writing, shall any Contributor be
   liable to You for damages, including any direct, indirect, special,
   incidental, or consequential damages of any character arising as a
   result of this License or out of the use or inability to use the
   Work (including but not limited to damages for loss of goodwill,
   work stoppage, computer failure or malfunction, or any and all
   other commercial damages or losses), even if such Contributor
   has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
   the Work or Derivative Works thereof, You may choose to offer,
   and charge a fee for, acceptance of support, warranty, indemnity,
   or other liability obligations and/or rights consistent with this
   License. However, in accepting such obligations, You may act only
   on Your own behalf and on Your sole responsibility, not on behalf
   of any other Contributor, and only if You agree to indemnify,
   defend, and hold each Contributor harmless for any liability
   incurred by, or claims asserted against, such Contributor by reason
   of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

   To apply the Apache License to your work, attach the following
   boilerplate notice, with the fields enclosed by brackets "{}"
   replaced with your own identifying information. (Don't include
   the brackets!) The text should be enclosed in the appropriate
   comment syntax for the file format. We also recommend that a
   file or class name and description of purpose be included on the
   same "printed page" as the copyright notice for easier
   identification within third-party archives.

Copyright {yyyy} {name of copyright owner}

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------