├── .gitignore
├── main.py
├── README.md
├── util.py
├── models.py
├── data.py
├── training.py
└── LICENSE
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .vscode/*
2 | data/*
3 | __pycache__/*
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | from training import pretrain, train_discriminator, train
2 | from data import sample_groups
3 | import torch
4 | 
5 | n_target_samples = 7
6 | plot_accuracy = True
7 | 
8 | if __name__ == '__main__':
9 |     cuda = torch.cuda.is_available()
10 | 
11 |     groups, data = sample_groups(n_target_samples=n_target_samples)
12 | 
13 |     encoder, classifier = pretrain(data, cuda=cuda, epochs=20)
14 | 
15 |     discriminator = train_discriminator(encoder, groups, n_target_samples=n_target_samples, epochs=50, cuda=cuda)
16 | 
17 |     train(encoder, discriminator, classifier, data, groups, n_target_samples=n_target_samples, cuda=cuda, epochs=150, plot_accuracy=plot_accuracy)
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # FADA-pytorch
2 | 
3 | PyTorch implementation of Few-Shot Adversarial Domain Adaptation (https://arxiv.org/abs/1711.02536)
4 | 
5 | Disclaimer: I'm not in any way affiliated with the authors. There might be errors in the implementation.
6 | 
7 | ### Todo
8 | 
9 | * [x] Main algorithm implemented
10 | * [ ] Fix TODOs in code
11 | * [ ] Comprehensive tests
12 | * [ ] More datasets to test on (currently only MNIST -> SVHN)
13 | * [ ] The authors don't give the value of γ they used; a good value should be found via cross-validation
14 | 
15 | ### Usage
16 | 
17 | Use `python3 main.py` to run the MNIST -> SVHN training and print the accuracy at each epoch.
18 | 
19 | ### Results
20 | 
21 | Preliminary results show ~46% accuracy on the test set with `n=7` samples per class from the target domain. This approximately matches the accuracy reported in the paper (47.0%).
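22 | 
23 | The same three-step pipeline can also be driven programmatically. Below is a minimal sketch mirroring `main.py` (the epoch counts and `n_target_samples=7` are simply the defaults used there, not tuned values):
24 | 
25 | ```python
26 | import torch
27 | from data import sample_groups
28 | from training import pretrain, train_discriminator, train
29 | 
30 | cuda = torch.cuda.is_available()
31 | groups, data = sample_groups(n_target_samples=7)
32 | 
33 | # Step 1: pretrain the encoder g and classifier h on the source domain (MNIST)
34 | encoder, classifier = pretrain(data, cuda=cuda, epochs=20)
35 | 
36 | # Step 2: train the domain-class discriminator (DCD) on pairs from G1-G4
37 | discriminator = train_discriminator(encoder, groups, n_target_samples=7, epochs=50, cuda=cuda)
38 | 
39 | # Step 3: update g and h adversarially against the frozen DCD
40 | train(encoder, discriminator, classifier, data, groups, n_target_samples=7, cuda=cuda, epochs=150)
41 | ```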
--------------------------------------------------------------------------------
/util.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch.autograd import Variable
3 | 
4 | def accuracy(y_pred, y):
5 |     return (torch.max(y_pred, 1)[1] == y).float().mean().data[0]
6 | 
7 | ''' Returns the mean accuracy on the test set, given a model '''
8 | def eval_on_test(test_dataloader, model_fn):
9 |     acc = 0
10 |     for x, y in test_dataloader:
11 |         x, y = Variable(x), Variable(y)
12 | 
13 |         if torch.cuda.is_available():
14 |             x, y = x.cuda(), y.cuda()
15 |         acc += accuracy(model_fn(x), y)
16 |     return round(acc / float(len(test_dataloader)), 3)
17 | 
18 | ''' Converts a list of (x1, x2) pairs into two batched Variables '''
19 | def into_tensor(data, into_vars=True):
20 |     X1 = [x[0] for x in data]
21 |     X2 = [x[1] for x in data]
22 |     if torch.cuda.is_available():
23 |         return Variable(torch.stack(X1)).cuda(), Variable(torch.stack(X2)).cuda()
24 |     return Variable(torch.stack(X1)), Variable(torch.stack(X2))
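25 | 
26 | ''' Minimal usage sketch (illustrative only; not called by the training code) '''
27 | if __name__ == '__main__':
28 |     # Stack four random image-sized pairs into two batched inputs, the same
29 |     # shape the group tensors take in training.py
30 |     pairs = [(torch.randn(1, 28, 28), torch.randn(1, 28, 28)) for _ in range(4)]
31 |     X1, X2 = into_tensor(pairs)
32 |     print(X1.size(), X2.size())  # torch.Size([4, 1, 28, 28]) twice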
--------------------------------------------------------------------------------
/models.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn.functional as F
3 | from torch import nn
4 | 
5 | '''
6 | Domain-Class Discriminator (see (3) in the paper)
7 | Takes in the concatenated latent representations of two samples, drawn from
8 | G1, G2, G3 or G4, and outputs scores over the four group labels [0, 1, 2, 3]
9 | '''
10 | class DCD(nn.Module):
11 |     def __init__(self, H=64, D_in=784):
12 |         super(DCD, self).__init__()
13 |         self.fc1 = nn.Linear(D_in, H)
14 |         self.fc2 = nn.Linear(H, H)
15 |         self.out = nn.Linear(H, 4)
16 | 
17 |     def forward(self, x):
18 |         out = F.relu(self.fc1(x))
19 |         out = F.relu(self.fc2(out))
20 |         return self.out(out)  # raw scores; the losses apply (log-)softmax themselves
21 | 
22 | ''' Called h in the paper. Gives class predictions based on the latent representation '''
23 | class Classifier(nn.Module):
24 |     def __init__(self, D_in=64):
25 |         super(Classifier, self).__init__()
26 |         self.out = nn.Linear(D_in, 10)
27 | 
28 |     def forward(self, x):
29 |         return self.out(x)  # raw scores; CrossEntropyLoss applies log-softmax itself
30 | 
31 | '''
32 | Creates the latent representation from the data. Called g in the paper.
33 | As in the paper, we use g_s = g_t = g, i.e. weights are shared between the target
34 | and source representations.
35 | 
36 | The model is as specified in section 4.1. See https://github.com/kuangliu/pytorch-cifar/blob/master/models/lenet.py
37 | '''
38 | class Encoder(nn.Module):
39 |     def __init__(self):
40 |         super(Encoder, self).__init__()
41 | 
42 |         self.conv1 = nn.Conv2d(1, 6, 5)
43 |         self.conv2 = nn.Conv2d(6, 16, 5)
44 |         self.fc1 = nn.Linear(256, 120)
45 |         self.fc2 = nn.Linear(120, 84)
46 |         self.fc3 = nn.Linear(84, 64)
47 | 
48 |     def forward(self, x):
49 |         out = F.relu(self.conv1(x))
50 |         out = F.max_pool2d(out, 2)
51 |         out = F.relu(self.conv2(out))
52 |         out = F.max_pool2d(out, 2)
53 |         out = out.view(out.size(0), -1)
54 |         out = F.relu(self.fc1(out))
55 |         out = F.relu(self.fc2(out))
56 |         out = self.fc3(out)
57 |         return out
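58 | 
59 | ''' Shape sanity check (illustrative sketch only) '''
60 | if __name__ == '__main__':
61 |     from torch.autograd import Variable
62 | 
63 |     x = Variable(torch.randn(2, 1, 28, 28))   # a batch of two grayscale 28x28 images
64 |     z = Encoder()(x)                          # latent codes, (2, 64)
65 |     p = Classifier()(z)                       # class scores, (2, 10)
66 |     d = DCD(D_in=128)(torch.cat([z, z], 1))   # group scores for a concatenated pair, (2, 4)
67 |     print(z.size(), p.size(), d.size())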
--------------------------------------------------------------------------------
/data.py:
--------------------------------------------------------------------------------
1 | import torchvision
2 | import torch
3 | import numpy as np
4 | 
5 | ''' Returns the MNIST dataloader '''
6 | def mnist_dataloader(batch_size=256, train=True, cuda=False):
7 |     dataset = torchvision.datasets.MNIST('./data', download=True, train=train, transform=torchvision.transforms.ToTensor())
8 |     return torch.utils.data.DataLoader(dataset, shuffle=True, batch_size=batch_size, num_workers=2, pin_memory=cuda)
9 | 
10 | 
11 | ''' Returns the SVHN dataloader '''
12 | def svhn_dataloader(batch_size=256, train=True, cuda=False):
13 | 
14 |     transform = torchvision.transforms.Compose([
15 |         torchvision.transforms.Resize((28, 28)),
16 |         torchvision.transforms.Grayscale(),
17 |         torchvision.transforms.ToTensor()
18 |     ])
19 |     dataset = torchvision.datasets.SVHN('./data', download=True, split=('train' if train else 'test'), transform=transform)
20 |     return torch.utils.data.DataLoader(dataset, shuffle=True, batch_size=batch_size, num_workers=2, pin_memory=cuda)
21 | 
22 | 
23 | ''' Samples a subset of the source domain (MNIST) into memory '''
24 | def sample_data(n=2000):
25 |     dataset = torchvision.datasets.MNIST('./data', download=True, train=True, transform=torchvision.transforms.ToTensor())
26 |     X = torch.FloatTensor(n, 1, 28, 28)
27 |     Y = torch.LongTensor(n)
28 | 
29 |     inds = torch.randperm(len(dataset))[:n]
30 |     for i, index in enumerate(inds):
31 |         x, y = dataset[index]
32 |         X[i] = x
33 |         Y[i] = y
34 |     return X, Y
35 | 
36 | ''' Returns a subset of the target domain (SVHN) with n samples per class '''
37 | def create_target_samples(n=1):
38 |     transform = torchvision.transforms.Compose([
39 |         torchvision.transforms.Resize((28, 28)),
40 |         torchvision.transforms.Grayscale(),
41 |         torchvision.transforms.ToTensor()
42 |     ])
43 | 
44 |     dataset = torchvision.datasets.SVHN('./data', download=True, split='train', transform=transform)
45 |     X, Y = [], []
46 |     classes = 10 * [n]  # per-class budget
47 | 
48 |     i = 0
49 |     while len(X) < n * 10:
50 |         x, y = dataset[i]
51 |         if classes[y] > 0:
52 |             X.append(x)
53 |             Y.append(y)
54 |             classes[y] -= 1
55 |         i += 1
56 | 
57 |     assert(len(X) == n * 10)
58 |     return torch.stack(X), torch.from_numpy(np.array(Y))
59 | 
60 | '''
61 | Samples uniformly: groups G1 (same class) and G3 (different classes) from D_s x D_s, and groups G2 (same class) and G4 (different classes) from D_s x D_t
62 | '''
63 | def create_groups(X_s, y_s, X_t, y_t):
64 |     n = X_t.shape[0]
65 |     G1, G3 = [], []
66 | 
67 |     # TODO optimize
68 |     # Groups G1 and G3 are drawn from the source domain only
69 |     for i, (x1, y1) in enumerate(zip(X_s, y_s)):
70 |         for j, (x2, y2) in enumerate(zip(X_s, y_s)):
71 |             if y1 == y2 and i != j and len(G1) < n:
72 |                 G1.append((x1, x2))
73 |             if y1 != y2 and i != j and len(G3) < n:
74 |                 G3.append((x1, x2))
75 | 
76 |     G2, G4 = [], []
77 | 
78 |     # Groups G2 and G4 mix the source and target domains
79 |     for x1, y1 in zip(X_s, y_s):
80 |         for x2, y2 in zip(X_t, y_t):
81 |             if y1 == y2 and len(G2) < n:
82 |                 G2.append((x1, x2))
83 |             if y1 != y2 and len(G4) < n:
84 |                 G4.append((x1, x2))
85 | 
86 |     groups = [G1, G2, G3, G4]
87 | 
88 |     # Make sure we sampled enough pairs
89 |     for g in groups:
90 |         assert(len(g) == n)
91 |     return groups
92 | 
93 | ''' Sample the groups G1, G2, G3, G4 '''
94 | def sample_groups(n_target_samples=2):
95 |     X_s, y_s = sample_data()
96 |     X_t, y_t = create_target_samples(n_target_samples)
97 | 
98 |     print("Sampling groups")
99 |     return create_groups(X_s, y_s, X_t, y_t), (X_s, y_s, X_t, y_t)
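100 | 
101 | ''' Illustrative check (downloads MNIST and SVHN on first run) '''
102 | if __name__ == '__main__':
103 |     groups, _ = sample_groups(n_target_samples=2)
104 |     # Each group holds n_target_samples * 10 pairs, one per target sample
105 |     print([len(g) for g in groups])  # [20, 20, 20, 20]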
--------------------------------------------------------------------------------
/training.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 | from torch import optim
4 | import torch.nn.functional as F
5 | from torch.autograd import Variable
6 | from data import mnist_dataloader, svhn_dataloader
7 | from models import Classifier, Encoder, DCD
8 | from util import eval_on_test, into_tensor
9 | import random
10 | import matplotlib.pyplot as plt
11 | 
12 | def model_fn(encoder, classifier):
13 |     return lambda x: classifier(encoder(x))
14 | 
15 | ''' Pretrain the encoder and classifier on the source domain, as in (a) in figure 2 '''
16 | def pretrain(data, epochs=5, batch_size=128, cuda=False):
17 |     X_s, y_s, _, _ = data
18 | 
19 |     test_dataloader = mnist_dataloader(train=False, cuda=cuda)
20 | 
21 |     classifier = Classifier()
22 |     encoder = Encoder()
23 | 
24 |     if cuda:
25 |         classifier.cuda()
26 |         encoder.cuda()
27 | 
28 |     # Jointly optimize both the encoder and the classifier
29 |     optimizer = optim.Adam(list(encoder.parameters()) + list(classifier.parameters()))
30 |     loss_fn = nn.CrossEntropyLoss()
31 | 
32 |     for e in range(epochs):
33 |         for _ in range(len(X_s) // batch_size):
34 |             inds = torch.randperm(len(X_s))[:batch_size]
35 | 
36 |             x, y = Variable(X_s[inds]), Variable(y_s[inds])
37 |             optimizer.zero_grad()
38 | 
39 |             if cuda:
40 |                 x, y = x.cuda(), y.cuda()
41 | 
42 |             y_pred = model_fn(encoder, classifier)(x)
43 |             loss = loss_fn(y_pred, y)
44 |             loss.backward()
45 |             optimizer.step()
46 | 
47 |         print("Epoch", e, "Loss", loss.data[0], "Accuracy", eval_on_test(test_dataloader, model_fn(encoder, classifier)))
48 | 
49 |     return encoder, classifier
50 | 
51 | ''' Train the DCD while the encoder is frozen '''
52 | def train_discriminator(encoder, groups, n_target_samples=2, cuda=False, epochs=20):
53 |     discriminator = DCD(D_in=128)  # takes in two concatenated 64-d latent representations
54 |     loss_fn = nn.CrossEntropyLoss()
55 | 
56 |     # Only the DCD is optimized; the encoder stays fixed
57 |     optimizer = optim.Adam(discriminator.parameters())
58 | 
59 |     # Iterations per epoch: n_target_samples pairs for each of the 4 groups
60 |     n_iters = 4 * n_target_samples
61 | 
62 |     if cuda:
63 |         discriminator.cuda()
64 | 
65 |     print("Training DCD")
66 |     for e in range(epochs):
67 |         for _ in range(n_iters):
68 |             # Sample a pair of samples from a randomly chosen group
69 |             group = random.choice([0, 1, 2, 3])
70 |             x1, x2 = groups[group][random.randint(0, len(groups[group]) - 1)]
71 |             x1, x2 = Variable(x1), Variable(x2)
72 | 
73 |             if cuda:
74 |                 x1, x2 = x1.cuda(), x2.cuda()
75 | 
76 |             optimizer.zero_grad()
77 | 
78 |             # Concatenate the encoded representations; detach so no gradient flows into the encoder
79 |             x_cat = torch.cat([encoder(x1.unsqueeze(0)), encoder(x2.unsqueeze(0))], 1).detach()
80 |             y_pred = discriminator(x_cat)
81 | 
82 |             # The label is the group index
83 |             y = Variable(torch.LongTensor([group]))
84 |             if cuda:
85 |                 y = y.cuda()
86 |             loss = loss_fn(y_pred, y)
87 |             loss.backward()
88 |             optimizer.step()
89 | 
90 |         print("Epoch", e, "Loss", loss.data[0])
91 | 
92 |     return discriminator
93 | 
94 | '''
95 | FADA loss, based on (4) in the paper; encourages the encoder to confuse the DCD.
96 | Note: relative to (4), the minus sign is moved, since the sign in the paper seems to be wrong.
97 | '''
98 | def fada_loss(y_pred_g2, g1_true, y_pred_g4, g3_true, gamma=0.2):
99 |     return -gamma * torch.mean(g1_true * F.log_softmax(y_pred_g2, dim=1) + g3_true * F.log_softmax(y_pred_g4, dim=1))
100 | 
101 | ''' Step three of the algorithm: train everything except the DCD '''
102 | def train(encoder, discriminator, classifier, data, groups, n_target_samples=2, cuda=False, epochs=20, batch_size=256, plot_accuracy=False):
103 |     # For evaluation only
104 |     test_dataloader = svhn_dataloader(train=False, cuda=cuda)
105 | 
106 |     X_s, Y_s, X_t, Y_t = data
107 |     G1, G2, G3, G4 = groups
108 | 
109 |     # Only the encoder and classifier are updated; the DCD stays frozen in this step
110 |     class_optimizer = optim.Adam(list(encoder.parameters()) + list(classifier.parameters()))
111 | 
112 |     loss_fn = nn.CrossEntropyLoss()
113 |     n_iters = 4 * n_target_samples
114 | 
115 |     if plot_accuracy:
116 |         accuracies = []
117 | 
118 |     for e in range(epochs):
119 |         # Shuffle the data at each epoch
120 |         inds = torch.randperm(X_s.shape[0])
121 |         X_s, Y_s = X_s[inds], Y_s[inds]
122 | 
123 |         inds = torch.randperm(X_t.shape[0])
124 |         X_t, Y_t = X_t[inds], Y_t[inds]
125 | 
126 |         g2_one, g2_two = into_tensor(G2, into_vars=True)
127 |         g4_one, g4_two = into_tensor(G4, into_vars=True)
128 | 
129 |         inds = torch.randperm(g2_one.shape[0])
130 |         if cuda:
131 |             inds = inds.cuda()
132 |         g2_one, g2_two, g4_one, g4_two = g2_one[inds], g2_two[inds], g4_one[inds], g4_two[inds]
133 | 
134 |         for _ in range(n_iters):
135 |             class_optimizer.zero_grad()
136 | 
137 |             # Classification loss on a batch from the source domain
138 |             inds = torch.randperm(X_s.shape[0])[:batch_size]
139 |             x_s, y_s = Variable(X_s[inds]), Variable(Y_s[inds])
140 |             if cuda:
141 |                 x_s, y_s = x_s.cuda(), y_s.cuda()
142 |             y_pred_s = model_fn(encoder, classifier)(x_s)
143 | 
144 |             # Classification loss on a single target sample
145 |             ind = random.randint(0, X_t.shape[0] - 1)
146 |             x_t, y_t = Variable(X_t[ind].unsqueeze(0)), Variable(torch.LongTensor([Y_t[ind]]))
147 |             if cuda:
148 |                 x_t, y_t = x_t.cuda(), y_t.cuda()
149 |             y_pred_t = model_fn(encoder, classifier)(x_t)
150 | 
151 |             # DCD predictions on the mixed groups G2 and G4
152 |             x1, x2 = encoder(g2_one), encoder(g2_two)
153 |             y_pred_g2 = discriminator(torch.cat([x1, x2], 1))
154 |             g1_true = 1
155 | 
156 |             x1, x2 = encoder(g4_one), encoder(g4_two)
157 |             y_pred_g4 = discriminator(torch.cat([x1, x2], 1))
158 |             g3_true = 3
159 | 
160 |             # The full loss, as given by (5) in the paper
161 |             loss = fada_loss(y_pred_g2, g1_true, y_pred_g4, g3_true) + loss_fn(y_pred_s, y_s) + loss_fn(y_pred_t, y_t)
162 |             loss.backward()
163 |             class_optimizer.step()
164 | 
165 |         acc = eval_on_test(test_dataloader, model_fn(encoder, classifier))
166 |         print("Epoch", e, "Loss", loss.data[0], "Accuracy", acc)
167 | 
168 |         if plot_accuracy:
169 |             accuracies.append(acc)
170 | 
171 |     if plot_accuracy:
172 |         plt.plot(range(len(accuracies)), accuracies)
173 |         plt.title("SVHN test accuracy")
174 |         plt.xlabel("Epoch")
175 |         plt.ylabel("Accuracy")
176 |         plt.show()
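177 | 
178 | ''' Tiny numeric sketch of fada_loss (illustrative only) '''
179 | if __name__ == '__main__':
180 |     # With uniform DCD scores every log-probability is log(1/4), so the
181 |     # confusion term is -gamma * mean((1 + 3) * log(0.25)) = -0.2 * 4 * log(0.25) ~= 1.109
182 |     logits = Variable(torch.zeros(1, 4))
183 |     print(fada_loss(logits, 1, logits, 3).data[0])  # use .item() on PyTorch >= 0.4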
plt.xlabel("Epoch") 195 | plt.ylabel("Accuracy") 196 | plt.show() 197 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 
61 | 
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 | 
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 | 
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 | 
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 | 
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 | 
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 | 
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 | 
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 | 
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 | 
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 | 
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 | 
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 | 
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 | 
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 | 
176 | END OF TERMS AND CONDITIONS
177 | 
178 | APPENDIX: How to apply the Apache License to your work.
179 | 
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 | 
189 | Copyright [yyyy] [name of copyright owner]
190 | 
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 | 
195 | http://www.apache.org/licenses/LICENSE-2.0
196 | 
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 | 
--------------------------------------------------------------------------------