├── requirements.txt
├── generated_images
│   ├── glowing_512_2.png
│   ├── glowing_512_2_copy.png
│   └── glowing_512_2_copy_2.png
├── reference_images
│   ├── glowing_512_1.png
│   ├── glowing_512_1_copy.png
│   └── glowing_512_1_copy_2.png
├── distance.py
├── embedding.py
├── generate_images.py
├── main.py
├── README.md
├── io_util.py
└── LICENSE

/requirements.txt:
--------------------------------------------------------------------------------
1 | transformers
2 | accelerate
3 | Pillow
4 | tqdm
5 | numpy
6 | absl-py
--------------------------------------------------------------------------------
/generated_images/glowing_512_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sayakpaul/cmmd-pytorch/main/generated_images/glowing_512_2.png
--------------------------------------------------------------------------------
/reference_images/glowing_512_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sayakpaul/cmmd-pytorch/main/reference_images/glowing_512_1.png
--------------------------------------------------------------------------------
/generated_images/glowing_512_2_copy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sayakpaul/cmmd-pytorch/main/generated_images/glowing_512_2_copy.png
--------------------------------------------------------------------------------
/reference_images/glowing_512_1_copy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sayakpaul/cmmd-pytorch/main/reference_images/glowing_512_1_copy.png
--------------------------------------------------------------------------------
/generated_images/glowing_512_2_copy_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sayakpaul/cmmd-pytorch/main/generated_images/glowing_512_2_copy_2.png
--------------------------------------------------------------------------------
/reference_images/glowing_512_1_copy_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sayakpaul/cmmd-pytorch/main/reference_images/glowing_512_1_copy_2.png
--------------------------------------------------------------------------------
/distance.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | # Copyright 2024 The Google Research Authors.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | #     http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | 
16 | """Memory-efficient MMD implementation in PyTorch."""
17 | 
18 | import torch
19 | 
20 | # The bandwidth parameter for the Gaussian RBF kernel. See the paper for more
21 | # details.
22 | _SIGMA = 10
23 | # The following is used to make the metric more human readable. See the paper
24 | # for more details.
25 | _SCALE = 1000
26 | 
27 | 
28 | def mmd(x, y):
29 |     """Memory-efficient MMD implementation in PyTorch.
30 | 
31 |     This implements the minimum-variance/biased version of the estimator described
32 |     in Eq.(5) of
33 |     https://jmlr.csail.mit.edu/papers/volume13/gretton12a/gretton12a.pdf.
34 |     As described in Lemma 6's proof in that paper, the unbiased estimate and the
35 |     minimum-variance estimate for MMD are almost identical.
36 | 
37 |     Note that unlike the original JAX implementation, this PyTorch port runs
38 |     eagerly, so there is no JIT-compilation overhead on the first invocation.
39 | 
40 |     Args:
41 |       x: The first set of embeddings of shape (n, embedding_dim).
42 |       y: The second set of embeddings of shape (m, embedding_dim).
43 | 
44 |     Returns:
45 |       The MMD distance between x and y embedding sets.
46 |     """
47 |     x = torch.from_numpy(x)
48 |     y = torch.from_numpy(y)
49 | 
50 |     # Squared norms, used to expand the pairwise squared distances as
51 |     # ||a - b||^2 = ||a||^2 - 2 * <a, b> + ||b||^2 inside the RBF kernel.
52 |     x_sqnorms = torch.diag(torch.matmul(x, x.T))
53 |     y_sqnorms = torch.diag(torch.matmul(y, y.T))
54 | 
55 |     gamma = 1 / (2 * _SIGMA**2)
56 |     k_xx = torch.mean(
57 |         torch.exp(-gamma * (-2 * torch.matmul(x, x.T) + torch.unsqueeze(x_sqnorms, 1) + torch.unsqueeze(x_sqnorms, 0)))
58 |     )
59 |     k_xy = torch.mean(
60 |         torch.exp(-gamma * (-2 * torch.matmul(x, y.T) + torch.unsqueeze(x_sqnorms, 1) + torch.unsqueeze(y_sqnorms, 0)))
61 |     )
62 |     k_yy = torch.mean(
63 |         torch.exp(-gamma * (-2 * torch.matmul(y, y.T) + torch.unsqueeze(y_sqnorms, 1) + torch.unsqueeze(y_sqnorms, 0)))
64 |     )
65 | 
66 |     return _SCALE * (k_xx + k_yy - 2 * k_xy)
67 | 
--------------------------------------------------------------------------------
/embedding.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | # Copyright 2024 The Google Research Authors.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | #     http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | 
16 | """Embedding models used in the CMMD calculation."""
17 | 
18 | from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
19 | import torch
20 | import numpy as np
21 | 
22 | _CLIP_MODEL_NAME = "openai/clip-vit-large-patch14-336"
23 | _CUDA_AVAILABLE = torch.cuda.is_available()
24 | 
25 | 
26 | def _resize_bicubic(images, size):
27 |     images = torch.from_numpy(images.transpose(0, 3, 1, 2))
28 |     images = torch.nn.functional.interpolate(images, size=(size, size), mode="bicubic")
29 |     images = images.permute(0, 2, 3, 1).numpy()
30 |     return images
31 | 
32 | 
33 | class ClipEmbeddingModel:
34 |     """CLIP image embedding calculator."""
35 | 
36 |     def __init__(self):
37 |         self.image_processor = CLIPImageProcessor.from_pretrained(_CLIP_MODEL_NAME)
38 | 
39 |         self._model = CLIPVisionModelWithProjection.from_pretrained(_CLIP_MODEL_NAME).eval()
40 |         if _CUDA_AVAILABLE:
41 |             self._model = self._model.cuda()
42 | 
43 |         self.input_image_size = self.image_processor.crop_size["height"]
44 | 
45 |     @torch.no_grad()
46 |     def embed(self, images):
47 |         """Computes CLIP embeddings for the given images.
48 | 
49 |         Args:
50 |           images: An image array of shape (batch_size, height, width, 3). Values are
51 |             in range [0, 1].
52 | 
53 |         Returns:
54 |           Embedding array of shape (batch_size, embedding_width).
55 |         """
56 | 
57 |         images = _resize_bicubic(images, self.input_image_size)
58 |         inputs = self.image_processor(
59 |             images=images,
60 |             do_normalize=True,
61 |             do_center_crop=False,
62 |             do_resize=False,
63 |             do_rescale=False,
64 |             return_tensors="pt",
65 |         )
66 |         if _CUDA_AVAILABLE:
67 |             inputs = {k: v.to("cuda") for k, v in inputs.items()}
68 | 
69 |         image_embs = self._model(**inputs).image_embeds.cpu()
70 |         image_embs /= torch.linalg.norm(image_embs, dim=-1, keepdim=True)
71 |         return image_embs
72 | 
--------------------------------------------------------------------------------
/generate_images.py:
--------------------------------------------------------------------------------
1 | from diffusers import DiffusionPipeline
2 | from concurrent.futures import ThreadPoolExecutor
3 | import pandas as pd
4 | import argparse
5 | import torch
6 | import os
7 | 
8 | 
9 | ALL_CKPTS = [
10 |     "runwayml/stable-diffusion-v1-5",
11 |     "segmind/SSD-1B",
12 |     "PixArt-alpha/PixArt-XL-2-1024-MS",
13 |     "stabilityai/stable-diffusion-xl-base-1.0",
14 |     "stabilityai/sdxl-turbo",
15 | ]
16 | SEED = 2024
17 | 
18 | 
19 | def load_dataframe():
20 |     dataframe = pd.read_csv(
21 |         "https://huggingface.co/datasets/sayakpaul/sample-datasets/raw/main/coco_30k_randomly_sampled_2014_val.csv"
22 |     )
23 |     return dataframe
24 | 
25 | 
26 | def load_pipeline(args):
27 |     if "runway" in args.pipeline_id:
28 |         pipeline = DiffusionPipeline.from_pretrained(
29 |             args.pipeline_id, torch_dtype=torch.float16, safety_checker=None
30 |         ).to("cuda")
31 |     else:
32 |         pipeline = DiffusionPipeline.from_pretrained(args.pipeline_id, torch_dtype=torch.float16).to("cuda")
33 |     pipeline.set_progress_bar_config(disable=True)
34 |     return pipeline
35 | 
36 | 
37 | def generate_images(args, dataframe, pipeline):
38 |     all_images = []
39 |     for i in range(0, len(dataframe), args.chunk_size):
40 |         if "sdxl-turbo" not in args.pipeline_id:
41 |             images = pipeline(
42 |                 dataframe.iloc[i : i + args.chunk_size]["caption"].tolist(),
43 |                 num_inference_steps=args.num_inference_steps,
44 |                 generator=torch.manual_seed(SEED),
45 |             ).images
46 |         else:
47 |             images = pipeline(
48 |                 dataframe.iloc[i : i + args.chunk_size]["caption"].tolist(),
49 |                 num_inference_steps=args.num_inference_steps,
50 |                 generator=torch.manual_seed(SEED),
51 |                 guidance_scale=0.0,  # SDXL Turbo is meant to be run without classifier-free guidance.
52 |             ).images
53 |         all_images.extend(images)
54 |     return all_images
55 | 
56 | 
57 | def serialize_image(image, path):
58 |     image.save(path)
59 | 
60 | 
61 | if __name__ == "__main__":
62 |     parser = argparse.ArgumentParser()
63 |     parser.add_argument("--pipeline_id", default="runwayml/stable-diffusion-v1-5", type=str, choices=ALL_CKPTS)
64 |     parser.add_argument("--num_inference_steps", default=30, type=int)
65 |     parser.add_argument("--chunk_size", default=2, type=int)
66 |     parser.add_argument("--root_img_path", default="sdv15", type=str)
67 |     parser.add_argument("--num_workers", type=int, default=4)
68 |     args = parser.parse_args()
69 | 
70 |     dataset = load_dataframe()
71 |     pipeline = load_pipeline(args)
72 |     images = generate_images(args, dataset, pipeline)
73 |     image_paths = [os.path.join(args.root_img_path, f"{i}.jpg") for i in range(len(images))]
74 | 
75 |     if not os.path.exists(args.root_img_path):
76 |         os.makedirs(args.root_img_path)
77 | 
78 |     with ThreadPoolExecutor(max_workers=args.num_workers) as executor:
79 |         executor.map(serialize_image, images, image_paths)
80 | 
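81 | # Example invocation (the output directory below is a placeholder; the flags
82 | # are defined in the argparse block above):
83 | #
84 | #   python generate_images.py --pipeline_id=stabilityai/sdxl-turbo \
85 | #       --num_inference_steps=1 --root_img_path=sdxl_turbo_images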
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | # Copyright 2024 The Google Research Authors.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | #     http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | 
16 | """The main entry point for the CMMD calculation."""
17 | 
18 | from absl import app
19 | from absl import flags
20 | import distance
21 | import embedding
22 | import io_util
23 | import numpy as np
24 | 
25 | 
26 | _BATCH_SIZE = flags.DEFINE_integer("batch_size", 32, "Batch size for embedding generation.")
27 | _MAX_COUNT = flags.DEFINE_integer("max_count", -1, "Maximum number of images to read from each directory.")
28 | _REF_EMBED_FILE = flags.DEFINE_string(
29 |     "ref_embed_file", None, "Path to the pre-computed embedding file for the reference images."
30 | )
31 | 
32 | 
33 | def compute_cmmd(ref_dir, eval_dir, ref_embed_file=None, batch_size=32, max_count=-1):
34 |     """Calculates the CMMD distance between reference and eval image sets.
35 | 
36 |     Args:
37 |       ref_dir: Path to the directory containing reference images.
38 |       eval_dir: Path to the directory containing images to be evaluated.
39 |       ref_embed_file: Path to the pre-computed embedding file for the reference images.
40 |       batch_size: Batch size used in the CLIP embedding calculation.
41 |       max_count: Maximum number of images to use from each directory. A
42 |         non-positive value means all images in the directory are used (the
43 |         PyTorch data loader keeps the final partial batch).
44 | 
45 |     Returns:
46 |       The CMMD value between the image sets.
47 |     """
48 |     if ref_dir and ref_embed_file:
49 |         raise ValueError("`ref_dir` and `ref_embed_file` cannot both be set at the same time.")
50 |     embedding_model = embedding.ClipEmbeddingModel()
51 |     if ref_embed_file is not None:
52 |         ref_embs = np.load(ref_embed_file).astype("float32")
53 |     else:
54 |         ref_embs = io_util.compute_embeddings_for_dir(ref_dir, embedding_model, batch_size, max_count).astype(
55 |             "float32"
56 |         )
57 |     eval_embs = io_util.compute_embeddings_for_dir(eval_dir, embedding_model, batch_size, max_count).astype("float32")
58 |     val = distance.mmd(ref_embs, eval_embs)
59 |     return val.numpy()
60 | 
61 | 
62 | def main(argv):
63 |     if len(argv) != 3:
64 |         raise app.UsageError("Too few/too many command-line arguments.")
65 |     _, dir1, dir2 = argv
66 |     if dir1 == "None":
67 |         # Allow the literal `None` for the reference directory when
68 |         # `--ref_embed_file` supplies pre-computed reference embeddings.
69 |         dir1 = None
70 |     print(
71 |         "The CMMD value is: "
72 |         f"{compute_cmmd(dir1, dir2, _REF_EMBED_FILE.value, _BATCH_SIZE.value, _MAX_COUNT.value):.3f}"
73 |     )
74 | 
75 | 
76 | if __name__ == "__main__":
77 |     app.run(main)
78 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # cmmd-pytorch
2 | 
3 | (Unofficial) PyTorch implementation of CLIP Maximum Mean Discrepancy (CMMD) for evaluating image generation models, proposed in [Rethinking FID: Towards a Better Evaluation Metric for Image Generation](https://arxiv.org/abs/2401.09603). CMMD is a more robust metric than FID and mitigates several of FID's longstanding issues.
4 | 
5 | This implementation is a straightforward PyTorch port of the [original codebase](https://github.com/google-research/google-research/tree/master/cmmd). I have only focused on the JAX- and TensorFlow-specific bits and replaced them with PyTorch equivalents. Some differences:
6 | 
7 | * The original codebase relies on [`scenic`](https://github.com/google-research/scenic) for computing CLIP embeddings. This repository uses [`transformers`](https://github.com/huggingface/transformers).
8 | * For data loading, the original codebase uses TensorFlow, while this one uses a PyTorch `Dataset` and `DataLoader`.
9 | 
10 | ## Setup
11 | 
12 | First, install PyTorch following the instructions from the [official website](https://pytorch.org/).
13 | 
14 | Then install the dependencies:
15 | 
16 | ```bash
17 | pip install -r requirements.txt
18 | ```
19 | 
20 | ## Running
21 | 
22 | ```bash
23 | python main.py /path/to/reference/images /path/to/eval/images --batch_size=32 --max_count=30000
24 | ```
25 | 
26 | A working example command:
27 | 
28 | ```bash
29 | python main.py reference_images generated_images --batch_size=1
30 | ```
31 | 
32 | It should output:
33 | 
34 | ```bash
35 | The CMMD value is: 7.696
36 | ```
37 | 
38 | This matches the output of the original codebase, which confirms the correctness of this implementation 🤗
39 | 
40 | > [!TIP]
41 | > A GPU is used automatically when one is available; otherwise, everything runs on the CPU.
42 | 
43 | ## Results
44 | 
45 | Below, we report the CMMD metric for some popular pipelines on the COCO-30k dataset, as commonly used by the community. As with FID, lower values are better.
46 | 
47 | | **Pipeline** | **Inference Steps** | **Resolution** | **CMMD** |
48 | |:------------:|:-------------------:|:--------------:|:--------:|
49 | | [`stabilityai/stable-diffusion-xl-base-1.0`](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) | 30 | 1024x1024 | 0.696 |
50 | | [`segmind/SSD-1B`](https://huggingface.co/segmind/SSD-1B) | 30 | 1024x1024 | 0.669 |
51 | | [`stabilityai/sdxl-turbo`](https://huggingface.co/stabilityai/sdxl-turbo) | 1 | 512x512 | 0.548 |
52 | | [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) | 50 | 512x512 | 0.582 |
53 | | [`PixArt-alpha/PixArt-XL-2-1024-MS`](https://huggingface.co/PixArt-alpha/PixArt-XL-2-1024-MS) | 20 | 1024x1024 | 1.140 |
54 | | [`SPRIGHT-T2I/spright-t2i-sd2`](https://huggingface.co/SPRIGHT-T2I/spright-t2i-sd2) | 50 | 768x768 | 0.512 |
55 | 
56 | **Notes**:
57 | 
58 | * For SDXL Turbo, `guidance_scale` is set to 0, following the [official guide](https://huggingface.co/docs/diffusers/main/en/using-diffusers/sdxl_turbo) in `diffusers`.
59 | * For all other pipelines, the default `guidance_scale` was used. Refer to the official pipeline documentation pages [here](https://huggingface.co/docs/diffusers/main/en/index) for more details.
60 | 
61 | > [!CAUTION]
62 | > As per the CMMD authors, for models producing high-quality, high-resolution images, COCO images don't seem to be a good reference set (their resolution is quite low). This might help explain why SD v1.5 has a better CMMD than SDXL.
63 | 
64 | ## Obtaining CMMD for your pipelines
65 | 
66 | Refer to the `generate_images.py` script, which generates images from the [randomly sampled COCO-30k captions](https://huggingface.co/datasets/sayakpaul/sample-datasets/raw/main/coco_30k_randomly_sampled_2014_val.csv) using `diffusers`.
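67 | 
68 | For example (the output directory name below is just a placeholder):
69 | 
70 | ```bash
71 | python generate_images.py \
72 |   --pipeline_id=stabilityai/stable-diffusion-xl-base-1.0 \
73 |   --num_inference_steps=30 \
74 |   --root_img_path=sdxl_images
75 | ```
76 | 
77 | The metric can also be computed from Python instead of the CLI. Below is a minimal sketch (paths are placeholders) that uses `compute_cmmd` from `main.py` and caches the reference embeddings via `io_util` and `embedding` so they can be reused across runs:
78 | 
79 | ```python
80 | import numpy as np
81 | 
82 | import embedding
83 | import io_util
84 | from main import compute_cmmd
85 | 
86 | # One-off: compute and cache the reference embeddings.
87 | model = embedding.ClipEmbeddingModel()
88 | ref_embs = io_util.compute_embeddings_for_dir("reference_images", model, batch_size=1)
89 | np.save("ref_embs.npy", ref_embs)
90 | 
91 | # Compute CMMD directly from two image directories...
92 | print(compute_cmmd("reference_images", "generated_images", batch_size=1))
93 | 
94 | # ...or from the cached reference embeddings.
95 | print(compute_cmmd(None, "generated_images", ref_embed_file="ref_embs.npy", batch_size=1))
96 | ```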
97 | 
98 | Once the images are generated, run:
99 | 
100 | ```bash
101 | python main.py /path/to/reference/images /path/to/generated/images --batch_size=32 --max_count=30000
102 | ```
103 | 
104 | Reference images are COCO-30k images and can be downloaded from [here](https://huggingface.co/datasets/sayakpaul/coco-30-val-2014).
105 | 
106 | Pre-computed embeddings for the COCO-30k images can be found [here](https://huggingface.co/datasets/sayakpaul/coco-30-val-2014/blob/main/ref_embs_coco_30k.npy).
107 | 
108 | To use the pre-computed reference embeddings, run:
109 | 
110 | ```bash
111 | python main.py None /path/to/generated/images --ref_embed_file=ref_embs.npy --batch_size=32 --max_count=30000
112 | ```
113 | 
114 | ## Acknowledgements
115 | 
116 | Thanks to Sadeep Jayasumana (first author of CMMD) for all the helpful discussions.
117 | 
118 | 
119 | 
--------------------------------------------------------------------------------
/io_util.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | # Copyright 2024 The Google Research Authors.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | #     http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | 
16 | """IO utilities."""
17 | 
18 | import glob
19 | from torch.utils.data import Dataset, DataLoader
20 | import numpy as np
21 | from PIL import Image
22 | import tqdm
23 | 
24 | 
25 | class CMMDDataset(Dataset):
26 |     def __init__(self, path, reshape_to, max_count=-1):
27 |         self.path = path
28 |         self.reshape_to = reshape_to
29 | 
30 |         self.max_count = max_count
31 |         img_path_list = self._get_image_list()
32 |         if max_count > 0:
33 |             img_path_list = img_path_list[:max_count]
34 |         self.img_path_list = img_path_list
35 | 
36 |     def __len__(self):
37 |         return len(self.img_path_list)
38 | 
39 |     def _get_image_list(self):
40 |         ext_list = ["png", "jpg", "jpeg"]
41 |         image_list = []
42 |         for ext in ext_list:
43 |             image_list.extend(glob.glob(f"{self.path}/*.{ext}"))
44 |             image_list.extend(glob.glob(f"{self.path}/*.{ext.upper()}"))
45 |         # Sort the list to ensure a deterministic output.
46 |         image_list.sort()
47 |         return image_list
48 | 
49 |     def _center_crop_and_resize(self, im, size):
50 |         w, h = im.size
51 |         l = min(w, h)
52 |         top = (h - l) // 2
53 |         left = (w - l) // 2
54 |         box = (left, top, left + l, top + l)
55 |         im = im.crop(box)
56 |         # Note that the following performs anti-aliasing as well.
57 |         return im.resize((size, size), resample=Image.BICUBIC)  # pytype: disable=module-attr
58 | 
59 |     def _read_image(self, path, size):
60 |         im = Image.open(path)
61 |         if size > 0:
62 |             im = self._center_crop_and_resize(im, size)
63 |         return np.asarray(im).astype(np.float32)
64 | 
65 |     def __getitem__(self, idx):
66 |         img_path = self.img_path_list[idx]
67 | 
68 |         x = self._read_image(img_path, self.reshape_to)
69 |         if x.ndim == 3:
70 |             return x
71 |         elif x.ndim == 2:
72 |             # Convert grayscale to RGB by duplicating the channel dimension.
73 |             return np.tile(x[..., np.newaxis], (1, 1, 3))
74 |         else:
75 |             raise ValueError(f"Unexpected number of dimensions for image {img_path}: {x.ndim}.")
76 | 
77 | 
78 | def compute_embeddings_for_dir(
79 |     img_dir,
80 |     embedding_model,
81 |     batch_size,
82 |     max_count=-1,
83 | ):
84 |     """Computes embeddings for the images in the given directory.
85 | 
86 |     Unlike the original JAX implementation, which dropped the remainder after
87 |     batching for efficient TPU computation, the PyTorch `DataLoader` used here
88 |     keeps the final partial batch, so all images are used.
89 | 
90 |     Args:
91 |       img_dir: Directory containing .jpg or .png image files.
92 |       embedding_model: The embedding model to use.
93 |       batch_size: Batch size for the embedding model inference.
94 |       max_count: Max number of images in the directory to use.
95 | 
96 |     Returns:
97 |       Computed embeddings of shape (num_images, embedding_dim).
98 |     """
99 |     dataset = CMMDDataset(img_dir, reshape_to=embedding_model.input_image_size, max_count=max_count)
100 |     count = len(dataset)
101 |     print(f"Calculating embeddings for {count} images from {img_dir}.")
102 | 
103 |     dataloader = DataLoader(dataset, batch_size=batch_size)
104 | 
105 |     all_embs = []
106 |     for batch in tqdm.tqdm(dataloader, total=len(dataloader)):
107 |         image_batch = batch.numpy()
108 | 
109 |         # Normalize to the [0, 1] range.
110 |         image_batch = image_batch / 255.0
111 | 
112 |         if np.min(image_batch) < 0 or np.max(image_batch) > 1:
113 |             raise ValueError(
114 |                 f"Image values are expected to be in [0, 1]. Found: [{np.min(image_batch)}, {np.max(image_batch)}]."
115 |             )
116 | 
117 |         # Compute the embeddings for this batch.
118 |         embs = np.asarray(
119 |             embedding_model.embed(image_batch)
120 |         )  # The output has shape (batch_size, embedding_dim).
121 |         all_embs.append(embs)
122 | 
123 |     all_embs = np.concatenate(all_embs, axis=0)
124 | 
125 |     return all_embs
126 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |                                  Apache License
2 |                            Version 2.0, January 2004
3 |                         http://www.apache.org/licenses/
4 | 
5 |    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 | 
7 |    1. Definitions.
8 | 
9 |       "License" shall mean the terms and conditions for use, reproduction,
10 |       and distribution as defined by Sections 1 through 9 of this document.
11 | 
12 |       "Licensor" shall mean the copyright owner or entity authorized by
13 |       the copyright owner that is granting the License.
14 | 
15 |       "Legal Entity" shall mean the union of the acting entity and all
16 |       other entities that control, are controlled by, or are under common
17 |       control with that entity. For the purposes of this definition,
18 |       "control" means (i) the power, direct or indirect, to cause the
19 |       direction or management of such entity, whether by contract or
20 |       otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 |       outstanding shares, or (iii) beneficial ownership of such entity.
22 | 
23 |       "You" (or "Your") shall mean an individual or Legal Entity
24 |       exercising permissions granted by this License.
25 | 
26 |       "Source" form shall mean the preferred form for making modifications,
27 |       including but not limited to software source code, documentation
28 |       source, and configuration files.
29 | 
30 |       "Object" form shall mean any form resulting from mechanical
31 |       transformation or translation of a Source form, including but
32 |       not limited to compiled object code, generated documentation,
33 |       and conversions to other media types.
34 | 
35 |       "Work" shall mean the work of authorship, whether in Source or
36 |       Object form, made available under the License, as indicated by a
37 |       copyright notice that is included in or attached to the work
38 |       (an example is provided in the Appendix below).
39 | 
40 |       "Derivative Works" shall mean any work, whether in Source or Object
41 |       form, that is based on (or derived from) the Work and for which the
42 |       editorial revisions, annotations, elaborations, or other modifications
43 |       represent, as a whole, an original work of authorship. For the purposes
44 |       of this License, Derivative Works shall not include works that remain
45 |       separable from, or merely link (or bind by name) to the interfaces of,
46 |       the Work and Derivative Works thereof.
47 | 
48 |       "Contribution" shall mean any work of authorship, including
49 |       the original version of the Work and any modifications or additions
50 |       to that Work or Derivative Works thereof, that is intentionally
51 |       submitted to Licensor for inclusion in the Work by the copyright owner
52 |       or by an individual or Legal Entity authorized to submit on behalf of
53 |       the copyright owner. For the purposes of this definition, "submitted"
54 |       means any form of electronic, verbal, or written communication sent
55 |       to the Licensor or its representatives, including but not limited to
56 |       communication on electronic mailing lists, source code control systems,
57 |       and issue tracking systems that are managed by, or on behalf of, the
58 |       Licensor for the purpose of discussing and improving the Work, but
59 |       excluding communication that is conspicuously marked or otherwise
60 |       designated in writing by the copyright owner as "Not a Contribution."
61 | 
62 |       "Contributor" shall mean Licensor and any individual or Legal Entity
63 |       on behalf of whom a Contribution has been received by Licensor and
64 |       subsequently incorporated within the Work.
65 | 
66 |    2. Grant of Copyright License. Subject to the terms and conditions of
67 |       this License, each Contributor hereby grants to You a perpetual,
68 |       worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 |       copyright license to reproduce, prepare Derivative Works of,
70 |       publicly display, publicly perform, sublicense, and distribute the
71 |       Work and such Derivative Works in Source or Object form.
72 | 
73 |    3. Grant of Patent License. Subject to the terms and conditions of
74 |       this License, each Contributor hereby grants to You a perpetual,
75 |       worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 |       (except as stated in this section) patent license to make, have made,
77 |       use, offer to sell, sell, import, and otherwise transfer the Work,
78 |       where such license applies only to those patent claims licensable
79 |       by such Contributor that are necessarily infringed by their
80 |       Contribution(s) alone or by combination of their Contribution(s)
81 |       with the Work to which such Contribution(s) was submitted. If You
82 |       institute patent litigation against any entity (including a
83 |       cross-claim or counterclaim in a lawsuit) alleging that the Work
84 |       or a Contribution incorporated within the Work constitutes direct
85 |       or contributory patent infringement, then any patent licenses
86 |       granted to You under this License for that Work shall terminate
87 |       as of the date such litigation is filed.
88 | 
89 |    4. Redistribution. You may reproduce and distribute copies of the
90 |       Work or Derivative Works thereof in any medium, with or without
91 |       modifications, and in Source or Object form, provided that You
92 |       meet the following conditions:
93 | 
94 |       (a) You must give any other recipients of the Work or
95 |           Derivative Works a copy of this License; and
96 | 
97 |       (b) You must cause any modified files to carry prominent notices
98 |           stating that You changed the files; and
99 | 
100 |       (c) You must retain, in the Source form of any Derivative Works
101 |           that You distribute, all copyright, patent, trademark, and
102 |           attribution notices from the Source form of the Work,
103 |           excluding those notices that do not pertain to any part of
104 |           the Derivative Works; and
105 | 
106 |       (d) If the Work includes a "NOTICE" text file as part of its
107 |           distribution, then any Derivative Works that You distribute must
108 |           include a readable copy of the attribution notices contained
109 |           within such NOTICE file, excluding those notices that do not
110 |           pertain to any part of the Derivative Works, in at least one
111 |           of the following places: within a NOTICE text file distributed
112 |           as part of the Derivative Works; within the Source form or
113 |           documentation, if provided along with the Derivative Works; or,
114 |           within a display generated by the Derivative Works, if and
115 |           wherever such third-party notices normally appear. The contents
116 |           of the NOTICE file are for informational purposes only and
117 |           do not modify the License. You may add Your own attribution
118 |           notices within Derivative Works that You distribute, alongside
119 |           or as an addendum to the NOTICE text from the Work, provided
120 |           that such additional attribution notices cannot be construed
121 |           as modifying the License.
122 | 
123 |       You may add Your own copyright statement to Your modifications and
124 |       may provide additional or different license terms and conditions
125 |       for use, reproduction, or distribution of Your modifications, or
126 |       for any such Derivative Works as a whole, provided Your use,
127 |       reproduction, and distribution of the Work otherwise complies with
128 |       the conditions stated in this License.
129 | 
130 |    5. Submission of Contributions. Unless You explicitly state otherwise,
131 |       any Contribution intentionally submitted for inclusion in the Work
132 |       by You to the Licensor shall be under the terms and conditions of
133 |       this License, without any additional terms or conditions.
134 |       Notwithstanding the above, nothing herein shall supersede or modify
135 |       the terms of any separate license agreement you may have executed
136 |       with Licensor regarding such Contributions.
137 | 
138 |    6. Trademarks. This License does not grant permission to use the trade
139 |       names, trademarks, service marks, or product names of the Licensor,
140 |       except as required for reasonable and customary use in describing the
141 |       origin of the Work and reproducing the content of the NOTICE file.
142 | 
143 |    7. Disclaimer of Warranty. Unless required by applicable law or
144 |       agreed to in writing, Licensor provides the Work (and each
145 |       Contributor provides its Contributions) on an "AS IS" BASIS,
146 |       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 |       implied, including, without limitation, any warranties or conditions
148 |       of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 |       PARTICULAR PURPOSE. You are solely responsible for determining the
150 |       appropriateness of using or redistributing the Work and assume any
151 |       risks associated with Your exercise of permissions under this License.
152 | 
153 |    8. Limitation of Liability. In no event and under no legal theory,
154 |       whether in tort (including negligence), contract, or otherwise,
155 |       unless required by applicable law (such as deliberate and grossly
156 |       negligent acts) or agreed to in writing, shall any Contributor be
157 |       liable to You for damages, including any direct, indirect, special,
158 |       incidental, or consequential damages of any character arising as a
159 |       result of this License or out of the use or inability to use the
160 |       Work (including but not limited to damages for loss of goodwill,
161 |       work stoppage, computer failure or malfunction, or any and all
162 |       other commercial damages or losses), even if such Contributor
163 |       has been advised of the possibility of such damages.
164 | 
165 |    9. Accepting Warranty or Additional Liability. While redistributing
166 |       the Work or Derivative Works thereof, You may choose to offer,
167 |       and charge a fee for, acceptance of support, warranty, indemnity,
168 |       or other liability obligations and/or rights consistent with this
169 |       License. However, in accepting such obligations, You may act only
170 |       on Your own behalf and on Your sole responsibility, not on behalf
171 |       of any other Contributor, and only if You agree to indemnify,
172 |       defend, and hold each Contributor harmless for any liability
173 |       incurred by, or claims asserted against, such Contributor by reason
174 |       of your accepting any such warranty or additional liability.
175 | 
176 |    END OF TERMS AND CONDITIONS
177 | 
178 |    APPENDIX: How to apply the Apache License to your work.
179 | 
180 |       To apply the Apache License to your work, attach the following
181 |       boilerplate notice, with the fields enclosed by brackets "[]"
182 |       replaced with your own identifying information. (Don't include
183 |       the brackets!) The text should be enclosed in the appropriate
184 |       comment syntax for the file format. We also recommend that a
185 |       file or class name and description of purpose be included on the
186 |       same "printed page" as the copyright notice for easier
187 |       identification within third-party archives.
188 | 
189 |    Copyright [yyyy] [name of copyright owner]
190 | 
191 |    Licensed under the Apache License, Version 2.0 (the "License");
192 |    you may not use this file except in compliance with the License.
193 |    You may obtain a copy of the License at
194 | 
195 |        http://www.apache.org/licenses/LICENSE-2.0
196 | 
197 |    Unless required by applicable law or agreed to in writing, software
198 |    distributed under the License is distributed on an "AS IS" BASIS,
199 |    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 |    See the License for the specific language governing permissions and
201 |    limitations under the License.
202 | 
--------------------------------------------------------------------------------