├── object-recognition.pdf ├── classifier_loader.py ├── README.md ├── labels.py ├── evaluator.py ├── run.sh ├── localizer_loader.py ├── classifier.py ├── crop.py ├── localizer.py ├── batch_writer.py └── LICENSE /object-recognition.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anlthms/whale-2015/HEAD/object-recognition.pdf -------------------------------------------------------------------------------- /classifier_loader.py: -------------------------------------------------------------------------------- 1 | # ---------------------------------------------------------------------------- 2 | # Copyright 2015 Nervana Systems Inc. 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ---------------------------------------------------------------------------- 15 | """ 16 | Load data for classification 17 | """ 18 | from neon.data import ImageLoader 19 | 20 | 21 | class ClassifierLoader(ImageLoader): 22 | 23 | def __iter__(self): 24 | for start in range(self.start_idx, self.ndata, self.bsz): 25 | end = min(start + self.bsz, self.ndata) 26 | if end == self.ndata: 27 | self.start_idx = self.bsz - (self.ndata - start) 28 | self.loaderlib.next(self.loader) 29 | self.data[:] = self.buffers[self.idx] 30 | self.data[:] = self.data / 255. 31 | self.onehot_labels[:] = self.be.onehot(self.labels[self.idx], 32 | axis=0) 33 | self.idx = 1 if self.idx == 0 else 0 34 | yield self.data, self.onehot_labels 35 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ### Description 2 | 3 | Right Whale Recognition 4 | 5 | - [Competition page at Kaggle](https://kaggle.com/c/noaa-right-whale-recognition) 6 | - [Video recording of presentation](https://youtu.be/WfuDrJA6JBE) 7 | 8 | ### Usage 9 | 10 | These steps take about 6 hours on a system with 8 processors and an NVIDIA 11 | Titan X GPU. **Tested only on Ubuntu**. 12 | 13 | 1. Download and install neon 1.1.4 14 | 15 | ``` 16 | git clone https://github.com/NervanaSystems/neon.git 17 | cd neon 18 | git checkout e09fc11 19 | make 20 | source .venv/bin/activate 21 | ``` 22 | 2. Install prerequisites 23 | 24 | ``` 25 | pip install scipy scikit-image 26 | ``` 27 | 3. Download the following files from [Kaggle](https://kaggle.com/c/noaa-right-whale-recognition/data): 28 | 29 | ``` 30 | imgs.zip 31 | train.csv 32 | w_7489.jpg 33 | sample_submission.csv 34 | ``` 35 | Save these to a directory that we will refer to as /path/to/data. 36 | 4. Clone this repository 37 | 38 | ``` 39 | git clone https://github.com/anlthms/whale-2015.git 40 | cd whale-2015 41 | ``` 42 | 5. Train models and generate predictions 43 | 44 | ``` 45 | ./run.sh /path/to/data 46 | ``` 47 | 6. Evaluate predictions 48 | 49 | Submit subm.csv.gz to [Kaggle](https://kaggle.com/c/noaa-right-whale-recognition/submissions/attach) 50 | 51 | ### Notes 52 | 53 | - To run on a system that does not have a GPU: 54 | ``` 55 | ./run.sh /path/to/data -bcpu 56 | ``` 57 | - For quicker results, decrease `imwidth` in run.sh. 58 | - The script run.sh first prepares the data for training. If you want to repeat 59 | the preparation step, delete the file /path/to/data/prepdone before invoking 60 | run.sh again. 61 | - If using a GPU, the results are non-deterministic regardless of how the 62 | random number generator is seeded. 63 | - The localizer uses a heuristic to determine when to stop training. If a good 64 | optimum is not detected, a message that says "WARNING: model may not be 65 | optimal" is displayed. 66 | -------------------------------------------------------------------------------- /labels.py: -------------------------------------------------------------------------------- 1 | # ---------------------------------------------------------------------------- 2 | # Copyright 2015 Nervana Systems Inc. 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ---------------------------------------------------------------------------- 15 | """ 16 | Read labels 17 | """ 18 | import numpy as np 19 | import json 20 | import os 21 | from PIL import Image 22 | 23 | 24 | def read_labels(traindir, points1_file, points2_file, imwidth): 25 | datadir = os.path.dirname(traindir) 26 | train = np.genfromtxt(os.path.join(datadir, 'train.csv'), delimiter=',', 27 | skip_header=1, dtype=str) 28 | nrows = train.shape[0] 29 | paths = [os.path.join(traindir, train[i, 0]) for i in range(nrows)] 30 | filemap = {} 31 | # Make a mapping from filename to id. 32 | for i in range(len(paths)): 33 | filemap[paths[i]] = train[i, 1] 34 | idents = np.unique(train[:, 1]) 35 | # Make a mapping from id to numeric label. 36 | idmap = {} 37 | i = 0 38 | for ident in idents: 39 | idmap[ident] = i 40 | i += 1 41 | if points1_file is None or points2_file is None: 42 | return filemap, idmap, None, None, None, None 43 | 44 | # Read annotations 45 | xmap = [{}, {}] 46 | ymap = [{}, {}] 47 | for idx in range(2): 48 | points_file = [points1_file, points2_file][idx] 49 | assert os.path.exists(points_file) 50 | points = json.load(file(points_file)) 51 | for point in points: 52 | assert len(point['annotations']) == 1 53 | path = os.path.join(traindir, point['filename']) 54 | im = Image.open(path) 55 | width, height = im.size 56 | xmap[idx][path] = int( 57 | 1.0 * point['annotations'][0]['x'] * imwidth / width) 58 | ymap[idx][path] = int( 59 | 1.0 * point['annotations'][0]['y'] * imwidth / height) 60 | return filemap, idmap, xmap[0], ymap[0], xmap[1], ymap[1] 61 | -------------------------------------------------------------------------------- /evaluator.py: -------------------------------------------------------------------------------- 1 | import math 2 | import os 3 | import numpy as np 4 | from neon.callbacks.callbacks import Callback 5 | from labels import read_labels 6 | 7 | 8 | class Evaluator(Callback): 9 | 10 | def __init__(self, callback_data, model, dataset, imwidth, 11 | epochs, datadir, pointnum): 12 | super(Evaluator, self).__init__() 13 | self.model = model 14 | self.dataset = dataset 15 | self.callback_data = callback_data 16 | self.imwidth = imwidth 17 | self.epochs = epochs 18 | self.min_dist = 4 * imwidth 19 | traindir = os.path.join(os.path.dirname(datadir), 'train') 20 | _, _, x1map, y1map, x2map, y2map = read_labels( 21 | traindir, 'points1.json', 'points2.json', imwidth) 22 | xmap, ymap = (x1map, y1map) if pointnum == 1 else (x2map, y2map) 23 | self.xymean = np.array([np.mean(xmap.values()), 24 | np.mean(ymap.values())]) 25 | 26 | def get_xy(self, inds): 27 | preds = np.empty((inds.shape[0], 2)) 28 | preds[:, 0] = inds % self.imwidth 29 | preds[:, 1] = inds / self.imwidth 30 | return preds 31 | 32 | def get_outputs(self): 33 | self.model.initialize(self.dataset) 34 | self.dataset.reset() 35 | preds = None 36 | for idx, (x, t) in enumerate(self.dataset): 37 | if preds is None: 38 | (dim0, dim1) = x.shape 39 | preds = np.empty((self.dataset.nbatches * dim1, 2), 40 | dtype=x.dtype) 41 | cur_batch = slice(idx * dim1, (idx + 1) * dim1) 42 | x = self.model.fprop(x, inference=True) 43 | probs = x.get().T 44 | inds = np.argmax(probs, axis=1) 45 | preds[cur_batch] = self.get_xy(inds) 46 | return preds[:self.dataset.ndata] 47 | 48 | def on_epoch_end(self, epoch): 49 | preds = self.get_outputs() 50 | diffs = preds - self.xymean 51 | dist = math.hypot(*np.mean(diffs, axis=0)) 52 | dist += np.mean(np.sqrt(np.sum(diffs * diffs, axis=1))) 53 | print('Heuristic estimate of test error %.2f' % dist) 54 | if dist < self.min_dist: 55 | self.min_dist = dist 56 | if epoch >= self.epochs / 4: 57 | self.model.finished = True 58 | print('Stopping early.') 59 | return 60 | if epoch == self.epochs - 1: 61 | # Early stopping did not kick in. We probably didn't converge well. 62 | print('WARNING: model may not be optimal.') 63 | -------------------------------------------------------------------------------- /run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | # 3 | # Train models and generate predictions. 4 | # 5 | 6 | function prep() { 7 | for file in imgs.zip train.csv w_7489.jpg sample_submission.csv 8 | do 9 | if [ ! -f $data_dir/$file ] 10 | then 11 | echo $data_dir/$file not found 12 | exit 13 | fi 14 | done 15 | echo Unzipping: `date` 16 | unzip -qu $data_dir/imgs.zip -d $data_dir 17 | cp $data_dir/w_7489.jpg $data_dir/imgs 18 | mkdir -p $data_dir/train $data_dir/test 19 | for file in `cat $data_dir/train.csv | cut -f1 -d',' | tail -n +2` 20 | do 21 | mv $data_dir/imgs/$file $data_dir/train/ 22 | done 23 | mv $data_dir/imgs/* $data_dir/test 24 | 25 | echo Cropping training images: `date` 26 | python crop.py points1.json points2.json $data_dir/train $data_dir/traincrops $imwidth 0 27 | 28 | echo Writing macrobatches: `date` 29 | python batch_writer.py --image_dir=$data_dir/train --data_dir=$data_dir/macrotrain --points1_file points1.json --points2_file points2.json --target_size $imwidth --val_pct 0 30 | python batch_writer.py --image_dir=$data_dir/test --data_dir=$data_dir/macrotest --target_size $imwidth --val_pct 100 31 | python batch_writer.py --image_dir=$data_dir/traincrops --data_dir=$data_dir/macrotraincrops --id_label 1 --target_size $imwidth --val_pct 0 32 | 33 | touch $data_dir/prepdone 34 | echo Prep done: `date` 35 | } 36 | 37 | if [ "$1" == "" ] 38 | then 39 | echo Usage: $0 /path/to/data 40 | exit 41 | fi 42 | 43 | data_dir=$1 44 | num_epochs=40 45 | imwidth=384 46 | 47 | echo Starting: `date` 48 | echo data_dir=$data_dir, num_epochs=$num_epochs, imwidth=$imwidth 49 | 50 | if [ -f $data_dir/prepdone ] 51 | then 52 | echo $data_dir/prepdone exists. Skipping prep... 53 | else 54 | prep 55 | fi 56 | 57 | echo Localizing first point: `date` 58 | ./localizer.py -z32 -e $num_epochs -w $data_dir/macrotrain -tw $data_dir/macrotest -r0 -s model1.pkl -bgpu -pn 1 -iw $imwidth --serialize 1 ${@:2} 59 | echo Localizing second point: `date` 60 | ./localizer.py -z32 -e $num_epochs -w $data_dir/macrotrain -tw $data_dir/macrotest -r0 -s model2.pkl -bgpu -pn 2 -iw $imwidth --serialize 1 ${@:2} 61 | 62 | echo Cropping test images: `date` 63 | python crop.py testpoints1.json testpoints2.json $data_dir/test $data_dir/testcrops $imwidth 1 64 | 65 | echo Writing macrobatches: `date` 66 | python batch_writer.py --image_dir=$data_dir/testcrops --data_dir=$data_dir/macrotestcrops --id_label 1 --target_size $imwidth --val_pct 100 67 | 68 | echo Classifying: `date` 69 | ./classifier.py -z32 -e 60 -w $data_dir/macrotraincrops -tw $data_dir/macrotestcrops -r0 -s model3.pkl -bgpu -iw $imwidth --serialize 1 ${@:2} 70 | echo Done: `date` 71 | -------------------------------------------------------------------------------- /localizer_loader.py: -------------------------------------------------------------------------------- 1 | # ---------------------------------------------------------------------------- 2 | # Copyright 2015 Nervana Systems Inc. 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ---------------------------------------------------------------------------- 15 | """ 16 | Load data for localization 17 | """ 18 | import numpy as np 19 | import math 20 | from neon.data import ImageLoader 21 | 22 | 23 | class LocalizerLoader(ImageLoader): 24 | 25 | def __init__(self, repo_dir, inner_size, point_num=None, 26 | do_transforms=True, rgb=True, multiview=False, 27 | set_name='train', subset_pct=100, 28 | nlabels=1, macro=True, dtype=np.float32): 29 | super(LocalizerLoader, self).__init__( 30 | repo_dir, inner_size, do_transforms, rgb, multiview, set_name, 31 | subset_pct, nlabels, macro, dtype) 32 | self.imgheight = inner_size 33 | self.imgwidth = inner_size 34 | self.mask = self.be.iobuf(self.imgheight*self.imgwidth, dtype=dtype) 35 | assert point_num is 1 or point_num is 2 36 | self.point_num = point_num 37 | 38 | def maketarget(self, idx, xc, yc, width, imgheight, imgwidth): 39 | xstart = 0 if xc <= width else xc - width 40 | ystart = 0 if yc <= width else yc - width 41 | xend = imgwidth if imgwidth - xc <= width else xc + width + 1 42 | yend = imgheight if imgheight - yc <= width else yc + width + 1 43 | inds = [] 44 | for x in range(xstart, xend): 45 | for y in range(ystart, yend): 46 | dist = math.hypot(xc - x, yc - y) 47 | if dist >= width: 48 | continue 49 | self.mask[y * imgwidth + x, idx] = (1 - dist / width) 50 | 51 | def maketargets(self, labels): 52 | self.mask[:] = 0 53 | for i in range(self.be.bsz): 54 | xc = labels[0, i] 55 | yc = labels[1, i] 56 | self.maketarget(i, xc, yc, self.imgwidth/60, 57 | self.imgheight, self.imgwidth) 58 | return labels 59 | 60 | def __iter__(self): 61 | for start in range(self.start_idx, self.ndata, self.bsz): 62 | end = min(start + self.bsz, self.ndata) 63 | if end == self.ndata: 64 | self.start_idx = self.bsz - (self.ndata - start) 65 | self.loaderlib.next(self.loader) 66 | self.data[:] = self.buffers[self.idx] 67 | self.data[:] /= 255. 68 | labels = self.labels[self.idx].get() 69 | labels = labels[:2] if self.point_num is 1 else labels[2:] 70 | self.idx = 1 if self.idx == 0 else 0 71 | if self.set_name == 'train': 72 | self.maketargets(labels) 73 | yield self.data, self.mask 74 | else: 75 | yield self.data, labels 76 | -------------------------------------------------------------------------------- /classifier.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # ---------------------------------------------------------------------------- 3 | # Copyright 2015 Nervana Systems Inc. 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # ---------------------------------------------------------------------------- 16 | """ 17 | Classify images 18 | """ 19 | 20 | import os 21 | import gzip 22 | import numpy as np 23 | from neon.util.argparser import NeonArgparser 24 | from neon.initializers import Gaussian 25 | from neon.layers import Conv, DropoutBinary, Pooling, GeneralizedCost, Affine 26 | from neon.optimizers import Adadelta 27 | from neon.transforms import Rectlin, Softmax, CrossEntropyMulti 28 | from neon.models import Model 29 | from neon.callbacks.callbacks import Callbacks 30 | from classifier_loader import ClassifierLoader 31 | 32 | 33 | parser = NeonArgparser(__doc__) 34 | parser.add_argument('-tw', '--test_data_dir', default='', 35 | help='directory in which to find test images') 36 | parser.add_argument('-iw', '--image_width', default=384, help='image width') 37 | args = parser.parse_args() 38 | imwidth = int(args.image_width) 39 | 40 | train = ClassifierLoader(repo_dir=args.data_dir, inner_size=imwidth, 41 | set_name='train', do_transforms=False) 42 | train.init_batch_provider() 43 | init = Gaussian(scale=0.01) 44 | opt = Adadelta(decay=0.9) 45 | common = dict(init=init, batch_norm=True, activation=Rectlin()) 46 | 47 | layers = [] 48 | nchan = 64 49 | layers.append(Conv((2, 2, nchan), strides=2, **common)) 50 | for idx in range(6): 51 | if nchan > 1024: 52 | nchan = 1024 53 | layers.append(Conv((3, 3, nchan), strides=1, **common)) 54 | layers.append(Pooling(2, strides=2)) 55 | nchan *= 2 56 | layers.append(DropoutBinary(keep=0.2)) 57 | layers.append(Affine(nout=447, init=init, activation=Softmax())) 58 | 59 | cost = GeneralizedCost(costfunc=CrossEntropyMulti()) 60 | mlp = Model(layers=layers) 61 | callbacks = Callbacks(mlp, train, **args.callback_args) 62 | mlp.fit(train, optimizer=opt, num_epochs=args.epochs, cost=cost, 63 | callbacks=callbacks) 64 | train.exit_batch_provider() 65 | 66 | test = ClassifierLoader(repo_dir=args.test_data_dir, inner_size=imwidth, 67 | set_name='validation', do_transforms=False) 68 | test.init_batch_provider() 69 | probs = mlp.get_outputs(test) 70 | test.exit_batch_provider() 71 | 72 | filcsv = np.loadtxt(os.path.join(args.test_data_dir, 'val_file.csv'), 73 | delimiter=',', skiprows=1, dtype=str) 74 | files = [os.path.basename(row[0]) for row in filcsv] 75 | datadir = os.path.dirname(args.data_dir) 76 | 77 | with open(os.path.join(datadir, 'sample_submission.csv'), 'r') as fd: 78 | header = fd.readline() 79 | 80 | with gzip.open('subm.csv.gz', 'wb') as fd: 81 | fd.write(header) 82 | for i in range(probs.shape[0]): 83 | fd.write('{},'.format(files[i])) 84 | row = probs[i].tolist() 85 | fd.write(','.join(['{:.3e}'.format(elem) for elem in row])) 86 | fd.write('\n') 87 | -------------------------------------------------------------------------------- /crop.py: -------------------------------------------------------------------------------- 1 | # ---------------------------------------------------------------------------- 2 | # Copyright 2015 Nervana Systems Inc. 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ---------------------------------------------------------------------------- 15 | """ 16 | Crop images 17 | """ 18 | import numpy as np 19 | import json 20 | import sys 21 | import os 22 | import math 23 | import functools 24 | import multiprocessing 25 | from skimage import io 26 | from skimage import transform as tf 27 | from multiprocessing import Pool 28 | 29 | 30 | def load(bonnetfile, blowholefile): 31 | bonnets = json.load(file(bonnetfile)) 32 | blowholes = json.load(file(blowholefile)) 33 | return bonnets, blowholes 34 | 35 | 36 | def crop(path, bonnet, blowhole): 37 | im = io.imread(path).astype(np.uint8) 38 | if doscale == 1: 39 | bonnet['y'] *= float(im.shape[0]) / imwidth 40 | bonnet['x'] *= float(im.shape[1]) / imwidth 41 | blowhole['y'] *= float(im.shape[0]) / imwidth 42 | blowhole['x'] *= float(im.shape[1]) / imwidth 43 | y = bonnet['y'] - blowhole['y'] 44 | x = bonnet['x'] - blowhole['x'] 45 | dist = math.hypot(x, y) 46 | minh = 10 47 | minw = 20 48 | croph = int((im.shape[0] - 1.0 * dist) // 2) 49 | cropw = int((im.shape[1] - 2.0 * dist) // 2) 50 | newh = im.shape[0] - 2 * croph 51 | neww = im.shape[1] - 2 * cropw 52 | if croph <= 0 or cropw <= 0 or newh < minh or neww < minw: 53 | print(' %s unchanged' % os.path.basename(path)) 54 | else: 55 | angle = math.atan2(y, x) * 180 / math.pi 56 | centery = 0.4 * bonnet['y'] + 0.6 * blowhole['y'] 57 | centerx = 0.4 * bonnet['x'] + 0.6 * blowhole['x'] 58 | center = (centerx, centery) 59 | im = tf.rotate(im, angle, resize=False, center=center, 60 | preserve_range=True) 61 | imcenter = (im.shape[1] / 2, im.shape[0] / 2) 62 | trans = (center[0] - imcenter[0], center[1] - imcenter[1]) 63 | tform = tf.SimilarityTransform(translation=trans) 64 | im = tf.warp(im, tform) 65 | im = im[croph:-croph, cropw:-cropw] 66 | path = os.path.join(dstdir, os.path.basename(path)) 67 | io.imsave(path, im.astype(np.uint8)) 68 | return im.shape[0], im.shape[1] 69 | 70 | 71 | def cropbatch(points1, points2, count, maxind, index): 72 | start = index * count 73 | end = min(start+count, maxind) 74 | for i in range(start, end): 75 | point1 = points1[i] 76 | point2 = points2[i] 77 | path = os.path.join(srcdir, point1['filename']) 78 | height, width = crop(path, point1['annotations'][0], 79 | point2['annotations'][0]) 80 | 81 | 82 | if len(sys.argv) < 7: 83 | print('Usage: %s bonnet-points blowhole-points srcdir dstdir ' 84 | 'imwidth scale (0/1)' % sys.argv[0]) 85 | sys.exit(0) 86 | srcdir = sys.argv[3] 87 | dstdir = sys.argv[4] 88 | imwidth = int(sys.argv[5]) 89 | doscale = int(sys.argv[6]) 90 | bonnets, blowholes = load(sys.argv[1], sys.argv[2]) 91 | assert len(bonnets) == len(blowholes) 92 | if os.path.exists(dstdir) is False: 93 | os.mkdir(dstdir) 94 | maxind = len(bonnets) 95 | pcount = multiprocessing.cpu_count() 96 | count = (maxind - 1) / pcount + 1 97 | cropfunc = functools.partial(cropbatch, bonnets, blowholes, count, maxind) 98 | pool = Pool(processes=pcount) 99 | pool.map(cropfunc, range(pcount)) 100 | -------------------------------------------------------------------------------- /localizer.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # ---------------------------------------------------------------------------- 3 | # Copyright 2015 Nervana Systems Inc. 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # ---------------------------------------------------------------------------- 16 | """ 17 | Localize points within images 18 | """ 19 | 20 | import sys 21 | import os 22 | import json 23 | import numpy as np 24 | from neon.util.argparser import NeonArgparser 25 | from neon.initializers import Gaussian 26 | from neon.layers import Conv, Deconv, GeneralizedCost 27 | from neon.optimizers import Adadelta 28 | from neon.transforms import Rectlin, Logistic, SumSquared 29 | from neon.models import Model 30 | from neon.callbacks.callbacks import Callbacks 31 | from localizer_loader import LocalizerLoader 32 | from evaluator import Evaluator 33 | 34 | 35 | parser = NeonArgparser(__doc__) 36 | parser.add_argument('-tw', '--test_data_dir', 37 | default='', 38 | help='directory in which to find test images') 39 | parser.add_argument('-pn', '--point_num', default=None, help='1 or 2') 40 | parser.add_argument('-iw', '--image_width', default=384, help='image width') 41 | args = parser.parse_args() 42 | point_num = int(args.point_num) 43 | imwidth = int(args.image_width) 44 | 45 | train = LocalizerLoader(repo_dir=args.data_dir, inner_size=imwidth, 46 | set_name='train', nlabels=4, do_transforms=False, 47 | point_num=point_num) 48 | test = LocalizerLoader(repo_dir=args.test_data_dir, inner_size=imwidth, 49 | set_name='validation', nlabels=4, do_transforms=False, 50 | point_num=point_num) 51 | train.init_batch_provider() 52 | test.init_batch_provider() 53 | init = Gaussian(scale=0.1) 54 | opt = Adadelta(decay=0.9) 55 | common = dict(init=init, batch_norm=True, activation=Rectlin()) 56 | 57 | # Set up the model layers 58 | layers = [] 59 | nchan = 128 60 | layers.append(Conv((2, 2, nchan), strides=2, **common)) 61 | for idx in range(16): 62 | layers.append(Conv((3, 3, nchan), **common)) 63 | if nchan > 16: 64 | nchan /= 2 65 | for idx in range(15): 66 | layers.append(Deconv((3, 3, nchan), **common)) 67 | layers.append(Deconv((4, 4, nchan), strides=2, **common)) 68 | layers.append(Deconv((3, 3, 1), init=init, activation=Logistic(shortcut=True))) 69 | 70 | cost = GeneralizedCost(costfunc=SumSquared()) 71 | mlp = Model(layers=layers) 72 | callbacks = Callbacks(mlp, train, **args.callback_args) 73 | evaluator = Evaluator(callbacks.callback_data, mlp, test, imwidth, args.epochs, 74 | args.data_dir, point_num) 75 | callbacks.add_callback(evaluator) 76 | mlp.fit(train, optimizer=opt, num_epochs=args.epochs, cost=cost, 77 | callbacks=callbacks) 78 | train.exit_batch_provider() 79 | 80 | preds = evaluator.get_outputs() 81 | paths = np.genfromtxt(os.path.join(args.test_data_dir, 'val_file.csv'), 82 | dtype=str)[1:] 83 | basenames = [os.path.basename(path) for path in paths] 84 | filenames = [path.split(',')[0] for path in basenames] 85 | filenames.sort() 86 | content = [] 87 | for i, filename in enumerate(filenames): 88 | item = { 89 | "annotations": 90 | [ 91 | { 92 | "class": "point", 93 | "x": int(preds[i, 0]), 94 | "y": int(preds[i, 1]) 95 | } 96 | ], 97 | "class": "image", 98 | "filename": filename 99 | } 100 | content.append(item) 101 | 102 | json.dump(content, file('testpoints' + args.point_num + '.json', 'w'), 103 | indent=4) 104 | test.exit_batch_provider() 105 | -------------------------------------------------------------------------------- /batch_writer.py: -------------------------------------------------------------------------------- 1 | # ---------------------------------------------------------------------------- 2 | # Copyright 2015 Nervana Systems Inc. 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ---------------------------------------------------------------------------- 15 | """ 16 | Write macro batches of data 17 | """ 18 | 19 | import numpy as np 20 | import functools 21 | import os 22 | import tarfile 23 | import struct 24 | from glob import glob 25 | from multiprocessing import Pool 26 | from PIL import Image as Image 27 | from PIL import ImageFile 28 | ImageFile.LOAD_TRUNCATED_IMAGES = True 29 | from neon.util.compat import range, StringIO 30 | from neon.util.persist import save_obj 31 | from neon.util.argparser import NeonArgparser 32 | from labels import read_labels 33 | 34 | 35 | parser = NeonArgparser(__doc__) 36 | parser.add_argument('--image_dir', help='Directory to find images', 37 | required=True) 38 | parser.add_argument('--target_size', type=int, default=384, 39 | help='Size in pixels to scale images') 40 | parser.add_argument('--points1_file', 41 | help='json file with co-ordinates of point1', default=None) 42 | parser.add_argument('--points2_file', 43 | help='json file with co-ordinates of point2', default=None) 44 | parser.add_argument('--id_label', type=int, default=0, 45 | help='Whether the labels are IDs') 46 | parser.add_argument('--val_pct', type=int, default=20, 47 | help='Validation set percentage') 48 | args = parser.parse_args() 49 | 50 | 51 | def proc_img(target_size, imgfile=None): 52 | im = Image.open(imgfile) 53 | scale_factor = target_size / np.float32(min(im.size)) 54 | filt = Image.BICUBIC if scale_factor > 1 else Image.ANTIALIAS 55 | im = im.resize((target_size, target_size), filt) 56 | buf = StringIO() 57 | im.save(buf, format='JPEG', quality=95) 58 | return buf.getvalue() 59 | 60 | 61 | class BatchWriter(object): 62 | 63 | def __init__(self, out_dir, image_dir, points1_file, points2_file, 64 | id_label, target_size, val_pct=20, 65 | class_samples_max=None, file_pattern='*.jpg', 66 | macro_size=3072): 67 | self.out_dir = os.path.expanduser(out_dir) 68 | self.image_dir = os.path.expanduser(image_dir) 69 | self.macro_size = macro_size 70 | self.num_workers = 8 71 | self.target_size = target_size 72 | self.file_pattern = file_pattern 73 | self.class_samples_max = class_samples_max 74 | self.val_frac = val_pct / 100. 75 | self.train_file = os.path.join(self.out_dir, 'train_file.csv') 76 | self.val_file = os.path.join(self.out_dir, 'val_file.csv') 77 | self.meta_file = os.path.join(self.out_dir, 'dataset_cache.pkl') 78 | self.batch_prefix = 'data_batch_' 79 | self.points1_file = points1_file 80 | self.points2_file = points2_file 81 | self.id_label = id_label 82 | 83 | def write_csv_files(self): 84 | files = glob(os.path.join(self.image_dir, '*.jpg')) 85 | files.sort() 86 | if self.val_frac != 1.0: 87 | filemap, idmap, x1map, y1map, x2map, y2map = ( 88 | read_labels(self.image_dir, 89 | self.points1_file, 90 | self.points2_file, 91 | self.target_size)) 92 | if self.id_label == 1: 93 | self.label_names = ['id'] 94 | else: 95 | self.label_names = ['x1', 'y1', 'x2', 'y2'] 96 | 97 | indexes = range(len(self.label_names)) 98 | self.label_dict = {k: v for k, v in zip(self.label_names, indexes)} 99 | 100 | tlines = [] 101 | vlines = [] 102 | 103 | np.random.shuffle(files) 104 | v_idx = int(self.val_frac * len(files)) 105 | tfiles = files[v_idx:] 106 | vfiles = files[:v_idx] 107 | vfiles.sort() 108 | if self.id_label == 1: 109 | if self.val_frac == 1.0: 110 | vlines = [(f, 0) for f in vfiles] 111 | else: 112 | tlines = [(f, idmap[filemap[f]]) for f in tfiles] 113 | else: 114 | if self.val_frac == 1.0: 115 | vlines = [(f, 0, 0, 0, 0) for f in vfiles] 116 | else: 117 | tlines = [(f, x1map[f], y1map[f], 118 | x2map[f], y2map[f]) for f in tfiles] 119 | np.random.shuffle(tlines) 120 | 121 | if not os.path.exists(self.out_dir): 122 | os.makedirs(self.out_dir) 123 | 124 | for ff, ll in zip([self.train_file, self.val_file], [tlines, vlines]): 125 | with open(ff, 'wb') as f: 126 | if self.id_label == 1: 127 | f.write('filename,id\n') 128 | for tup in ll: 129 | f.write('{},{}\n'.format(*tup)) 130 | else: 131 | f.write('filename,x,y\n') 132 | for tup in ll: 133 | f.write('{},{},{},{},{}\n'.format(*tup)) 134 | 135 | self.train_nrec = len(tlines) 136 | self.ntrain = -(-self.train_nrec // self.macro_size) 137 | self.train_start = 0 138 | 139 | self.val_nrec = len(vlines) 140 | self.nval = -(-self.val_nrec // self.macro_size) 141 | if self.ntrain == 0: 142 | self.val_start = 100 143 | else: 144 | self.val_start = 10 ** int(np.log10(self.ntrain * 10)) 145 | 146 | def parse_file_list(self, infile): 147 | if self.id_label == 1: 148 | lines = np.loadtxt(infile, delimiter=',', 149 | skiprows=1, 150 | dtype={'names': ('fname', 'id'), 151 | 'formats': (object, 'i4')}) 152 | imfiles = [l[0] for l in lines] 153 | labels = {'id': [l[1] for l in lines]} 154 | self.nclass = 447 155 | else: 156 | lines = np.loadtxt( 157 | infile, delimiter=',', skiprows=1, 158 | dtype={'names': ('fname', 'x1', 'y1', 'x2', 'y2'), 159 | 'formats': (object, 'i4', 'i4', 'i4', 'i4')}) 160 | imfiles = [l[0] for l in lines] 161 | labels = {'x1': [l[1] for l in lines], 'y1': [l[2] for l in lines], 162 | 'x2': [l[3] for l in lines], 'y2': [l[4] for l in lines]} 163 | self.nclass = 4 164 | return imfiles, labels 165 | 166 | def write_batches(self, name, offset, labels, imfiles): 167 | pool = Pool(processes=self.num_workers) 168 | npts = -(-len(imfiles) // self.macro_size) 169 | starts = [i * self.macro_size for i in range(npts)] 170 | imfiles = [imfiles[s:s + self.macro_size] for s in starts] 171 | labels = [{k: v[s:s + self.macro_size] for k, 172 | v in labels.iteritems()} for s in starts] 173 | 174 | print("Writing %d %s batches..." % (len(imfiles), name)) 175 | for i, jpeg_file_batch in enumerate(imfiles): 176 | proc_img_func = functools.partial(proc_img, self.target_size) 177 | jpeg_strings = pool.map(proc_img_func, jpeg_file_batch) 178 | bfile = os.path.join( 179 | self.out_dir, '%s%d' % (self.batch_prefix, offset + i)) 180 | self.write_binary(jpeg_strings, labels[i], bfile) 181 | pool.close() 182 | 183 | def write_binary(self, jpegs, labels, ofname): 184 | num_imgs = len(jpegs) 185 | if self.id_label == 1: 186 | keylist = ['id'] 187 | else: 188 | keylist = ['x1', 'y1', 'x2', 'y2'] 189 | with open(ofname, 'wb') as f: 190 | f.write(struct.pack('I', num_imgs)) 191 | f.write(struct.pack('I', len(keylist))) 192 | 193 | for key in keylist: 194 | ksz = len(key) 195 | f.write(struct.pack('L' + 'B' * ksz, ksz, *bytearray(key))) 196 | f.write(struct.pack('I' * num_imgs, *labels[key])) 197 | 198 | for i in range(num_imgs): 199 | jsz = len(jpegs[i]) 200 | bin = struct.pack('I' + 'B' * jsz, jsz, *bytearray(jpegs[i])) 201 | f.write(bin) 202 | 203 | def save_meta(self): 204 | save_obj({'ntrain': self.ntrain, 205 | 'nval': self.nval, 206 | 'train_start': self.train_start, 207 | 'val_start': self.val_start, 208 | 'macro_size': self.macro_size, 209 | 'batch_prefix': self.batch_prefix, 210 | 'global_mean': self.global_mean, 211 | 'label_dict': self.label_dict, 212 | 'label_names': self.label_names, 213 | 'val_nrec': self.val_nrec, 214 | 'train_nrec': self.train_nrec, 215 | 'img_size': self.target_size, 216 | 'nclass': self.nclass}, self.meta_file) 217 | 218 | def run(self): 219 | self.write_csv_files() 220 | if self.val_frac == 0.0: 221 | namelist = ['train'] 222 | filelist = [self.train_file] 223 | startlist = [self.train_start] 224 | elif self.val_frac == 1.0: 225 | namelist = ['validation'] 226 | filelist = [self.val_file] 227 | startlist = [self.val_start] 228 | else: 229 | namelist = ['train', 'validation'] 230 | filelist = [self.train_file, self.val_file] 231 | startlist = [self.train_start, self.val_start] 232 | for sname, fname, start in zip(namelist, filelist, startlist): 233 | if fname is not None and os.path.exists(fname): 234 | imgs, labels = self.parse_file_list(fname) 235 | if len(imgs) > 0: 236 | self.write_batches(sname, start, labels, imgs) 237 | else: 238 | print("Skipping %s, file missing" % (sname)) 239 | self.global_mean = np.empty((3, 1)) 240 | self.global_mean[:] = 127 241 | self.save_meta() 242 | 243 | 244 | if __name__ == "__main__": 245 | np.random.seed(0) 246 | bw = BatchWriter(out_dir=args.data_dir, image_dir=args.image_dir, 247 | target_size=args.target_size, macro_size=256, 248 | file_pattern='*.jpg', 249 | points1_file=args.points1_file, 250 | points2_file=args.points2_file, 251 | id_label=args.id_label, 252 | val_pct=args.val_pct) 253 | bw.run() 254 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | --------------------------------------------------------------------------------