├── tests ├── __init__.py ├── fixtures │ ├── tiles.csv │ ├── parking │ │ ├── tiles.csv │ │ ├── labels │ │ │ └── 18 │ │ │ │ ├── 69623 │ │ │ │ └── 104946.png │ │ │ │ ├── 70761 │ │ │ │ └── 104120.png │ │ │ │ ├── 70762 │ │ │ │ └── 104119.png │ │ │ │ └── 70763 │ │ │ │ └── 104119.png │ │ ├── images │ │ │ └── 18 │ │ │ │ ├── 69623 │ │ │ │ └── 104946.webp │ │ │ │ ├── 70761 │ │ │ │ └── 104120.webp │ │ │ │ ├── 70762 │ │ │ │ └── 104119.webp │ │ │ │ └── 70763 │ │ │ │ └── 104119.webp │ │ └── features.geojson │ ├── osm │ │ └── 18 │ │ │ ├── 69105 │ │ │ └── 105093.png │ │ │ └── 69108 │ │ │ ├── 105091.png │ │ │ └── 105092.png │ ├── images │ │ └── 18 │ │ │ ├── 69105 │ │ │ └── 105093.jpg │ │ │ └── 69108 │ │ │ ├── 105091.jpg │ │ │ └── 105092.jpg │ └── labels │ │ └── 18 │ │ ├── 69105 │ │ └── 105093.png │ │ └── 69108 │ │ ├── 105091.png │ │ └── 105092.png ├── test_tiles.py ├── test_datasets.py └── tools │ └── test_rasterize.py ├── robosat ├── osm │ ├── __init__.py │ ├── parking.py │ ├── core.py │ ├── building.py │ └── road.py ├── features │ ├── __init__.py │ ├── parking.py │ └── core.py ├── graph │ ├── __init__.py │ └── core.py ├── spatial │ ├── __init__.py │ └── core.py ├── tools │ ├── __init__.py │ ├── cover.py │ ├── extract.py │ ├── subset.py │ ├── export.py │ ├── __main__.py │ ├── features.py │ ├── weights.py │ ├── templates │ │ └── map.html │ ├── dedupe.py │ ├── compare.py │ ├── download.py │ ├── masks.py │ ├── merge.py │ ├── predict.py │ ├── rasterize.py │ ├── serve.py │ └── train.py ├── __init__.py ├── utils.py ├── log.py ├── config.py ├── metrics.py ├── colors.py ├── losses.py ├── datasets.py ├── unet.py ├── transforms.py └── tiles.py ├── rs ├── .dockerignore ├── .flake8 ├── assets ├── buildings.png ├── pipeline-01.png ├── pipeline-02.png └── pipeline-03.png ├── .gitignore ├── AUTHORS.md ├── requirements.in ├── config ├── model-unet.toml └── dataset-parking.toml ├── Makefile ├── LICENSE ├── setup.py ├── docker ├── Dockerfile.cpu └── Dockerfile.gpu ├── .travis.yml ├── README.md └── requirements.txt /tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /robosat/osm/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /robosat/features/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /robosat/graph/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /robosat/spatial/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /rs: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | python3 -m robosat.tools "$@" 4 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | * 2 | !requirements.txt 3 | !robosat 4 | !config 5 | !rs 6 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 
| ignore = E201, E202, E203, E221 3 | max-line-length = 120 4 | -------------------------------------------------------------------------------- /tests/fixtures/tiles.csv: -------------------------------------------------------------------------------- 1 | 69623,104945,18 2 | 69622,104945,18 3 | 69623,104946,18 4 | -------------------------------------------------------------------------------- /assets/buildings.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mapbox/robosat/HEAD/assets/buildings.png -------------------------------------------------------------------------------- /assets/pipeline-01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mapbox/robosat/HEAD/assets/pipeline-01.png -------------------------------------------------------------------------------- /assets/pipeline-02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mapbox/robosat/HEAD/assets/pipeline-02.png -------------------------------------------------------------------------------- /assets/pipeline-03.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mapbox/robosat/HEAD/assets/pipeline-03.png -------------------------------------------------------------------------------- /tests/fixtures/parking/tiles.csv: -------------------------------------------------------------------------------- 1 | 70762,104119,18 2 | 69623,104946,18 3 | 70763,104119,18 4 | 70761,104120,18 5 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | .python-version 3 | .idea 4 | __pycache__ 5 | test 6 | data 7 | 8 | *.pth 9 | *.pb 10 | -------------------------------------------------------------------------------- /tests/fixtures/osm/18/69105/105093.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mapbox/robosat/HEAD/tests/fixtures/osm/18/69105/105093.png -------------------------------------------------------------------------------- /tests/fixtures/osm/18/69108/105091.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mapbox/robosat/HEAD/tests/fixtures/osm/18/69108/105091.png -------------------------------------------------------------------------------- /tests/fixtures/osm/18/69108/105092.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mapbox/robosat/HEAD/tests/fixtures/osm/18/69108/105092.png -------------------------------------------------------------------------------- /tests/fixtures/images/18/69105/105093.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mapbox/robosat/HEAD/tests/fixtures/images/18/69105/105093.jpg -------------------------------------------------------------------------------- /tests/fixtures/images/18/69108/105091.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mapbox/robosat/HEAD/tests/fixtures/images/18/69108/105091.jpg -------------------------------------------------------------------------------- /tests/fixtures/images/18/69108/105092.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/mapbox/robosat/HEAD/tests/fixtures/images/18/69108/105092.jpg -------------------------------------------------------------------------------- /tests/fixtures/labels/18/69105/105093.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mapbox/robosat/HEAD/tests/fixtures/labels/18/69105/105093.png -------------------------------------------------------------------------------- /tests/fixtures/labels/18/69108/105091.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mapbox/robosat/HEAD/tests/fixtures/labels/18/69108/105091.png -------------------------------------------------------------------------------- /tests/fixtures/labels/18/69108/105092.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mapbox/robosat/HEAD/tests/fixtures/labels/18/69108/105092.png -------------------------------------------------------------------------------- /tests/fixtures/parking/labels/18/69623/104946.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mapbox/robosat/HEAD/tests/fixtures/parking/labels/18/69623/104946.png -------------------------------------------------------------------------------- /tests/fixtures/parking/labels/18/70761/104120.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mapbox/robosat/HEAD/tests/fixtures/parking/labels/18/70761/104120.png -------------------------------------------------------------------------------- /tests/fixtures/parking/labels/18/70762/104119.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mapbox/robosat/HEAD/tests/fixtures/parking/labels/18/70762/104119.png -------------------------------------------------------------------------------- /tests/fixtures/parking/labels/18/70763/104119.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mapbox/robosat/HEAD/tests/fixtures/parking/labels/18/70763/104119.png -------------------------------------------------------------------------------- /tests/fixtures/parking/images/18/69623/104946.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mapbox/robosat/HEAD/tests/fixtures/parking/images/18/69623/104946.webp -------------------------------------------------------------------------------- /tests/fixtures/parking/images/18/70761/104120.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mapbox/robosat/HEAD/tests/fixtures/parking/images/18/70761/104120.webp -------------------------------------------------------------------------------- /tests/fixtures/parking/images/18/70762/104119.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mapbox/robosat/HEAD/tests/fixtures/parking/images/18/70762/104119.webp -------------------------------------------------------------------------------- /tests/fixtures/parking/images/18/70763/104119.webp: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/mapbox/robosat/HEAD/tests/fixtures/parking/images/18/70763/104119.webp -------------------------------------------------------------------------------- /AUTHORS.md: -------------------------------------------------------------------------------- 1 | Daniel J. Hofmann https://github.com/daniel-j-h 2 | 3 | Bhargav Kowshik https://github.com/bkowshik 4 | -------------------------------------------------------------------------------- /robosat/tools/__init__.py: -------------------------------------------------------------------------------- 1 | """RoboSat sub-package for command line tools. 2 | 3 | The package's tools and their sub-command help pages can be seen via: 4 | 5 | python3 -m robosat.tools --help 6 | """ 7 | -------------------------------------------------------------------------------- /requirements.in: -------------------------------------------------------------------------------- 1 | numpy 2 | pillow 3 | scipy 4 | opencv-contrib-python-headless 5 | tqdm 6 | flask 7 | requests 8 | geojson 9 | mercantile 10 | osmium 11 | matplotlib 12 | rasterio 13 | supermercado 14 | shapely 15 | rtree 16 | pyproj 17 | toml 18 | pytest 19 | -------------------------------------------------------------------------------- /robosat/__init__.py: -------------------------------------------------------------------------------- 1 | """RoboSat - semantic segmentation on aerial and satellite imagery. 2 | 3 | Extracts features such as: buildings, parking lots, roads, water. 4 | 5 | For available command line tools see the `robosat.tools` sub-package. 6 | """ 7 | 8 | __version__ = '1.2.0' 9 | -------------------------------------------------------------------------------- /robosat/utils.py: -------------------------------------------------------------------------------- 1 | import matplotlib 2 | 3 | matplotlib.use("Agg") 4 | import matplotlib.pyplot as plt # noqa: E402 5 | 6 | 7 | def plot(out, history): 8 | plt.figure() 9 | 10 | n = max(map(len, history.values())) 11 | plt.xticks(range(n), [v + 1 for v in range(n)]) 12 | 13 | plt.grid() 14 | 15 | for values in history.values(): 16 | plt.plot(values) 17 | 18 | plt.xlabel("epoch") 19 | plt.legend(list(history)) 20 | 21 | plt.savefig(out, format="png") 22 | plt.close() 23 | -------------------------------------------------------------------------------- /robosat/log.py: -------------------------------------------------------------------------------- 1 | """Log facilitator. 2 | """ 3 | 4 | import os 5 | import sys 6 | 7 | 8 | class Log: 9 | 10 | """Creates a log instance on a log file. 11 | """ 12 | 13 | def __init__(self, path, out=sys.stdout): 14 | self.out = out 15 | self.fp = open(path, "a") 16 | assert self.fp, "Unable to open log file" 17 | 18 | def log(self, msg): 19 | """Logs a new message to the opened log file, and optionally to stdout or stderr too. 20 | """ 21 | 22 | assert self.fp, "Unable to write in log file" 23 | self.fp.write(msg + os.linesep) 24 | self.fp.flush() 25 | 26 | if self.out: 27 | print(msg, file=self.out) 28 | -------------------------------------------------------------------------------- /robosat/config.py: -------------------------------------------------------------------------------- 1 | """Configuration handling. 2 | 3 | Dictionary-based configuration with a TOML-based on-disk representation. 4 | 5 | See https://github.com/toml-lang/toml 6 | """ 7 | 8 | import toml 9 | 10 | 11 | def load_config(path): 12 | """Loads a dictionary from a configuration file.
13 | 14 | Args: 15 | path: the path to load the configuration from. 16 | 17 | Returns: 18 | The configuration dictionary loaded from the file. 19 | """ 20 | 21 | return toml.load(path) 22 | 23 | 24 | def save_config(attrs, path): 25 | """Saves a configuration dictionary to a file. 26 | 27 | Args: 28 | attrs: the configuration dictionary to save. 29 | path: the path to save the configuration dictionary to. 30 | """ 31 | 32 | with open(path, "w") as fp: 33 | toml.dump(attrs, fp) 34 | -------------------------------------------------------------------------------- /config/model-unet.toml: -------------------------------------------------------------------------------- 1 | # Configuration related to a specific model. 2 | # For syntax see: https://github.com/toml-lang/toml#table-of-contents 3 | 4 | 5 | # Model specific common attributes. 6 | [common] 7 | 8 | # Use CUDA for GPU acceleration. 9 | cuda = true 10 | 11 | # Batch size for training. 12 | batch_size = 2 13 | 14 | # Image side size in pixels. 15 | image_size = 512 16 | 17 | # Directory where to save checkpoints to during training. 18 | checkpoint = '/tmp/pth/' 19 | 20 | 21 | # Model specific optimization parameters. 22 | [opt] 23 | 24 | # Total number of epochs to train for. 25 | epochs = 10 26 | 27 | # Learning rate for the optimizer. 28 | lr = 0.0001 29 | 30 | # Loss function name (e.g. 'Lovasz', 'mIoU' or 'CrossEntropy') 31 | loss = 'Lovasz' 32 | -------------------------------------------------------------------------------- /config/dataset-parking.toml: -------------------------------------------------------------------------------- 1 | # Configuration related to a specific dataset. 2 | # For syntax see: https://github.com/toml-lang/toml#table-of-contents 3 | 4 | 5 | # Dataset specific common attributes. 6 | [common] 7 | 8 | # The slippy map dataset's base directory. 9 | dataset = '/tmp/slippy-map-dir/' 10 | 11 | # Human representation for classes. 12 | classes = ['background', 'parking'] 13 | 14 | # Color map for visualization and representing classes in masks. 15 | # Note: available colors can be found in `robosat/colors.py` 16 | colors = ['denim', 'orange'] 17 | 18 | 19 | # Dataset specific class weights computed on the training data. 20 | # Needed by 'mIoU' and 'CrossEntropy' losses to deal with unbalanced classes. 21 | # Note: use `./rs weights -h` to compute these for new datasets. 22 | [weights] 23 | values = [1.6248, 5.762827] 24 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | dockerimage ?= mapbox/robosat 2 | dockerfile ?= docker/Dockerfile.cpu 3 | srcdir ?= $(shell pwd) 4 | datadir ?= $(shell pwd) 5 | 6 | install: 7 | @docker build -t $(dockerimage) -f $(dockerfile) . 8 | 9 | i: install 10 | 11 | 12 | update: 13 | @docker build -t $(dockerimage) -f $(dockerfile) .
--pull --no-cache 14 | 15 | u: update 16 | 17 | 18 | run: 19 | @docker run -it --rm --ipc="host" --network="host" -v $(srcdir)/robosat:/usr/src/app/robosat -v $(datadir):/data --entrypoint=/bin/bash $(dockerimage) 20 | 21 | r: run 22 | 23 | 24 | publish: 25 | @docker image save $(dockerimage) \ 26 | | pv -N "Publish $(dockerimage) to $(sshopts)" -s $(shell docker image inspect $(dockerimage) --format "{{.Size}}") \ 27 | | ssh $(sshopts) "docker image load" 28 | 29 | p: publish 30 | 31 | 32 | .PHONY: install i run r update u publish p 33 | 34 | -------------------------------------------------------------------------------- /tests/test_tiles.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import mercantile 4 | 5 | from robosat.tiles import tiles_from_slippy_map, tiles_from_csv 6 | 7 | 8 | class TestSlippyMapTiles(unittest.TestCase): 9 | def test_slippy_map_directory(self): 10 | root = "tests/fixtures/images" 11 | tiles = [tile for tile in tiles_from_slippy_map(root)] 12 | self.assertEqual(len(tiles), 3) 13 | 14 | tile, path = tiles[0] 15 | self.assertEqual(type(tile), mercantile.Tile) 16 | self.assertEqual(path, "tests/fixtures/images/18/69105/105093.jpg") 17 | 18 | 19 | class TestReadTiles(unittest.TestCase): 20 | def test_read_tiles(self): 21 | filename = "tests/fixtures/tiles.csv" 22 | tiles = [tile for tile in tiles_from_csv(filename)] 23 | 24 | self.assertEqual(len(tiles), 3) 25 | self.assertEqual(tiles[0], mercantile.Tile(69623, 104945, 18)) 26 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Mapbox 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
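The `cover` tool that follows turns GeoJSON features into a CSV tile cover by delegating to `supermercado.burntiles`. A minimal sketch of that underlying call, assuming a hypothetical polygon feature (the coordinates are made up for illustration):

from supermercado import burntiles

# A hypothetical GeoJSON feature; the polygon ring must be closed.
feature = {
    "type": "Feature",
    "properties": {},
    "geometry": {
        "type": "Polygon",
        "coordinates": [[[8.0, 47.0], [8.001, 47.0], [8.001, 47.001], [8.0, 47.0]]],
    },
}

# burn returns an array of [x, y, z] tile ids covering the feature at the given zoom.
tiles = burntiles.burn([feature], 18)
print(list(map(tuple, tiles.tolist())))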
22 | -------------------------------------------------------------------------------- /robosat/tools/cover.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import csv 3 | import json 4 | 5 | from supermercado import burntiles 6 | from tqdm import tqdm 7 | 8 | 9 | def add_parser(subparser): 10 | parser = subparser.add_parser( 11 | "cover", 12 | help="generates tiles covering GeoJSON features", 13 | formatter_class=argparse.ArgumentDefaultsHelpFormatter, 14 | ) 15 | 16 | parser.add_argument("--zoom", type=int, required=True, help="zoom level of tiles") 17 | parser.add_argument("features", type=str, help="path to GeoJSON features") 18 | parser.add_argument("out", type=str, help="path to csv file to store tiles in") 19 | 20 | parser.set_defaults(func=main) 21 | 22 | 23 | def main(args): 24 | with open(args.features) as f: 25 | features = json.load(f) 26 | 27 | tiles = [] 28 | 29 | for feature in tqdm(features["features"], ascii=True, unit="feature"): 30 | tiles.extend(map(tuple, burntiles.burn([feature], args.zoom).tolist())) 31 | 32 | # tiles can overlap for multiple features; unique tile ids 33 | tiles = list(set(tiles)) 34 | 35 | with open(args.out, "w") as fp: 36 | writer = csv.writer(fp) 37 | writer.writerows(tiles) 38 | -------------------------------------------------------------------------------- /robosat/tools/extract.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | from robosat.osm.parking import ParkingHandler 4 | from robosat.osm.building import BuildingHandler 5 | from robosat.osm.road import RoadHandler 6 | 7 | # Register your osmium handlers here; in addition to the osmium handler interface 8 | # they need to support a `flush()` function for GeoJSON serialization to a file.
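For orientation, here is a minimal sketch of what such a handler could look like; `WaterHandler` and its `natural=water` filter are hypothetical and not part of the codebase, but the shape mirrors the real handlers defined in `robosat/osm/`:

import osmium
import geojson

from robosat.osm.core import FeatureStorage, is_polygon


class WaterHandler(osmium.SimpleHandler):
    """Hypothetical handler sketch: extracts water polygons from the map."""

    def __init__(self, out, batch):
        super().__init__()
        self.storage = FeatureStorage(out, batch)

    def way(self, w):
        # Only closed ways with enough nodes can form a polygon.
        if not is_polygon(w):
            return

        if "natural" not in w.tags or w.tags["natural"] != "water":
            return

        geometry = geojson.Polygon([[(n.lon, n.lat) for n in w.nodes]])
        self.storage.add(geojson.Feature(geometry=geometry))

    def flush(self):
        self.storage.flush()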
9 | handlers = {"parking": ParkingHandler, "building": BuildingHandler, "road": RoadHandler} 10 | 11 | 12 | def add_parser(subparser): 13 | parser = subparser.add_parser( 14 | "extract", 15 | help="extracts GeoJSON features from OpenStreetMap", 16 | formatter_class=argparse.ArgumentDefaultsHelpFormatter, 17 | ) 18 | 19 | parser.add_argument("--type", type=str, required=True, choices=handlers.keys(), help="type of feature to extract") 20 | parser.add_argument("--batch", type=int, default=100000, help="number of features to save per file") 21 | parser.add_argument("map", type=str, help="path to .osm.pbf base map") 22 | parser.add_argument("out", type=str, help="path to GeoJSON file to store features in") 23 | 24 | parser.set_defaults(func=main) 25 | 26 | 27 | def main(args): 28 | handler = handlers[args.type](args.out, args.batch) 29 | handler.apply_file(filename=args.map, locations=True) 30 | handler.flush() 31 | -------------------------------------------------------------------------------- /robosat/tools/subset.py: -------------------------------------------------------------------------------- 1 | import os 2 | import argparse 3 | import shutil 4 | 5 | from tqdm import tqdm 6 | 7 | from robosat.tiles import tiles_from_slippy_map, tiles_from_csv 8 | 9 | 10 | def add_parser(subparser): 11 | parser = subparser.add_parser( 12 | "subset", 13 | help="filter images in a slippy map directory using a csv", 14 | formatter_class=argparse.ArgumentDefaultsHelpFormatter, 15 | ) 16 | parser.add_argument("images", type=str, help="directory to read slippy map image tiles from for filtering") 17 | parser.add_argument("tiles", type=str, help="csv to filter images by") 18 | parser.add_argument("out", type=str, help="directory to save filtered images to") 19 | 20 | parser.set_defaults(func=main) 21 | 22 | 23 | def main(args): 24 | images = tiles_from_slippy_map(args.images) 25 | 26 | tiles = set(tiles_from_csv(args.tiles)) 27 | 28 | for tile, src in tqdm(list(images), desc="Subset", unit="image", ascii=True): 29 | if tile not in tiles: 30 | continue 31 | 32 | # The extension also includes the period.
33 | extension = os.path.splitext(src)[1] 34 | 35 | os.makedirs(os.path.join(args.out, str(tile.z), str(tile.x)), exist_ok=True) 36 | dst = os.path.join(args.out, str(tile.z), str(tile.x), "{}{}".format(tile.y, extension)) 37 | 38 | shutil.copyfile(src, dst) 39 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import setuptools 2 | 3 | with open('README.md', 'r') as fh: 4 | long_description = fh.read() 5 | 6 | setuptools.setup( 7 | name='robosat', 8 | version='1.2.0', 9 | author='Mapbox', 10 | description='Semantic segmentation on aerial and satellite imagery.', 11 | long_description=long_description, 12 | long_description_content_type='text/markdown', 13 | url='https://github.com/mapbox/robosat', 14 | packages=setuptools.find_packages(), 15 | classifiers=( 16 | 'Programming Language :: Python :: 3', 17 | 'License :: OSI Approved :: MIT License', 18 | 'Operating System :: OS Independent', 19 | ), 20 | install_requires=[ 21 | 'flask~=1.0', 22 | 'geojson~=2.4', 23 | 'matplotlib~=3.1', 24 | 'mercantile~=1.0', 25 | 'numpy~=1.16', 26 | 'opencv-contrib-python-headless~=4.0', 27 | 'osmium==2.15.2', 28 | 'pillow~=6.0', 29 | 'pyproj~=2.1', 30 | 'rasterio~=1.0', 31 | 'requests~=2.22', 32 | 'rtree~=0.8', 33 | 'scipy~=1.3', 34 | 'shapely~=1.6', 35 | 'supermercado~=0.0.5', 36 | 'toml~=0.10', 37 | 'torch~=1.1', 38 | 'torchvision~=0.3', 39 | 'tqdm~=4.32', 40 | ], 41 | entry_points={ 42 | 'console_scripts': [ 43 | 'rs=robosat.tools.__main__:main' 44 | ] 45 | } 46 | ) 47 | -------------------------------------------------------------------------------- /robosat/osm/parking.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | import osmium 4 | import geojson 5 | import shapely.geometry 6 | 7 | from robosat.osm.core import FeatureStorage, is_polygon 8 | 9 | 10 | class ParkingHandler(osmium.SimpleHandler): 11 | """Extracts parking lot polygon features (visible in satellite imagery) from the map.
12 | """ 13 | 14 | # parking=* to discard because these features are not vislible in satellite imagery 15 | parking_filter = set(["underground", "sheds", "carports", "garage_boxes"]) 16 | 17 | def __init__(self, out, batch): 18 | super().__init__() 19 | self.storage = FeatureStorage(out, batch) 20 | 21 | def way(self, w): 22 | if not is_polygon(w): 23 | return 24 | 25 | if "amenity" not in w.tags or w.tags["amenity"] != "parking": 26 | return 27 | 28 | if "parking" in w.tags: 29 | if w.tags["parking"] in self.parking_filter: 30 | return 31 | 32 | geometry = geojson.Polygon([[(n.lon, n.lat) for n in w.nodes]]) 33 | shape = shapely.geometry.shape(geometry) 34 | 35 | if shape.is_valid: 36 | feature = geojson.Feature(geometry=geometry) 37 | self.storage.add(feature) 38 | else: 39 | print("Warning: invalid feature: https://www.openstreetmap.org/way/{}".format(w.id), file=sys.stderr) 40 | 41 | def flush(self): 42 | self.storage.flush() 43 | -------------------------------------------------------------------------------- /robosat/tools/export.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | import torch 4 | import torch.onnx 5 | import torch.autograd 6 | 7 | from robosat.config import load_config 8 | from robosat.unet import UNet 9 | 10 | 11 | def add_parser(subparser): 12 | parser = subparser.add_parser( 13 | "export", help="exports model in ONNX format", formatter_class=argparse.ArgumentDefaultsHelpFormatter 14 | ) 15 | 16 | parser.add_argument("--dataset", type=str, required=True, help="path to dataset configuration file") 17 | parser.add_argument("--image_size", type=int, default=512, help="image size to use for model") 18 | parser.add_argument("--checkpoint", type=str, required=True, help="model checkpoint to load") 19 | parser.add_argument("model", type=str, help="path to save ONNX GraphProto .pb model to") 20 | 21 | parser.set_defaults(func=main) 22 | 23 | 24 | def main(args): 25 | dataset = load_config(args.dataset) 26 | 27 | num_classes = len(dataset["common"]["classes"]) 28 | net = UNet(num_classes) 29 | 30 | def map_location(storage, _): 31 | return storage.cpu() 32 | 33 | chkpt = torch.load(args.checkpoint, map_location=map_location) 34 | net = torch.nn.DataParallel(net) 35 | net.load_state_dict(chkpt["state_dict"]) 36 | 37 | # Todo: make input channels configurable, not hard-coded to three channels for RGB 38 | batch = torch.autograd.Variable(torch.randn(1, 3, args.image_size, args.image_size)) 39 | 40 | torch.onnx.export(net, batch, args.model) 41 | -------------------------------------------------------------------------------- /robosat/osm/core.py: -------------------------------------------------------------------------------- 1 | import os 2 | import uuid 3 | 4 | import geojson 5 | 6 | 7 | class FeatureStorage: 8 | """Stores features on disk and handles batching. 9 | 10 | Note: you have to call flush at the end to flush the last partial batch. 
11 | """ 12 | 13 | def __init__(self, out, batch): 14 | assert batch > 0 15 | 16 | self.out = out 17 | self.batch = batch 18 | 19 | self.features = [] 20 | 21 | def add(self, feature): 22 | if len(self.features) >= self.batch: 23 | self.flush() 24 | 25 | self.features.append(feature) 26 | 27 | def flush(self): 28 | if not self.features: 29 | return 30 | 31 | collection = geojson.FeatureCollection(self.features) 32 | 33 | base, ext = os.path.splitext(self.out) 34 | suffix = uuid.uuid4().hex 35 | 36 | out = "{}-{}{}".format(base, suffix, ext) 37 | 38 | with open(out, "w") as fp: 39 | geojson.dump(collection, fp) 40 | 41 | self.features.clear() 42 | 43 | 44 | def is_polygon(way): 45 | """Checks if the way is a polygon. 46 | 47 | Args 48 | way: the osmium.osm.Way to check. 49 | 50 | Returns: 51 | True if the way is a polygon, False otherwise. 52 | 53 | Note: The geometry shape can still be invalid (e.g. self-intersecting). 54 | """ 55 | 56 | if not way.is_closed(): 57 | return False 58 | 59 | if len(way.nodes) < 4: 60 | return False 61 | 62 | return True 63 | -------------------------------------------------------------------------------- /robosat/tools/__main__.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | from robosat.tools import ( 4 | compare, 5 | cover, 6 | dedupe, 7 | download, 8 | extract, 9 | export, 10 | features, 11 | masks, 12 | merge, 13 | predict, 14 | rasterize, 15 | serve, 16 | subset, 17 | train, 18 | weights, 19 | ) 20 | 21 | 22 | def add_parsers(): 23 | parser = argparse.ArgumentParser(prog="./rs") 24 | subparser = parser.add_subparsers(title="robosat tools", metavar="") 25 | 26 | # Add your tool's entry point below. 27 | 28 | extract.add_parser(subparser) 29 | cover.add_parser(subparser) 30 | download.add_parser(subparser) 31 | rasterize.add_parser(subparser) 32 | 33 | train.add_parser(subparser) 34 | export.add_parser(subparser) 35 | predict.add_parser(subparser) 36 | masks.add_parser(subparser) 37 | features.add_parser(subparser) 38 | merge.add_parser(subparser) 39 | dedupe.add_parser(subparser) 40 | 41 | serve.add_parser(subparser) 42 | 43 | weights.add_parser(subparser) 44 | 45 | compare.add_parser(subparser) 46 | subset.add_parser(subparser) 47 | 48 | # We return the parsed arguments, but the sub-command parsers 49 | # are responsible for adding a function hook to their command. 50 | 51 | subparser.required = True 52 | 53 | return parser.parse_args() 54 | 55 | 56 | def main(): 57 | """main entrypoint for robosat tools""" 58 | args = add_parsers() 59 | args.func(args) 60 | 61 | 62 | if __name__ == "__main__": 63 | main() 64 | -------------------------------------------------------------------------------- /robosat/osm/building.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | import osmium 4 | import geojson 5 | import shapely.geometry 6 | 7 | from robosat.osm.core import FeatureStorage, is_polygon 8 | 9 | 10 | class BuildingHandler(osmium.SimpleHandler): 11 | """Extracts building polygon features (visible in satellite imagery) from the map. 
12 | """ 13 | 14 | # building=* to discard because these features are not vislible in satellite imagery 15 | building_filter = set( 16 | ["construction", "houseboat", "static_caravan", "stadium", "conservatory", "digester", "greenhouse", "ruins"] 17 | ) 18 | 19 | # location=* to discard because these features are not vislible in satellite imagery 20 | location_filter = set(["underground", "underwater"]) 21 | 22 | def __init__(self, out, batch): 23 | super().__init__() 24 | self.storage = FeatureStorage(out, batch) 25 | 26 | def way(self, w): 27 | if not is_polygon(w): 28 | return 29 | 30 | if "building" not in w.tags: 31 | return 32 | 33 | if w.tags["building"] in self.building_filter: 34 | return 35 | 36 | if "location" in w.tags and w.tags["location"] in self.location_filter: 37 | return 38 | 39 | geometry = geojson.Polygon([[(n.lon, n.lat) for n in w.nodes]]) 40 | shape = shapely.geometry.shape(geometry) 41 | 42 | if shape.is_valid: 43 | feature = geojson.Feature(geometry=geometry) 44 | self.storage.add(feature) 45 | else: 46 | print("Warning: invalid feature: https://www.openstreetmap.org/way/{}".format(w.id), file=sys.stderr) 47 | 48 | def flush(self): 49 | self.storage.flush() 50 | -------------------------------------------------------------------------------- /docker/Dockerfile.cpu: -------------------------------------------------------------------------------- 1 | FROM ubuntu:18.04 2 | 3 | WORKDIR /usr/src/app 4 | 5 | ENV LANG="C.UTF-8" LC_ALL="C.UTF-8" PATH="/opt/venv/bin:$PATH" PIP_NO_CACHE_DIR="false" CFLAGS="-mavx2" 6 | 7 | RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ 8 | python3 python3-pip python3-venv libspatialindex-c4v5 libglib2.0-0 \ 9 | wget gcc yasm cmake make python3-dev zlib1g-dev libwebp-dev && \ 10 | rm -rf /var/lib/apt/lists/* 11 | 12 | COPY requirements.txt . 13 | 14 | RUN python3 -m venv /opt/venv && \ 15 | python3 -m pip install pip==19.2.3 pip-tools==4.1.0 && \ 16 | python3 -m piptools sync 17 | 18 | RUN python3 -m pip install https://download.pytorch.org/whl/cpu/torch-1.1.0-cp36-cp36m-linux_x86_64.whl && \ 19 | python3 -m pip install https://download.pytorch.org/whl/cpu/torchvision-0.3.0-cp36-cp36m-linux_x86_64.whl 20 | 21 | RUN python3 -c "from torchvision.models import resnet50; resnet50(True)" 22 | 23 | RUN wget -q https://github.com/libjpeg-turbo/libjpeg-turbo/archive/2.0.3.tar.gz -O libjpeg-turbo.tar.gz && \ 24 | echo "a69598bf079463b34d45ca7268462a18b6507fdaa62bb1dfd212f02041499b5d libjpeg-turbo.tar.gz" | sha256sum -c && \ 25 | tar xf libjpeg-turbo.tar.gz && \ 26 | rm libjpeg-turbo.tar.gz && \ 27 | cd libjpeg-turbo* && \ 28 | mkdir build && \ 29 | cd build && \ 30 | cmake -DCMAKE_BUILD_TYPE=Release -DREQUIRE_SIMD=On -DCMAKE_INSTALL_PREFIX=/usr/local .. && \ 31 | make -j $(nproc) && \ 32 | make install && \ 33 | ldconfig && \ 34 | cd ../../ && \ 35 | rm -rf libjpeg-turbo* 36 | 37 | RUN python3 -m pip uninstall -y pillow && \ 38 | python3 -m pip install --no-binary :all: --compile pillow-simd==6.0.0.post0 39 | 40 | COPY . . 
41 | 42 | ENTRYPOINT ["/usr/src/app/rs"] 43 | CMD ["-h"] 44 | -------------------------------------------------------------------------------- /docker/Dockerfile.gpu: -------------------------------------------------------------------------------- 1 | FROM nvidia/cuda:10.1-cudnn7-runtime 2 | 3 | WORKDIR /usr/src/app 4 | 5 | ENV LANG="C.UTF-8" LC_ALL="C.UTF-8" PATH="/opt/venv/bin:$PATH" PIP_NO_CACHE_DIR="false" CFLAGS="-mavx2" 6 | 7 | RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ 8 | python3 python3-pip python3-venv libspatialindex-c4v5 libglib2.0-0 \ 9 | wget gcc yasm cmake make python3-dev zlib1g-dev libwebp-dev && \ 10 | rm -rf /var/lib/apt/lists/* 11 | 12 | COPY requirements.txt . 13 | 14 | RUN python3 -m venv /opt/venv && \ 15 | python3 -m pip install pip==19.2.3 pip-tools==4.1.0 && \ 16 | python3 -m piptools sync 17 | 18 | RUN python3 -m pip install https://download.pytorch.org/whl/cu100/torch-1.1.0-cp36-cp36m-linux_x86_64.whl && \ 19 | python3 -m pip install https://download.pytorch.org/whl/cu100/torchvision-0.3.0-cp36-cp36m-linux_x86_64.whl 20 | 21 | RUN python3 -c "from torchvision.models import resnet50; resnet50(True)" 22 | 23 | RUN wget -q https://github.com/libjpeg-turbo/libjpeg-turbo/archive/2.0.3.tar.gz -O libjpeg-turbo.tar.gz && \ 24 | echo "a69598bf079463b34d45ca7268462a18b6507fdaa62bb1dfd212f02041499b5d libjpeg-turbo.tar.gz" | sha256sum -c && \ 25 | tar xf libjpeg-turbo.tar.gz && \ 26 | rm libjpeg-turbo.tar.gz && \ 27 | cd libjpeg-turbo* && \ 28 | mkdir build && \ 29 | cd build && \ 30 | cmake -DCMAKE_BUILD_TYPE=Release -DREQUIRE_SIMD=On -DCMAKE_INSTALL_PREFIX=/usr/local .. && \ 31 | make -j $(nproc) && \ 32 | make install && \ 33 | ldconfig && \ 34 | cd ../../ && \ 35 | rm -rf libjpeg-turbo* 36 | 37 | RUN python3 -m pip uninstall -y pillow && \ 38 | python3 -m pip install --no-binary :all: --compile pillow-simd==6.0.0.post0 39 | 40 | COPY . . 41 | 42 | ENTRYPOINT ["/usr/src/app/rs"] 43 | CMD ["-h"] 44 | -------------------------------------------------------------------------------- /robosat/tools/features.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | import numpy as np 4 | 5 | from PIL import Image 6 | from tqdm import tqdm 7 | 8 | from robosat.tiles import tiles_from_slippy_map 9 | from robosat.config import load_config 10 | 11 | from robosat.features.parking import ParkingHandler 12 | 13 | 14 | # Register post-processing handlers here; they need to support an `apply(tile, mask)` function 15 | # for handling one mask and a `save(path)` function for GeoJSON serialization to a file.
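As with extraction, a minimal sketch of the expected handler shape; this `NullHandler` is hypothetical and only illustrates the `apply`/`save` interface — the real implementation lives in `robosat/features/parking.py`:

import geojson


class NullHandler:
    """Hypothetical post-processing handler stub illustrating the interface."""

    def __init__(self):
        self.features = []

    def apply(self, tile, mask):
        # Called once per slippy map tile with its binary mask; a real handler
        # extracts contours from the mask and turns them into polygon features.
        pass

    def save(self, out):
        collection = geojson.FeatureCollection(self.features)

        with open(out, "w") as fp:
            geojson.dump(collection, fp)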
16 | handlers = {"parking": ParkingHandler} 17 | 18 | 19 | def add_parser(subparser): 20 | parser = subparser.add_parser( 21 | "features", 22 | help="extracts simplified GeoJSON features from segmentation masks", 23 | formatter_class=argparse.ArgumentDefaultsHelpFormatter, 24 | ) 25 | 26 | parser.add_argument("masks", type=str, help="slippy map directory with segmentation masks") 27 | parser.add_argument("--type", type=str, required=True, choices=handlers.keys(), help="type of feature to extract") 28 | parser.add_argument("--dataset", type=str, required=True, help="path to dataset configuration file") 29 | parser.add_argument("out", type=str, help="path to GeoJSON file to store features in") 30 | 31 | parser.set_defaults(func=main) 32 | 33 | 34 | def main(args): 35 | dataset = load_config(args.dataset) 36 | 37 | labels = dataset["common"]["classes"] 38 | assert set(labels).issuperset(set(handlers.keys())), "handlers have a class label" 39 | index = labels.index(args.type) 40 | 41 | handler = handlers[args.type]() 42 | 43 | tiles = list(tiles_from_slippy_map(args.masks)) 44 | 45 | for tile, path in tqdm(tiles, ascii=True, unit="mask"): 46 | image = np.array(Image.open(path).convert("P"), dtype=np.uint8) 47 | mask = (image == index).astype(np.uint8) 48 | 49 | handler.apply(tile, mask) 50 | 51 | handler.save(args.out) 52 | -------------------------------------------------------------------------------- /tests/test_datasets.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import torch 4 | from robosat.transforms import JointCompose, JointTransform, ImageToTensor, MaskToTensor 5 | import mercantile 6 | 7 | from robosat.datasets import SlippyMapTiles, SlippyMapTilesConcatenation 8 | 9 | 10 | class TestSlippyMapTiles(unittest.TestCase): 11 | 12 | images = "tests/fixtures/images/" 13 | 14 | def test_len(self): 15 | dataset = SlippyMapTiles(TestSlippyMapTiles.images) 16 | self.assertEqual(len(dataset), 3) 17 | 18 | def test_getitem(self): 19 | dataset = SlippyMapTiles(TestSlippyMapTiles.images) 20 | image, tile = dataset[0] 21 | 22 | assert tile == mercantile.Tile(69105, 105093, 18) 23 | # Inspired by: https://github.com/python-pillow/Pillow/blob/master/Tests/test_image.py#L37-L38 24 | self.assertEqual(repr(image)[:45], " 0, "dataset with masks must not be empty" 48 | 49 | # Class weighting scheme `w = 1 / ln(c + p)` see: 50 | # - https://arxiv.org/abs/1707.03718 51 | # LinkNet: Exploiting Encoder Representations for Efficient Semantic Segmentation 52 | # - https://arxiv.org/abs/1606.02147 53 | # ENet: A Deep Neural Network Architecture for Real-Time Semantic Segmentation 54 | 55 | probs = counts / n 56 | weights = 1 / np.log(1.02 + probs) 57 | 58 | weights.round(6, out=weights) 59 | print(weights.tolist()) 60 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: generic 2 | sudo: required 3 | dist: trusty 4 | 5 | services: 6 | - docker 7 | 8 | matrix: 9 | fast_finish: true 10 | 11 | include: 12 | - os: linux 13 | compiler: 'python3.6' 14 | addons: 15 | apt: 16 | sources: ['deadsnakes'] 17 | packages: ['python3.6', 'python3.6-venv', 'libspatialindex-dev', 'libglib2.0-0'] 18 | 19 | before_install: 20 | - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - 21 | - sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" 22 | - 
sudo apt-get update && sudo apt-get -y install docker-ce 23 | - sudo update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.6 10 24 | - python3 -m venv .env && . .env/bin/activate 25 | - python3 -m pip install pip==19.1.1 pip-tools==3.7.0 26 | - python3 -m piptools sync 27 | - python3 -m pip install https://download.pytorch.org/whl/cpu/torch-1.1.0-cp36-cp36m-linux_x86_64.whl 28 | - python3 -m pip install https://download.pytorch.org/whl/cpu/torchvision-0.3.0-cp36-cp36m-linux_x86_64.whl 29 | 30 | script: 31 | - python3 -m pytest 32 | - python3 -m pip install flake8==3.5.0 33 | - flake8 robosat 34 | - python3 -m pip install black==18.6b4 35 | - black . --exclude .env --check --line-length 120 36 | 37 | after_success: 38 | - | 39 | if [[ $TRAVIS_BRANCH == "master" ]]; then 40 | docker build -t mapbox/robosat:latest-cpu -f docker/Dockerfile.cpu . 41 | docker build -t mapbox/robosat:latest-gpu -f docker/Dockerfile.gpu . 42 | 43 | docker login -u $DOCKER_USERNAME -p $DOCKER_PASSWORD 44 | 45 | docker push mapbox/robosat:latest-cpu 46 | docker push mapbox/robosat:latest-gpu 47 | fi 48 | - | 49 | if [[ ! -z $TRAVIS_TAG ]]; then 50 | docker build -t mapbox/robosat:$TRAVIS_TAG-cpu -f docker/Dockerfile.cpu . 51 | docker build -t mapbox/robosat:$TRAVIS_TAG-gpu -f docker/Dockerfile.gpu . 52 | 53 | docker login -u $DOCKER_USERNAME -p $DOCKER_PASSWORD 54 | 55 | docker push mapbox/robosat:$TRAVIS_TAG-cpu 56 | docker push mapbox/robosat:$TRAVIS_TAG-gpu 57 | fi 58 | -------------------------------------------------------------------------------- /robosat/tools/templates/map.html: -------------------------------------------------------------------------------- [HTML template for the serve tool's browser map viewer; the markup was stripped during text extraction and only the page title "Robosat" survives.] -------------------------------------------------------------------------------- /robosat/metrics.py: -------------------------------------------------------------------------------- 1 | """Metrics for segmentation. 2 | """ 3 | 4 | import torch 5 | import math 6 | import numpy as np 7 | 8 | 9 | class Metrics: 10 | """Tracking mean metrics. 11 | """ 12 | 13 | def __init__(self, labels): 14 | """Creates a new `Metrics` instance. 15 | 16 | Args: 17 | labels: the labels for all classes. 18 | """ 19 | 20 | self.labels = labels 21 | 22 | self.tn = 0 23 | self.fn = 0 24 | self.fp = 0 25 | self.tp = 0 26 | 27 | def add(self, actual, predicted): 28 | """Adds an observation to the tracker. 29 | 30 | Args: 31 | actual: the ground truth labels. 32 | predicted: the predicted labels. 33 | """ 34 | 35 | masks = torch.argmax(predicted, 0) 36 | confusion = masks.view(-1).float() / actual.view(-1).float() 37 | 38 | self.tn += torch.sum(torch.isnan(confusion)).item()  # 0/0: true negatives 39 | self.fp += torch.sum(confusion == float("inf")).item()  # 1/0: false positives 40 | self.fn += torch.sum(confusion == 0).item()  # 0/1: false negatives 41 | self.tp += torch.sum(confusion == 1).item()  # 1/1: true positives 42 | 43 | def get_miou(self): 44 | """Retrieves the mean Intersection over Union score. 45 | 46 | Returns: 47 | The mean Intersection over Union score for all observations seen so far. 48 | """ 49 | try: 50 | miou = np.nanmean([self.tn / (self.tn + self.fn + self.fp), self.tp / (self.tp + self.fn + self.fp)]) 51 | except ZeroDivisionError: 52 | miou = float("NaN") 53 | 54 | return miou 55 | 56 | def get_fg_iou(self): 57 | """Retrieves the foreground Intersection over Union score. 58 | 59 | Returns: 60 | The foreground Intersection over Union score for all observations seen so far. 61 | """ 62 | 63 | try: 64 | iou = self.tp / (self.tp + self.fn + self.fp) 65 | except ZeroDivisionError: 66 | iou = float("NaN") 67 | 68 | return iou 69 | 70 | def get_mcc(self): 71 | """Retrieves the Matthews correlation coefficient score. 72 | 73 | Returns: 74 | The Matthews correlation coefficient for all observations seen so far.
75 | """ 76 | 77 | try: 78 | mcc = (self.tp * self.tn - self.fp * self.fn) / math.sqrt( 79 | (self.tp + self.fp) * (self.tp + self.fn) * (self.tn + self.fp) * (self.tn + self.fn) 80 | ) 81 | except ZeroDivisionError: 82 | mcc = float("NaN") 83 | 84 | return mcc 85 | 86 | 87 | # Todo: 88 | # - Rewrite mIoU to handle N classes (and not only binary SemSeg) 89 | -------------------------------------------------------------------------------- /robosat/tools/dedupe.py: -------------------------------------------------------------------------------- 1 | import json 2 | import argparse 3 | import functools 4 | 5 | import geojson 6 | from tqdm import tqdm 7 | 8 | import shapely.geometry 9 | 10 | from robosat.spatial.core import make_index, iou 11 | 12 | 13 | def add_parser(subparser): 14 | parser = subparser.add_parser( 15 | "dedupe", 16 | help="deduplicates features against OpenStreetMap", 17 | formatter_class=argparse.ArgumentDefaultsHelpFormatter, 18 | ) 19 | 20 | parser.add_argument("osm", type=str, help="ground truth GeoJSON feature collection from OpenStreetMap") 21 | parser.add_argument("predicted", type=str, help="predicted GeoJSON feature collection to deduplicate") 22 | parser.add_argument( 23 | "--threshold", type=float, required=True, help="maximum allowed IoU to keep predictions, between 0.0 and 1.0" 24 | ) 25 | parser.add_argument("out", type=str, help="path to GeoJSON to save deduplicated features to") 26 | 27 | parser.set_defaults(func=main) 28 | 29 | 30 | def main(args): 31 | with open(args.osm) as fp: 32 | osm = json.load(fp) 33 | 34 | # Todo: at the moment we load all OSM shapes. It would be more efficient to tile 35 | # cover and load only OSM shapes in the tiles covering the predicted shapes. 36 | osm_shapes = [shapely.geometry.shape(feature["geometry"]) for feature in osm["features"]] 37 | del osm 38 | 39 | with open(args.predicted) as fp: 40 | predicted = json.load(fp) 41 | 42 | predicted_shapes = [shapely.geometry.shape(features["geometry"]) for features in predicted["features"]] 43 | del predicted 44 | 45 | idx = make_index(osm_shapes) 46 | features = [] 47 | 48 | for predicted_shape in tqdm(predicted_shapes, desc="Deduplicating", unit="shapes", ascii=True): 49 | nearby = [osm_shapes[i] for i in idx.intersection(predicted_shape.bounds, objects=False)] 50 | 51 | keep = False 52 | 53 | if not nearby: 54 | keep = True 55 | else: 56 | intersecting = [shape for shape in nearby if predicted_shape.intersects(shape)] 57 | 58 | if not intersecting: 59 | keep = True 60 | else: 61 | intersecting_shapes = functools.reduce(lambda lhs, rhs: lhs.union(rhs), intersecting) 62 | 63 | if iou(predicted_shape, intersecting_shapes) < args.threshold: 64 | keep = True 65 | 66 | if keep: 67 | feature = geojson.Feature(geometry=shapely.geometry.mapping(predicted_shape)) 68 | features.append(feature) 69 | 70 | collection = geojson.FeatureCollection(features) 71 | 72 | with open(args.out, "w") as fp: 73 | geojson.dump(collection, fp) 74 | -------------------------------------------------------------------------------- /robosat/colors.py: -------------------------------------------------------------------------------- 1 | """Color handling, color maps, color palettes. 2 | """ 3 | 4 | import colorsys 5 | 6 | from enum import Enum, unique 7 | 8 | 9 | # Todo: user should be able to bring her own color palette. 10 | # Functions need to account for that and not use one palette. 
11 | 12 | 13 | def _rgb(v): 14 | r, g, b = v[1:3], v[3:5], v[5:7] 15 | return int(r, 16), int(g, 16), int(b, 16) 16 | 17 | 18 | @unique 19 | class Mapbox(Enum): 20 | """Mapbox-themed colors. 21 | 22 | See: https://www.mapbox.com/base/styling/color/ 23 | """ 24 | 25 | dark = _rgb("#404040") 26 | gray = _rgb("#eeeeee") 27 | light = _rgb("#f8f8f8") 28 | white = _rgb("#ffffff") 29 | cyan = _rgb("#3bb2d0") 30 | blue = _rgb("#3887be") 31 | bluedark = _rgb("#223b53") 32 | denim = _rgb("#50667f") 33 | navy = _rgb("#28353d") 34 | navydark = _rgb("#222b30") 35 | purple = _rgb("#8a8acb") 36 | teal = _rgb("#41afa5") 37 | green = _rgb("#56b881") 38 | yellow = _rgb("#f1f075") 39 | mustard = _rgb("#fbb03b") 40 | orange = _rgb("#f9886c") 41 | red = _rgb("#e55e5e") 42 | pink = _rgb("#ed6498") 43 | 44 | 45 | def make_palette(*colors): 46 | """Builds a PIL-compatible color palette from color names. 47 | 48 | Args: 49 | colors: variable number of color names. 50 | """ 51 | 52 | rgbs = [Mapbox[color].value for color in colors] 53 | flattened = sum(rgbs, ()) 54 | return list(flattened) 55 | 56 | 57 | def color_string_to_rgb(color): 58 | """Convert color string to a list of RGB integers. 59 | 60 | Args: 61 | color: the string color value, for example "250,0,0" 62 | 63 | Returns: 64 | color: as a list of RGB integers for example [250,0,0] 65 | """ 66 | 67 | return [*map(int, color.split(","))] 68 | 69 | 70 | def continuous_palette_for_color(color, bins=256): 71 | """Creates a continuous color palette based on a single color. 72 | 73 | Args: 74 | color: the rgb color tuple to create a continuous palette for. 75 | bins: the number of colors to create in the continuous palette. 76 | 77 | Returns: 78 | The continuous rgb color palette with 3*bins values represented as [r0,g0,b0,r1,g1,b1,..] 79 | """ 80 | 81 | # A quick and dirty way to create a continuous color palette is to convert from the RGB color 82 | # space into the HSV color space and then only adapt the color's saturation (S component).
83 | 84 | r, g, b = [v / 255 for v in Mapbox[color].value] 85 | h, s, v = colorsys.rgb_to_hsv(r, g, b) 86 | 87 | palette = [] 88 | 89 | for i in range(bins): 90 | ns = (1 / bins) * (i + 1) 91 | palette.extend([int(v * 255) for v in colorsys.hsv_to_rgb(h, ns, v)]) 92 | 93 | assert len(palette) // 3 == bins 94 | 95 | return palette 96 | -------------------------------------------------------------------------------- /robosat/tools/compare.py: -------------------------------------------------------------------------------- 1 | import os 2 | import argparse 3 | 4 | from PIL import Image 5 | from tqdm import tqdm 6 | import numpy as np 7 | 8 | from robosat.tiles import tiles_from_slippy_map 9 | 10 | 11 | def add_parser(subparser): 12 | parser = subparser.add_parser( 13 | "compare", 14 | help="compare images, labels and masks side by side", 15 | formatter_class=argparse.ArgumentDefaultsHelpFormatter, 16 | ) 17 | parser.add_argument("out", type=str, help="directory to save visualizations to") 18 | parser.add_argument("images", type=str, help="directory to read slippy map images from") 19 | parser.add_argument("labels", type=str, help="directory to read slippy map labels from") 20 | parser.add_argument("masks", type=str, nargs="+", help="slippy map directories to read masks from") 21 | parser.add_argument("--minimum", type=float, default=0.0, help="minimum percentage of mask not background") 22 | parser.add_argument("--maximum", type=float, default=1.0, help="maximum percentage of mask not background") 23 | 24 | parser.set_defaults(func=main) 25 | 26 | 27 | def main(args): 28 | images = tiles_from_slippy_map(args.images) 29 | 30 | for tile, path in tqdm(list(images), desc="Compare", unit="image", ascii=True): 31 | x, y, z = list(map(str, tile)) 32 | 33 | image = Image.open(path).convert("RGB") 34 | label = Image.open(os.path.join(args.labels, z, x, "{}.png".format(y))).convert("P") 35 | assert image.size == label.size 36 | 37 | keep = False 38 | masks = [] 39 | for path in args.masks: 40 | mask = Image.open(os.path.join(path, z, x, "{}.png".format(y))).convert("P") 41 | assert image.size == mask.size 42 | masks.append(mask) 43 | 44 | # TODO: The calculation below does not work for multi-class. 45 | percentage = np.sum(np.array(mask) != 0) / np.prod(image.size) 46 | 47 | # Keep this image when percentage is within required threshold. 48 | if percentage >= args.minimum and percentage <= args.maximum: 49 | keep = True 50 | 51 | if not keep: 52 | continue 53 | 54 | width, height = image.size 55 | 56 | # Columns for image, label and all the masks. 
57 | columns = 2 + len(masks) 58 | combined = Image.new(mode="RGB", size=(columns * width, height)) 59 | 60 | combined.paste(image, box=(0 * width, 0)) 61 | combined.paste(label, box=(1 * width, 0)) 62 | for i, mask in enumerate(masks): 63 | combined.paste(mask, box=((2 + i) * width, 0)) 64 | 65 | os.makedirs(os.path.join(args.out, z, x), exist_ok=True) 66 | path = os.path.join(args.out, z, x, "{}.png".format(y)) 67 | combined.save(path, optimize=True) 68 | -------------------------------------------------------------------------------- /robosat/tools/download.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import time 4 | import argparse 5 | import concurrent.futures as futures 6 | 7 | import requests 8 | from PIL import Image 9 | from tqdm import tqdm 10 | 11 | from robosat.tiles import tiles_from_csv, fetch_image 12 | 13 | 14 | def add_parser(subparser): 15 | parser = subparser.add_parser( 16 | "download", help="downloads images from Mapbox Maps API", formatter_class=argparse.ArgumentDefaultsHelpFormatter 17 | ) 18 | 19 | parser.add_argument("url", type=str, help="endpoint with {z}/{x}/{y} variables to fetch image tiles from") 20 | parser.add_argument("--ext", type=str, default="webp", help="file format to save images in") 21 | parser.add_argument("--rate", type=int, default=10, help="rate limit in max. requests per second") 22 | parser.add_argument("tiles", type=str, help="path to .csv tiles file") 23 | parser.add_argument("out", type=str, help="path to slippy map directory for storing tiles") 24 | 25 | parser.set_defaults(func=main) 26 | 27 | 28 | def main(args): 29 | tiles = list(tiles_from_csv(args.tiles)) 30 | 31 | with requests.Session() as session: 32 | num_workers = args.rate 33 | 34 | # tqdm has problems with concurrent.futures.ThreadPoolExecutor; explicitly call `.update` 35 | # https://github.com/tqdm/tqdm/issues/97 36 | progress = tqdm(total=len(tiles), ascii=True, unit="image") 37 | 38 | with futures.ThreadPoolExecutor(num_workers) as executor: 39 | 40 | def worker(tile): 41 | tick = time.monotonic() 42 | 43 | x, y, z = map(str, [tile.x, tile.y, tile.z]) 44 | 45 | os.makedirs(os.path.join(args.out, z, x), exist_ok=True) 46 | path = os.path.join(args.out, z, x, "{}.{}".format(y, args.ext)) 47 | 48 | if os.path.isfile(path): 49 | return tile, True 50 | 51 | url = args.url.format(x=tile.x, y=tile.y, z=tile.z) 52 | 53 | res = fetch_image(session, url) 54 | 55 | if not res: 56 | return tile, False 57 | 58 | try: 59 | image = Image.open(res) 60 | image.save(path, optimize=True) 61 | except OSError: 62 | return tile, False 63 | 64 | tock = time.monotonic() 65 | 66 | time_for_req = tock - tick 67 | time_per_worker = num_workers / args.rate 68 | 69 | if time_for_req < time_per_worker: 70 | time.sleep(time_per_worker - time_for_req) 71 | 72 | progress.update() 73 | 74 | return tile, True 75 | 76 | for tile, ok in executor.map(worker, tiles): 77 | if not ok: 78 | print("Warning: {} failed, skipping".format(tile), file=sys.stderr) 79 | -------------------------------------------------------------------------------- /robosat/spatial/core.py: -------------------------------------------------------------------------------- 1 | import functools 2 | 3 | import pyproj 4 | import shapely.ops 5 | 6 | from rtree.index import Index, Property 7 | 8 | 9 | def project(shape, source, target): 10 | """Projects a geometry from one coordinate system into another. 11 | 12 | Args: 13 | shape: the geometry to project. 
14 | source: the source EPSG spatial reference system identifier. 15 | target: the target EPSG spatial reference system identifier. 16 | 17 | Returns: 18 | The projected geometry in the target coordinate system. 19 | """ 20 | 21 | transformer = pyproj.Transformer.from_crs(source, target) 22 | return shapely.ops.transform(transformer.transform, shape) 23 | 24 | 25 | def union(shapes): 26 | """Returns the union of all shapes. 27 | 28 | Args: 29 | shapes: the geometries to merge into one. 30 | 31 | Returns: 32 | The union of all shapes as one shape. 33 | """ 34 | 35 | assert shapes 36 | 37 | def fn(lhs, rhs): 38 | return lhs.union(rhs) 39 | 40 | return functools.reduce(fn, shapes) 41 | 42 | ea_transformer = pyproj.Transformer.from_crs("epsg:4326", "esri:54009")  # equal-area (Mollweide) for area comparisons 43 | wgs_ellipsoid_transformer = pyproj.Transformer.from_crs("epsg:4326", "epsg:3395")  # WGS84 -> World Mercator 44 | ellipsoid_wgs_transformer = pyproj.Transformer.from_crs("epsg:3395", "epsg:4326")  # World Mercator -> WGS84 45 | 46 | def project_ea(shape): 47 | return shapely.ops.transform(ea_transformer.transform, shape) 48 | 49 | def project_wgs_el(shape): 50 | return shapely.ops.transform(wgs_ellipsoid_transformer.transform, shape) 51 | 52 | def project_el_wgs(shape): 53 | return shapely.ops.transform(ellipsoid_wgs_transformer.transform, shape) 54 | 55 | 56 | def iou(lhs, rhs): 57 | """Calculates intersection over union metric between two shapes. 58 | 59 | Args: 60 | lhs: first shape for IoU calculation. 61 | rhs: second shape for IoU calculation. 62 | 63 | Returns: 64 | IoU metric in range [0, 1] 65 | """ 66 | 67 | # equal-area projection for comparing shape areas 68 | lhs = project_ea(lhs) 69 | rhs = project_ea(rhs) 70 | 71 | intersection = lhs.intersection(rhs) 72 | union = lhs.union(rhs) 73 | 74 | rv = intersection.area / union.area 75 | assert 0 <= rv <= 1 76 | 77 | return rv 78 | 79 | 80 | def make_index(shapes): 81 | """Creates an index for fast and efficient spatial queries. 82 | 83 | Args: 84 | shapes: shapely shapes to bulk-insert bounding boxes for into the spatial index. 85 | 86 | Returns: 87 | The spatial index created from the shape's bounding boxes. 88 | """ 89 | 90 | # Todo: benchmark these for our use-cases 91 | prop = Property() 92 | prop.dimension = 2 93 | prop.leaf_capacity = 1000 94 | prop.fill_factor = 0.9 95 | 96 | def bounded(): 97 | for i, shape in enumerate(shapes): 98 | yield (i, shape.bounds, None) 99 | 100 | return Index(bounded(), properties=prop) 101 | -------------------------------------------------------------------------------- /robosat/graph/core.py: -------------------------------------------------------------------------------- 1 | import collections 2 | 3 | 4 | class UndirectedGraph: 5 | """Simple undirected graph. 6 | 7 | Note: stores edges; can not store vertices without edges. 8 | """ 9 | 10 | def __init__(self): 11 | """Creates an empty `UndirectedGraph` instance. 12 | """ 13 | 14 | # Todo: We might need a compressed sparse row graph (i.e. adjacency array) 15 | # to make this scale. Let's circle back when we run into this limitation. 16 | self.edges = collections.defaultdict(set) 17 | 18 | def add_edge(self, s, t): 19 | """Adds an edge to the graph. 20 | 21 | Args: 22 | s: the source vertex. 23 | t: the target vertex. 24 | 25 | Note: because this is an undirected graph for every edge `s, t` an edge `t, s` is added. 26 | """ 27 | 28 | self.edges[s].add(t) 29 | self.edges[t].add(s) 30 | 31 | def targets(self, v): 32 | """Returns all outgoing targets for a vertex. 33 | 34 | Args: 35 | v: the vertex to return targets for.
36 | 
37 |         Returns:
38 |             The set of all outgoing targets for the vertex.
39 |         """
40 | 
41 |         return self.edges[v]
42 | 
43 |     def vertices(self):
44 |         """Returns all vertices in the graph.
45 | 
46 |         Returns:
47 |             A set of all vertices in the graph.
48 |         """
49 | 
50 |         return self.edges.keys()
51 | 
52 |     def empty(self):
53 |         """Returns true if the graph is empty, false otherwise.
54 | 
55 |         Returns:
56 |             True if the graph has no edges or vertices, false otherwise.
57 |         """
58 |         return len(self.edges) == 0
59 | 
60 |     def dfs(self, v):
61 |         """Applies a depth-first search to the graph.
62 | 
63 |         Args:
64 |             v: the vertex to start the depth-first search at.
65 | 
66 |         Yields:
67 |             The visited graph vertices in depth-first search order.
68 | 
69 |         Note: the start vertex `v` itself is included in the traversal.
70 |         """
71 | 
72 |         stack = []
73 |         stack.append(v)
74 | 
75 |         seen = set()
76 | 
77 |         while stack:
78 |             s = stack.pop()
79 | 
80 |             if s not in seen:
81 |                 seen.add(s)
82 | 
83 |                 for t in self.targets(s):
84 |                     stack.append(t)
85 | 
86 |                 yield s
87 | 
88 |     def components(self):
89 |         """Computes connected components for the graph.
90 | 
91 |         Yields:
92 |             The connected component sub-graphs consisting of vertices; in no particular order.
93 |         """
94 | 
95 |         seen = set()
96 | 
97 |         for v in self.vertices():
98 |             if v not in seen:
99 |                 component = set(self.dfs(v))
100 |                 component.add(v)
101 | 
102 |                 seen.update(component)
103 | 
104 |                 yield component
105 | 
--------------------------------------------------------------------------------
/robosat/tools/masks.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import argparse
4 | 
5 | import numpy as np
6 | 
7 | from tqdm import tqdm
8 | from PIL import Image
9 | 
10 | from robosat.tiles import tiles_from_slippy_map
11 | from robosat.colors import make_palette
12 | 
13 | 
14 | def add_parser(subparser):
15 |     parser = subparser.add_parser(
16 |         "masks",
17 |         help="compute masks from prediction probabilities",
18 |         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
19 |     )
20 | 
21 |     parser.add_argument("masks", type=str, help="slippy map directory to save masks to")
22 |     parser.add_argument("probs", type=str, nargs="+", help="slippy map directories with class probabilities")
23 |     parser.add_argument("--weights", type=float, nargs="+", help="weights for weighted average soft-voting")
24 | 
25 |     parser.set_defaults(func=main)
26 | 
27 | 
28 | def main(args):
29 |     if args.weights and len(args.probs) != len(args.weights):
30 |         sys.exit("Error: number of slippy map directories and weights must be the same")
31 | 
32 |     tilesets = map(tiles_from_slippy_map, args.probs)
33 | 
34 |     for tileset in tqdm(list(zip(*tilesets)), desc="Masks", unit="tile", ascii=True):
35 |         tiles = [tile for tile, _ in tileset]
36 |         paths = [path for _, path in tileset]
37 | 
38 |         assert len(set(tiles)) == 1, "tilesets in sync"
39 |         x, y, z = tiles[0]
40 | 
41 |         # Un-quantize the probabilities in [0,255] to floating point values in [0,1]
42 |         anchors = np.linspace(0, 1, 256)
43 | 
44 |         def load(path):
45 |             # Note: assumes the binary case and that probabilities sum up to one.
46 |             # Needs to be in sync with how we store them in prediction.
47 | 
48 |             quantized = np.array(Image.open(path).convert("P"))
49 | 
50 |             # (512, 512) -> (1, 512, 512)
51 |             foreground = np.expand_dims(anchors[quantized], axis=0)
52 |             background = 1. - foreground
53 | 
54 |             # (1, 512, 512) + (1, 512, 512) -> (2, 512, 512)
55 |             return np.concatenate((background, foreground), axis=0)
56 | 
57 |         probs = [load(path) for path in paths]
58 | 
59 |         mask = softvote(probs, axis=0, weights=args.weights)
60 |         mask = mask.astype(np.uint8)
61 | 
62 |         palette = make_palette("denim", "orange")
63 |         out = Image.fromarray(mask, mode="P")
64 |         out.putpalette(palette)
65 | 
66 |         os.makedirs(os.path.join(args.masks, str(z), str(x)), exist_ok=True)
67 | 
68 |         path = os.path.join(args.masks, str(z), str(x), str(y) + ".png")
69 |         out.save(path, optimize=True)
70 | 
71 | 
72 | def softvote(probs, axis=0, weights=None):
73 |     """Weighted average soft-voting to transform class probabilities into class indices.
74 | 
75 |     Args:
76 |         probs: array-like probabilities to average.
77 |         axis: axis or axes along which to soft-vote.
78 |         weights: array-like for weighting probabilities.
79 | 
80 |     Notes:
81 |         See http://scikit-learn.org/stable/modules/ensemble.html#weighted-average-probabilities-soft-voting
82 |     """
83 | 
84 |     return np.argmax(np.average(probs, axis=axis, weights=weights), axis=axis)
--------------------------------------------------------------------------------
/robosat/tools/merge.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import argparse
3 | 
4 | import geojson
5 | 
6 | from tqdm import tqdm
7 | import shapely.geometry
8 | 
9 | from robosat.spatial.core import make_index, union, project_ea, project_wgs_el, project_el_wgs
10 | from robosat.graph.core import UndirectedGraph
11 | 
12 | 
13 | def add_parser(subparser):
14 |     parser = subparser.add_parser(
15 |         "merge", help="merges adjacent GeoJSON features", formatter_class=argparse.ArgumentDefaultsHelpFormatter
16 |     )
17 | 
18 |     parser.add_argument("features", type=str, help="GeoJSON file to read features from")
19 |     parser.add_argument("--threshold", type=int, required=True, help="minimum distance to adjacent features, in m")
20 |     parser.add_argument("out", type=str, help="path to GeoJSON to save merged features to")
21 | 
22 |     parser.set_defaults(func=main)
23 | 
24 | 
25 | def main(args):
26 |     with open(args.features) as fp:
27 |         collection = geojson.load(fp)
28 | 
29 |     shapes = [shapely.geometry.shape(feature["geometry"]) for feature in collection["features"]]
30 |     del collection
31 | 
32 |     graph = UndirectedGraph()
33 |     idx = make_index(shapes)
34 | 
35 |     def buffered(shape, args):
36 |         projected = project_wgs_el(shape)
37 |         buffered = projected.buffer(args.threshold)
38 |         unprojected = project_el_wgs(buffered)
39 |         return unprojected
40 | 
41 |     def unbuffered(shape, args):
42 |         projected = project_wgs_el(shape)
43 |         unbuffered = projected.buffer(-1 * args.threshold)
44 |         unprojected = project_el_wgs(unbuffered)
45 |         return unprojected
46 | 
47 |     for i, shape in enumerate(tqdm(shapes, desc="Building graph", unit="shapes", ascii=True)):
48 |         embiggened = buffered(shape, args)
49 | 
50 |         graph.add_edge(i, i)
51 | 
52 |         nearest = [j for j in idx.intersection(embiggened.bounds, objects=False) if i != j]
53 | 
54 |         for t in nearest:
55 |             if embiggened.intersects(shapes[t]):
56 |                 graph.add_edge(i, t)
57 | 
58 |     components = list(graph.components())
59 |     assert sum([len(v) for v in components]) == len(shapes), "components capture all shape indices"
60 | 
61 |     features = []
62 | 
63 |     for component in tqdm(components, desc="Merging components", unit="component", ascii=True):
64 |         embiggened = [buffered(shapes[v], args) for v in component]
65 |         merged = unbuffered(union(embiggened), args)
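        # Buffering before the union and un-buffering afterwards acts like a
        # morphological closing in projected meter space: features closer than
        # the threshold get bridged and merged, then shrunk back to roughly
        # their original outline.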
66 | 
67 |         if merged.is_valid:
68 |             # Orient exterior ring of the polygon in counter-clockwise direction.
69 |             if isinstance(merged, shapely.geometry.polygon.Polygon):
70 |                 merged = shapely.geometry.polygon.orient(merged, sign=1.0)
71 |             elif isinstance(merged, shapely.geometry.multipolygon.MultiPolygon):
72 |                 merged = [shapely.geometry.polygon.orient(geom, sign=1.0) for geom in merged.geoms]
73 |                 merged = shapely.geometry.MultiPolygon(merged)
74 |             else:
75 |                 print("Warning: merged feature is neither Polygon nor MultiPolygon, skipping", file=sys.stderr)
76 |                 continue
77 | 
78 |             # equal-area projection; round to full m^2, we're not that precise anyway
79 |             area = int(round(project_ea(merged).area))
80 | 
81 |             feature = geojson.Feature(geometry=shapely.geometry.mapping(merged), properties={"area": area})
82 |             features.append(feature)
83 |         else:
84 |             print("Warning: merged feature is not valid, skipping", file=sys.stderr)
85 | 
86 |     collection = geojson.FeatureCollection(features)
87 | 
88 |     with open(args.out, "w") as fp:
89 |         geojson.dump(collection, fp)
--------------------------------------------------------------------------------
/robosat/losses.py:
--------------------------------------------------------------------------------
1 | """PyTorch-compatible losses and loss functions.
2 | """
3 | 
4 | import torch
5 | import torch.nn as nn
6 | 
7 | 
8 | class CrossEntropyLoss2d(nn.Module):
9 |     """Cross-entropy.
10 | 
11 |     See: http://cs231n.github.io/neural-networks-2/#losses
12 |     """
13 | 
14 |     def __init__(self, weight=None):
15 |         """Creates a `CrossEntropyLoss2d` instance.
16 | 
17 |         Args:
18 |             weight: rescaling weight for each class.
19 |         """
20 | 
21 |         super().__init__()
22 |         self.nll_loss = nn.NLLLoss(weight)
23 | 
24 |     def forward(self, inputs, targets):
25 |         return self.nll_loss(nn.functional.log_softmax(inputs, dim=1), targets)
26 | 
27 | 
28 | class FocalLoss2d(nn.Module):
29 |     """Focal Loss.
30 | 
31 |     Reduces loss for well-classified samples putting focus on hard mis-classified samples.
32 | 
33 |     See: https://arxiv.org/abs/1708.02002
34 |     """
35 | 
36 |     def __init__(self, gamma=2, weight=None):
37 |         """Creates a `FocalLoss2d` instance.
38 | 
39 |         Args:
40 |             gamma: the focusing parameter; if zero this loss is equivalent to `CrossEntropyLoss2d`.
41 |             weight: rescaling weight for each class.
42 |         """
43 | 
44 |         super().__init__()
45 |         self.nll_loss = nn.NLLLoss(weight)
46 |         self.gamma = gamma
47 | 
48 |     def forward(self, inputs, targets):
49 |         penalty = (1 - nn.functional.softmax(inputs, dim=1)) ** self.gamma
50 |         return self.nll_loss(penalty * nn.functional.log_softmax(inputs, dim=1), targets)
51 | 
52 | 
53 | class mIoULoss2d(nn.Module):
54 |     """mIoU Loss.
55 | 
56 |     See:
57 |         - http://www.cs.umanitoba.ca/~ywang/papers/isvc16.pdf
58 |         - http://www.cs.toronto.edu/~wenjie/papers/iccv17/mattyus_etal_iccv17.pdf
59 |     """
60 | 
61 |     def __init__(self, weight=None):
62 |         """Creates a `mIoULoss2d` instance.
63 | 
64 |         Args:
65 |             weight: rescaling weight for each class.
66 |         """
67 | 
68 |         super().__init__()
69 |         self.nll_loss = nn.NLLLoss(weight)
70 | 
71 |     def forward(self, inputs, targets):
72 | 
73 |         N, C, H, W = inputs.size()
74 | 
75 |         softs = nn.functional.softmax(inputs, dim=1).permute(1, 0, 2, 3)
76 |         masks = torch.zeros(N, C, H, W).to(targets.device).scatter_(1, targets.view(N, 1, H, W), 1).permute(1, 0, 2, 3)
77 | 
78 |         inters = softs * masks
79 |         unions = (softs + masks) - (softs * masks)
80 | 
81 |         miou = 1. - (inters.view(C, N, -1).sum(2) / unions.view(C, N, -1).sum(2)).mean()
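        # Falling back to the larger cross-entropy term below is presumably
        # meant to keep gradients useful when the soft IoU term saturates; the
        # intent is not documented upstream.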
82 | 
83 |         return max(miou, self.nll_loss(nn.functional.log_softmax(inputs, dim=1), targets))
84 | 
85 | 
86 | class LovaszLoss2d(nn.Module):
87 |     """Lovasz Loss.
88 | 
89 |     See: https://arxiv.org/abs/1705.08790
90 |     """
91 | 
92 |     def __init__(self):
93 |         """Creates a `LovaszLoss2d` instance."""
94 |         super().__init__()
95 | 
96 |     def forward(self, inputs, targets):
97 | 
98 |         N, C, H, W = inputs.size()
99 |         masks = torch.zeros(N, C, H, W).to(targets.device).scatter_(1, targets.view(N, 1, H, W), 1)
100 | 
101 |         loss = 0.
102 | 
103 |         for mask, input in zip(masks.view(N, -1), inputs.view(N, -1)):
104 | 
105 |             max_margin_errors = 1. - ((mask * 2 - 1) * input)
106 |             errors_sorted, indices = torch.sort(max_margin_errors, descending=True)
107 |             labels_sorted = mask[indices.data]
108 | 
109 |             inter = labels_sorted.sum() - labels_sorted.cumsum(0)
110 |             union = labels_sorted.sum() + (1. - labels_sorted).cumsum(0)
111 |             iou = 1. - inter / union
112 | 
113 |             p = len(labels_sorted)
114 |             if p > 1:
115 |                 iou[1:p] = iou[1:p] - iou[0:-1]
116 | 
117 |             loss += torch.dot(nn.functional.relu(errors_sorted), iou)
118 | 
119 |         return loss / N
--------------------------------------------------------------------------------
/robosat/features/parking.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import collections
3 | 
4 | import geojson
5 | 
6 | import shapely.geometry
7 | 
8 | from robosat.features.core import denoise, grow, contours, simplify, featurize, parents_in_hierarchy
9 | 
10 | 
11 | class ParkingHandler:
12 |     kernel_size_denoise = 20
13 |     kernel_size_grow = 20
14 |     simplify_threshold = 0.01
15 | 
16 |     def __init__(self):
17 |         self.features = []
18 | 
19 |     def apply(self, tile, mask):
20 |         if tile.z != 18:
21 |             raise NotImplementedError("Parking lot post-processing thresholds are tuned for z18")
22 | 
23 |         # The post-processing pipeline removes noise and fills in smaller holes. We then
24 |         # extract contours, simplify them and transform tile pixels into coordinates.
25 | 
26 |         denoised = denoise(mask, self.kernel_size_denoise)
27 |         grown = grow(denoised, self.kernel_size_grow)
28 | 
29 |         # Contours have a hierarchy: for example an outer ring, and an inner ring for a polygon with a hole.
30 |         #
31 |         # The ith hierarchy entry is a tuple with (next, prev, fst child, parent) for the ith polygon with:
32 |         # - next is the index into the polygons for the next polygon on the same hierarchy level
33 |         # - prev is the index into the polygons for the previous polygon on the same hierarchy level
34 |         # - fst child is the index into the polygons for the ith polygon's first child polygon
35 |         # - parent is the index into the polygons for the ith polygon's single parent polygon
36 |         #
37 |         # In case of non-existent indices their index value is -1.
38 | 
39 |         multipolygons, hierarchy = contours(grown)
40 | 
41 |         if hierarchy is None:
42 |             return
43 | 
44 |         # In the following we re-construct the hierarchy walking from polygons up to the top-most polygon.
45 |         # We then create a GeoJSON polygon with a single outer ring and potentially multiple inner rings.
46 |         #
47 |         # Note: we currently do not handle multipolygons which are nested even deeper.
48 | 
49 |         # This seems to be a bug in the OpenCV Python bindings; the C++ interface
50 |         # returns a vector but here it's always wrapped in an extra list.
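        # A hypothetical example: for one outer ring containing one hole, OpenCV
        # may return hierarchy == [[(-1, -1, 1, -1), (-1, -1, -1, 0)]], i.e. the
        # outer polygon 0 has first child 1, and the inner polygon 1 has parent 0.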
51 | assert len(hierarchy) == 1, "always single hierarchy for all polygons in multipolygon" 52 | hierarchy = hierarchy[0] 53 | 54 | assert len(multipolygons) == len(hierarchy), "polygons and hierarchy in sync" 55 | 56 | polygons = [simplify(polygon, self.simplify_threshold) for polygon in multipolygons] 57 | 58 | # Todo: generalize and move to features.core 59 | 60 | # All child ids in hierarchy tree, keyed by root id. 61 | features = collections.defaultdict(set) 62 | 63 | for i, (polygon, node) in enumerate(zip(polygons, hierarchy)): 64 | if len(polygon) < 3: 65 | print("Warning: simplified feature no longer valid polygon, skipping", file=sys.stderr) 66 | continue 67 | 68 | _, _, _, parent_idx = node 69 | 70 | ancestors = list(parents_in_hierarchy(i, hierarchy)) 71 | 72 | # Only handles polygons with a nesting of two levels for now => no multipolygons. 73 | if len(ancestors) > 1: 74 | print("Warning: polygon ring nesting level too deep, skipping", file=sys.stderr) 75 | continue 76 | 77 | # A single mapping: i => {i} implies single free-standing polygon, no inner rings. 78 | # Otherwise: i => {i, j, k, l} implies: outer ring i, inner rings j, k, l. 79 | root = ancestors[-1] if ancestors else i 80 | 81 | features[root].add(i) 82 | 83 | for outer, inner in features.items(): 84 | rings = [featurize(tile, polygons[outer], mask.shape[:2])] 85 | 86 | # In mapping i => {i, ..} i is not a child. 87 | children = inner.difference(set([outer])) 88 | 89 | for child in children: 90 | rings.append(featurize(tile, polygons[child], mask.shape[:2])) 91 | 92 | assert 0 < len(rings), "at least one outer ring in a polygon" 93 | 94 | geometry = geojson.Polygon(rings) 95 | shape = shapely.geometry.shape(geometry) 96 | 97 | if shape.is_valid: 98 | self.features.append(geojson.Feature(geometry=geometry)) 99 | else: 100 | print("Warning: extracted feature is not valid, skipping", file=sys.stderr) 101 | 102 | def save(self, out): 103 | collection = geojson.FeatureCollection(self.features) 104 | 105 | with open(out, "w") as fp: 106 | geojson.dump(collection, fp) 107 | -------------------------------------------------------------------------------- /robosat/features/core.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | from PIL import Image 4 | 5 | from robosat.tiles import pixel_to_location 6 | 7 | 8 | def visualize(mask, path): 9 | """Writes a visual representation `.png` file for a binary mask. 10 | 11 | Args: 12 | mask: the binary mask to visualize. 13 | path: the path to save the `.png` image to. 14 | """ 15 | 16 | out = Image.fromarray(mask, mode="P") 17 | out.putpalette([0, 0, 0, 255, 255, 255]) 18 | out.save(path) 19 | 20 | 21 | def contours_to_mask(contours, shape): 22 | """Creates a binary mask for contours. 23 | 24 | Args: 25 | contours: the contours to create a mask for. 26 | shape: the resulting mask's shape 27 | 28 | Returns: 29 | The binary mask with rasterized contours. 30 | """ 31 | 32 | canvas = np.zeros(shape, np.uint8) 33 | cv2.drawContours(canvas, contours, contourIdx=-1, color=1) 34 | return canvas 35 | 36 | 37 | def featurize(tile, polygon, shape): 38 | """Transforms polygons in image pixel coordinates into world coordinates. 39 | 40 | Args: 41 | tile: the tile this polygon is in for coordinate calculation. 42 | polygon: the polygon to transform from pixel to world coordinates. 43 | shape: the image's max x and y coordinates. 44 | 45 | Returns: 46 | The closed polygon transformed into world coordinates. 
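
        Example (hypothetical values): for a 512x512 mask, the OpenCV contour
        point `[[256, 0]]` maps to `pixel_to_location(tile, 0.5, 1.0)`, i.e. the
        coordinate at the tile's top center.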
47 | """ 48 | 49 | xmax, ymax = shape 50 | 51 | feature = [] 52 | 53 | for point in polygon: 54 | px, py = point[0] 55 | dx, dy = px / xmax, py / ymax 56 | 57 | feature.append(pixel_to_location(tile, dx, 1. - dy)) 58 | 59 | assert feature, "at least one location in polygon" 60 | feature.append(feature[0]) # polygons are closed 61 | 62 | return feature 63 | 64 | 65 | def denoise(mask, eps): 66 | """Removes noise from a mask. 67 | 68 | Args: 69 | mask: the mask to remove noise from. 70 | eps: the morphological operation's kernel size for noise removal, in pixel. 71 | 72 | Returns: 73 | The mask after applying denoising. 74 | """ 75 | 76 | struct = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (eps, eps)) 77 | return cv2.morphologyEx(mask, cv2.MORPH_OPEN, struct) 78 | 79 | 80 | def grow(mask, eps): 81 | """Grows a mask to fill in small holes, e.g. to establish connectivity. 82 | 83 | Args: 84 | mask: the mask to grow. 85 | eps: the morphological operation's kernel size for growing, in pixel. 86 | 87 | Returns: 88 | The mask after filling in small holes. 89 | """ 90 | 91 | struct = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (eps, eps)) 92 | return cv2.morphologyEx(mask, cv2.MORPH_CLOSE, struct) 93 | 94 | 95 | def contours(mask): 96 | """Extracts contours and the relationship between them from a binary mask. 97 | 98 | Args: 99 | mask: the binary mask to find contours in. 100 | 101 | Returns: 102 | The detected contours as a list of points and the contour hierarchy. 103 | 104 | Note: the hierarchy can be used to re-construct polygons with holes as one entity. 105 | """ 106 | 107 | contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) 108 | return contours, hierarchy 109 | 110 | 111 | # Todo: should work for lines, too, but then needs other epsilon criterion than arc length 112 | def simplify(polygon, eps): 113 | """Simplifies a polygon to minimize the polygon's vertices. 114 | 115 | Args: 116 | polygon: the polygon made up of a list of vertices. 117 | eps: the approximation accuracy as max. percentage of the arc length, in [0, 1] 118 | 119 | """ 120 | 121 | assert 0 <= eps <= 1, "approximation accuracy is percentage in [0, 1]" 122 | 123 | epsilon = eps * cv2.arcLength(polygon, closed=True) 124 | return cv2.approxPolyDP(polygon, epsilon=epsilon, closed=True) 125 | 126 | 127 | def parents_in_hierarchy(node, tree): 128 | """Walks a hierarchy tree upwards from a starting node collecting all nodes on the way. 129 | 130 | Args: 131 | node: the index for the starting node in the hierarchy. 132 | tree: the hierarchy tree containing tuples of (next, prev, first child, parent) ids. 133 | 134 | Yields: 135 | The node ids on the upwards path in the hierarchy tree. 136 | """ 137 | 138 | def parent(n): 139 | # next, prev, fst child, parent 140 | return n[3] 141 | 142 | at = tree[node] 143 | up = parent(at) 144 | 145 | while up != -1: 146 | index = up 147 | at = tree[index] 148 | up = parent(at) 149 | 150 | assert index != node, "upward path does not include starting node" 151 | 152 | yield index 153 | -------------------------------------------------------------------------------- /robosat/datasets.py: -------------------------------------------------------------------------------- 1 | """PyTorch-compatible datasets. 2 | 3 | Guaranteed to implement `__len__`, and `__getitem__`. 
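
A minimal usage sketch (the directory path is hypothetical):

    dataset = SlippyMapTiles("tiles")
    image, tile = dataset[0]  # a PIL image and its mercantile tile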
4 | 
5 | See: http://pytorch.org/docs/0.3.1/data.html
6 | """
7 | 
8 | import torch
9 | from PIL import Image
10 | import torch.utils.data
11 | 
12 | from robosat.tiles import tiles_from_slippy_map, buffer_tile_image
13 | 
14 | 
15 | # Single Slippy Map directory structure
16 | class SlippyMapTiles(torch.utils.data.Dataset):
17 |     """Dataset for images stored in slippy map format.
18 |     """
19 | 
20 |     def __init__(self, root, transform=None):
21 |         super().__init__()
22 | 
23 |         self.tiles = []
24 |         self.transform = transform
25 | 
26 |         self.tiles = [(tile, path) for tile, path in tiles_from_slippy_map(root)]
27 |         self.tiles.sort(key=lambda tile: tile[0])
28 | 
29 |     def __len__(self):
30 |         return len(self.tiles)
31 | 
32 |     def __getitem__(self, i):
33 |         tile, path = self.tiles[i]
34 |         image = Image.open(path)
35 | 
36 |         if self.transform is not None:
37 |             image = self.transform(image)
38 | 
39 |         return image, tile
40 | 
41 | 
42 | # Multiple Slippy Map directories.
43 | # Think: one with images, one with masks, one with rasterized traces.
44 | class SlippyMapTilesConcatenation(torch.utils.data.Dataset):
45 |     """Dataset to concatenate multiple input images stored in slippy map format.
46 |     """
47 | 
48 |     def __init__(self, inputs, target, joint_transform=None):
49 |         super().__init__()
50 | 
51 |         # No per-dataset transformations in `SlippyMapTiles`; joint transformations are applied in `__getitem__` instead.
52 |         self.joint_transform = joint_transform
53 | 
54 |         self.inputs = [SlippyMapTiles(inp) for inp in inputs]
55 |         self.target = SlippyMapTiles(target)
56 | 
57 |         assert len(set([len(dataset) for dataset in self.inputs])) == 1, "same number of tiles in all images"
58 |         assert len(self.target) == len(self.inputs[0]), "same number of tiles in images and label"
59 | 
60 |     def __len__(self):
61 |         return len(self.target)
62 | 
63 |     def __getitem__(self, i):
64 |         # Load the raw inputs and mask first; the joint transformation below turns them into tensors.
65 |         inputs = [dataset[i] for dataset in self.inputs]
66 | 
67 |         images = [image for image, _ in inputs]
68 |         tiles = [tile for _, tile in inputs]
69 | 
70 |         mask, mask_tile = self.target[i]
71 | 
72 |         assert len(set(tiles)) == 1, "all images are for the same tile"
73 |         assert tiles[0] == mask_tile, "image tile is the same as label tile"
74 | 
75 |         if self.joint_transform is not None:
76 |             images, mask = self.joint_transform(images, mask)
77 | 
78 |         return torch.cat(images, dim=0), mask, tiles
79 | 
80 | 
81 | # Todo: once we have the SlippyMapDataset this dataset should wrap
82 | # it adding buffer and unbuffer glue on top of the raw tile dataset.
83 | class BufferedSlippyMapDirectory(torch.utils.data.Dataset):
84 |     """Dataset for buffered slippy map tiles with overlap.
85 |     """
86 | 
87 |     def __init__(self, root, transform=None, size=512, overlap=32):
88 |         """
89 |         Args:
90 |             root: the slippy map directory root with a `z/x/y.png` sub-structure.
91 |             transform: the transformation to run on the buffered tile.
92 |             size: the Slippy Map tile size in pixels.
93 |             overlap: the tile border to add on every side, in pixels.
94 | 
95 |         Note:
96 |             The overlap must not span multiple tiles.
97 | 
98 |             Use `unbuffer` to get back the original tile.
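
            A minimal round-trip sketch (the directory path is hypothetical):

                dataset = BufferedSlippyMapDirectory("images", size=512, overlap=32)
                image, tile = dataset[0]  # buffered to (512 + 2 * 32) pixels per side
                # predict on `image`, then crop back with `dataset.unbuffer(probs)`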
99 | """ 100 | 101 | super().__init__() 102 | 103 | assert overlap >= 0 104 | assert size >= 256 105 | 106 | self.transform = transform 107 | self.size = size 108 | self.overlap = overlap 109 | self.tiles = list(tiles_from_slippy_map(root)) 110 | 111 | def __len__(self): 112 | return len(self.tiles) 113 | 114 | def __getitem__(self, i): 115 | tile, path = self.tiles[i] 116 | image = buffer_tile_image(tile, self.tiles, overlap=self.overlap, tile_size=self.size) 117 | 118 | if self.transform is not None: 119 | image = self.transform(image) 120 | 121 | return image, torch.IntTensor([tile.x, tile.y, tile.z]) 122 | 123 | def unbuffer(self, probs): 124 | """Removes borders from segmentation probabilities added to the original tile image. 125 | 126 | Args: 127 | probs: the segmentation probability mask to remove buffered borders. 128 | 129 | Returns: 130 | The probability mask with the original tile's dimensions without added overlap borders. 131 | """ 132 | 133 | o = self.overlap 134 | _, x, y = probs.shape 135 | 136 | return probs[:, o : x - o, o : y - o] 137 | -------------------------------------------------------------------------------- /robosat/tools/predict.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | import sys 4 | 5 | import numpy as np 6 | 7 | import torch 8 | import torch.nn as nn 9 | import torch.backends.cudnn 10 | from torch.utils.data import DataLoader 11 | from torchvision.transforms import Compose, Normalize 12 | 13 | from tqdm import tqdm 14 | from PIL import Image 15 | 16 | from robosat.datasets import BufferedSlippyMapDirectory 17 | from robosat.unet import UNet 18 | from robosat.config import load_config 19 | from robosat.colors import continuous_palette_for_color 20 | from robosat.transforms import ConvertImageMode, ImageToTensor 21 | 22 | 23 | def add_parser(subparser): 24 | parser = subparser.add_parser( 25 | "predict", 26 | help="predicts probability masks for slippy map tiles", 27 | formatter_class=argparse.ArgumentDefaultsHelpFormatter, 28 | ) 29 | 30 | parser.add_argument("--batch_size", type=int, default=1, help="images per batch") 31 | parser.add_argument("--checkpoint", type=str, required=True, help="model checkpoint to load") 32 | parser.add_argument("--overlap", type=int, default=32, help="tile pixel overlap to predict on") 33 | parser.add_argument("--tile_size", type=int, required=True, help="tile size for slippy map tiles") 34 | parser.add_argument("--workers", type=int, default=0, help="number of workers pre-processing images") 35 | parser.add_argument("tiles", type=str, help="directory to read slippy map image tiles from") 36 | parser.add_argument("probs", type=str, help="directory to save slippy map probability masks to") 37 | parser.add_argument("--model", type=str, required=True, help="path to model configuration file") 38 | parser.add_argument("--dataset", type=str, required=True, help="path to dataset configuration file") 39 | 40 | parser.set_defaults(func=main) 41 | 42 | 43 | def main(args): 44 | model = load_config(args.model) 45 | dataset = load_config(args.dataset) 46 | 47 | cuda = model["common"]["cuda"] 48 | 49 | device = torch.device("cuda" if cuda else "cpu") 50 | 51 | def map_location(storage, _): 52 | return storage.cuda() if cuda else storage.cpu() 53 | 54 | if cuda and not torch.cuda.is_available(): 55 | sys.exit("Error: CUDA requested but not available") 56 | 57 | num_classes = len(dataset["common"]["classes"]) 58 | 59 | # 
https://github.com/pytorch/pytorch/issues/7178 60 | chkpt = torch.load(args.checkpoint, map_location=map_location) 61 | 62 | net = UNet(num_classes).to(device) 63 | net = nn.DataParallel(net) 64 | 65 | if cuda: 66 | torch.backends.cudnn.benchmark = True 67 | 68 | net.load_state_dict(chkpt["state_dict"]) 69 | net.eval() 70 | 71 | mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225] 72 | 73 | transform = Compose([ConvertImageMode(mode="RGB"), ImageToTensor(), Normalize(mean=mean, std=std)]) 74 | 75 | directory = BufferedSlippyMapDirectory(args.tiles, transform=transform, size=args.tile_size, overlap=args.overlap) 76 | assert len(directory) > 0, "at least one tile in dataset" 77 | 78 | loader = DataLoader(directory, batch_size=args.batch_size, num_workers=args.workers) 79 | 80 | # don't track tensors with autograd during prediction 81 | with torch.no_grad(): 82 | for images, tiles in tqdm(loader, desc="Eval", unit="batch", ascii=True): 83 | images = images.to(device) 84 | outputs = net(images) 85 | 86 | # manually compute segmentation mask class probabilities per pixel 87 | probs = nn.functional.softmax(outputs, dim=1).data.cpu().numpy() 88 | 89 | for tile, prob in zip(tiles, probs): 90 | x, y, z = list(map(int, tile)) 91 | 92 | # we predicted on buffered tiles; now get back probs for original image 93 | prob = directory.unbuffer(prob) 94 | 95 | # Quantize the floating point probabilities in [0,1] to [0,255] and store 96 | # a single-channel `.png` file with a continuous color palette attached. 97 | 98 | assert prob.shape[0] == 2, "single channel requires binary model" 99 | assert np.allclose(np.sum(prob, axis=0), 1.), "single channel requires probabilities to sum up to one" 100 | foreground = prob[1:, :, :] 101 | 102 | anchors = np.linspace(0, 1, 256) 103 | quantized = np.digitize(foreground, anchors).astype(np.uint8) 104 | 105 | palette = continuous_palette_for_color("pink", 256) 106 | 107 | out = Image.fromarray(quantized.squeeze(), mode="P") 108 | out.putpalette(palette) 109 | 110 | os.makedirs(os.path.join(args.probs, str(z), str(x)), exist_ok=True) 111 | path = os.path.join(args.probs, str(z), str(x), str(y) + ".png") 112 | 113 | out.save(path, optimize=True) 114 | -------------------------------------------------------------------------------- /robosat/unet.py: -------------------------------------------------------------------------------- 1 | """U-Net inspired encoder-decoder architecture with a ResNet encoder as proposed by Alexander Buslaev. 2 | 3 | See: 4 | - https://arxiv.org/abs/1505.04597 - U-Net: Convolutional Networks for Biomedical Image Segmentation 5 | - https://arxiv.org/abs/1411.4038 - Fully Convolutional Networks for Semantic Segmentation 6 | - https://arxiv.org/abs/1512.03385 - Deep Residual Learning for Image Recognition 7 | - https://arxiv.org/abs/1801.05746 - TernausNet: U-Net with VGG11 Encoder Pre-Trained on ImageNet for Image Segmentation 8 | - https://arxiv.org/abs/1806.00844 - TernausNetV2: Fully Convolutional Network for Instance Segmentation 9 | 10 | """ 11 | 12 | import torch 13 | import torch.nn as nn 14 | 15 | from torchvision.models import resnet50 16 | 17 | 18 | class ConvRelu(nn.Module): 19 | """3x3 convolution followed by ReLU activation building block. 20 | """ 21 | 22 | def __init__(self, num_in, num_out): 23 | """Creates a `ConvReLU` building block. 
24 | 
25 |         Args:
26 |             num_in: number of input feature maps
27 |             num_out: number of output feature maps
28 |         """
29 | 
30 |         super().__init__()
31 | 
32 |         self.block = nn.Conv2d(num_in, num_out, kernel_size=3, padding=1, bias=False)
33 | 
34 |     def forward(self, x):
35 |         """The network's forward pass for which autograd synthesizes the backwards pass.
36 | 
37 |         Args:
38 |             x: the input tensor
39 | 
40 |         Returns:
41 |             The network's output tensor.
42 |         """
43 | 
44 |         return nn.functional.relu(self.block(x), inplace=True)
45 | 
46 | 
47 | class DecoderBlock(nn.Module):
48 |     """Decoder building block upsampling resolution by a factor of two.
49 |     """
50 | 
51 |     def __init__(self, num_in, num_out):
52 |         """Creates a `DecoderBlock` building block.
53 | 
54 |         Args:
55 |             num_in: number of input feature maps
56 |             num_out: number of output feature maps
57 |         """
58 | 
59 |         super().__init__()
60 | 
61 |         self.block = ConvRelu(num_in, num_out)
62 | 
63 |     def forward(self, x):
64 |         """The network's forward pass for which autograd synthesizes the backwards pass.
65 | 
66 |         Args:
67 |             x: the input tensor
68 | 
69 |         Returns:
70 |             The network's output tensor.
71 |         """
72 | 
73 |         return self.block(nn.functional.interpolate(x, scale_factor=2, mode="nearest"))
74 | 
75 | 
76 | class UNet(nn.Module):
77 |     """The "U-Net" architecture for semantic segmentation, adapted by changing the encoder to a ResNet feature extractor.
78 | 
79 |     Also known as AlbuNet due to its inventor Alexander Buslaev.
80 |     """
81 | 
82 |     def __init__(self, num_classes, num_filters=32, pretrained=True):
83 |         """Creates a `UNet` instance for semantic segmentation.
84 | 
85 |         Args:
86 |             num_classes: number of classes to predict.
87 |             pretrained: use ImageNet pre-trained backbone feature extractor.
88 |         """
89 | 
90 |         super().__init__()
91 | 
92 |         # Todo: make input channels configurable, not hard-coded to three channels for RGB
93 | 
94 |         self.resnet = resnet50(pretrained=pretrained)
95 | 
96 |         # Access resnet directly in forward pass; do not store refs here due to
97 |         # https://github.com/pytorch/pytorch/issues/8392
98 | 
99 |         self.center = DecoderBlock(2048, num_filters * 8)
100 | 
101 |         self.dec0 = DecoderBlock(2048 + num_filters * 8, num_filters * 8)
102 |         self.dec1 = DecoderBlock(1024 + num_filters * 8, num_filters * 8)
103 |         self.dec2 = DecoderBlock(512 + num_filters * 8, num_filters * 2)
104 |         self.dec3 = DecoderBlock(256 + num_filters * 2, num_filters * 2 * 2)
105 |         self.dec4 = DecoderBlock(num_filters * 2 * 2, num_filters)
106 |         self.dec5 = ConvRelu(num_filters, num_filters)
107 | 
108 |         self.final = nn.Conv2d(num_filters, num_classes, kernel_size=1)
109 | 
110 |     def forward(self, x):
111 |         """The network's forward pass for which autograd synthesizes the backwards pass.
112 | 
113 |         Args:
114 |             x: the input tensor
115 | 
116 |         Returns:
117 |             The network's output tensor.
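
        Note:
            For example, a `(N, 3, 512, 512)` input batch yields a
            `(N, num_classes, 512, 512)` output: the ResNet encoder downsamples
            by a factor of 32 and the decoder blocks upsample back to the input
            resolution.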
118 |         """
119 |         size = x.size()
120 |         assert size[-1] % 32 == 0 and size[-2] % 32 == 0, "image resolution has to be divisible by 32 for resnet"
121 | 
122 |         enc0 = self.resnet.conv1(x)
123 |         enc0 = self.resnet.bn1(enc0)
124 |         enc0 = self.resnet.relu(enc0)
125 |         enc0 = self.resnet.maxpool(enc0)
126 | 
127 |         enc1 = self.resnet.layer1(enc0)
128 |         enc2 = self.resnet.layer2(enc1)
129 |         enc3 = self.resnet.layer3(enc2)
130 |         enc4 = self.resnet.layer4(enc3)
131 | 
132 |         center = self.center(nn.functional.max_pool2d(enc4, kernel_size=2, stride=2))
133 | 
134 |         dec0 = self.dec0(torch.cat([enc4, center], dim=1))
135 |         dec1 = self.dec1(torch.cat([enc3, dec0], dim=1))
136 |         dec2 = self.dec2(torch.cat([enc2, dec1], dim=1))
137 |         dec3 = self.dec3(torch.cat([enc1, dec2], dim=1))
138 |         dec4 = self.dec4(dec3)
139 |         dec5 = self.dec5(dec4)
140 | 
141 |         return self.final(dec5)
142 | 
--------------------------------------------------------------------------------
/robosat/tools/rasterize.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import collections
3 | import json
4 | import os
5 | import sys
6 | 
7 | import numpy as np
8 | from PIL import Image
9 | from tqdm import tqdm
10 | 
11 | import mercantile
12 | from rasterio.crs import CRS
13 | from rasterio.transform import from_bounds
14 | from rasterio.features import rasterize
15 | from rasterio.warp import transform
16 | from supermercado import burntiles
17 | 
18 | from robosat.config import load_config
19 | from robosat.colors import make_palette
20 | from robosat.tiles import tiles_from_csv
21 | 
22 | 
23 | def add_parser(subparser):
24 |     parser = subparser.add_parser(
25 |         "rasterize", help="rasterize features to label masks", formatter_class=argparse.ArgumentDefaultsHelpFormatter
26 |     )
27 | 
28 |     parser.add_argument("features", type=str, help="path to GeoJSON features file")
29 |     parser.add_argument("tiles", type=str, help="path to .csv tiles file")
30 |     parser.add_argument("out", type=str, help="directory to write converted images")
31 |     parser.add_argument("--dataset", type=str, required=True, help="path to dataset configuration file")
32 |     parser.add_argument("--zoom", type=int, required=True, help="zoom level of tiles")
33 |     parser.add_argument("--size", type=int, default=512, help="size of rasterized image tiles in pixels")
34 | 
35 |     parser.set_defaults(func=main)
36 | 
37 | 
38 | def feature_to_mercator(feature):
39 |     """Normalizes the feature and converts its coordinates to EPSG:3857 (Web Mercator).
40 | 
41 |     Args:
42 |         feature: geojson feature to convert to mercator geometry.
43 |     """
44 |     # Ref: https://gist.github.com/dnomadb/5cbc116aacc352c7126e779c29ab7abe
45 | 
46 |     src_crs = CRS.from_epsg(4326)
47 |     dst_crs = CRS.from_epsg(3857)
48 | 
49 |     geometry = feature["geometry"]
50 |     if geometry["type"] == "Polygon":
51 |         xys = (zip(*part) for part in geometry["coordinates"])
52 |         xys = (list(zip(*transform(src_crs, dst_crs, *xy))) for xy in xys)
53 | 
54 |         yield {"coordinates": list(xys), "type": "Polygon"}
55 | 
56 |     elif geometry["type"] == "MultiPolygon":
57 |         for component in geometry["coordinates"]:
58 |             xys = (zip(*part) for part in component)
59 |             xys = (list(zip(*transform(src_crs, dst_crs, *xy))) for xy in xys)
60 | 
61 |             yield {"coordinates": list(xys), "type": "Polygon"}
62 | 
63 | 
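# A sketch of how `burn` below is typically driven (the tile and feature are
# hypothetical; the coordinates are elided):
#
#   tile = mercantile.Tile(x=70762, y=104119, z=18)
#   feature = {"geometry": {"type": "Polygon", "coordinates": [[...]]}}
#   mask = burn(tile, [feature], 512)  # uint8 array with 1 where features cover the tile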
64 | def burn(tile, features, size):
65 |     """Burn tile with features.
66 | 
67 |     Args:
68 |         tile: the mercantile tile to burn.
69 |         features: the geojson features to burn.
70 |         size: the size of the burned image, in pixels.
71 | 
72 |     Returns:
73 |         The rasterized image of the given size with the features burned in.
74 |     """
75 | 
76 |     # the value you want in the output raster where a shape exists
77 |     burnval = 1
78 |     shapes = ((geometry, burnval) for feature in features for geometry in feature_to_mercator(feature))
79 | 
80 |     bounds = mercantile.xy_bounds(tile)
81 |     tile_transform = from_bounds(*bounds, size, size)
82 | 
83 |     return rasterize(shapes, out_shape=(size, size), transform=tile_transform)
84 | 
85 | 
86 | def main(args):
87 |     dataset = load_config(args.dataset)
88 | 
89 |     classes = dataset["common"]["classes"]
90 |     colors = dataset["common"]["colors"]
91 |     assert len(classes) == len(colors), "classes and colors coincide"
92 | 
93 |     assert len(colors) == 2, "only binary models supported right now"
94 |     bg = colors[0]
95 |     fg = colors[1]
96 | 
97 |     os.makedirs(args.out, exist_ok=True)
98 | 
99 |     # We can only rasterize all tiles at a single zoom.
100 |     assert all(tile.z == args.zoom for tile in tiles_from_csv(args.tiles))
101 | 
102 |     with open(args.features) as f:
103 |         fc = json.load(f)
104 | 
105 |     # Find all tiles the features cover and make a map object for quick lookup.
106 |     feature_map = collections.defaultdict(list)
107 |     for i, feature in enumerate(tqdm(fc["features"], ascii=True, unit="feature")):
108 | 
109 |         if feature["geometry"]["type"] != "Polygon":
110 |             continue
111 | 
112 |         try:
113 |             for tile in burntiles.burn([feature], zoom=args.zoom):
114 |                 feature_map[mercantile.Tile(*tile)].append(feature)
115 |         except ValueError:
116 |             print("Warning: invalid feature {}, skipping".format(i), file=sys.stderr)
117 |             continue
118 | 
119 |     # Burn features to tiles and write to a slippy map directory.
120 |     for tile in tqdm(list(tiles_from_csv(args.tiles)), ascii=True, unit="tile"):
121 |         if tile in feature_map:
122 |             out = burn(tile, feature_map[tile], args.size)
123 |         else:
124 |             out = np.zeros(shape=(args.size, args.size), dtype=np.uint8)
125 | 
126 |         out_dir = os.path.join(args.out, str(tile.z), str(tile.x))
127 |         os.makedirs(out_dir, exist_ok=True)
128 | 
129 |         out_path = os.path.join(out_dir, "{}.png".format(tile.y))
130 | 
131 |         if os.path.exists(out_path):
132 |             prev = np.array(Image.open(out_path))
133 |             out = np.maximum(out, prev)
134 | 
135 |         out = Image.fromarray(out, mode="P")
136 | 
137 |         palette = make_palette(bg, fg)
138 |         out.putpalette(palette)
139 | 
140 |         out.save(out_path, optimize=True)
141 | 
--------------------------------------------------------------------------------
/robosat/osm/road.py:
--------------------------------------------------------------------------------
1 | import sys
2 | 
3 | import math
4 | import osmium
5 | import geojson
6 | import shapely.geometry
7 | 
8 | from robosat.osm.core import FeatureStorage
9 | 
10 | 
11 | class RoadHandler(osmium.SimpleHandler):
12 |     """Extracts road polygon features (visible in satellite imagery) from the map.
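
    Ways tagged with a supported `highway` value are buffered from their
    centerlines into polygons, using the per-class lane counts and widths in
    `highway_attributes` below as a rough heuristic.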
13 | """ 14 | 15 | highway_attributes = { 16 | "motorway": { 17 | "lanes": 4, 18 | "lane_width": 3.75, 19 | "left_hard_shoulder_width": 0.75, 20 | "right_hard_shoulder_width": 3.0, 21 | }, 22 | "trunk": {"lanes": 3, "lane_width": 3.75, "left_hard_shoulder_width": 0.75, "right_hard_shoulder_width": 3.0}, 23 | "primary": { 24 | "lanes": 2, 25 | "lane_width": 3.75, 26 | "left_hard_shoulder_width": 0.50, 27 | "right_hard_shoulder_width": 1.50, 28 | }, 29 | "secondary": { 30 | "lanes": 1, 31 | "lane_width": 3.50, 32 | "left_hard_shoulder_width": 0.00, 33 | "right_hard_shoulder_width": 0.75, 34 | }, 35 | "tertiary": { 36 | "lanes": 1, 37 | "lane_width": 3.50, 38 | "left_hard_shoulder_width": 0.00, 39 | "right_hard_shoulder_width": 0.75, 40 | }, 41 | "unclassified": { 42 | "lanes": 1, 43 | "lane_width": 3.50, 44 | "left_hard_shoulder_width": 0.00, 45 | "right_hard_shoulder_width": 0.00, 46 | }, 47 | "residential": { 48 | "lanes": 1, 49 | "lane_width": 3.50, 50 | "left_hard_shoulder_width": 0.00, 51 | "right_hard_shoulder_width": 0.75, 52 | }, 53 | "service": { 54 | "lanes": 1, 55 | "lane_width": 3.00, 56 | "left_hard_shoulder_width": 0.00, 57 | "right_hard_shoulder_width": 0.00, 58 | }, 59 | "motorway_link": { 60 | "lanes": 2, 61 | "lane_width": 3.75, 62 | "left_hard_shoulder_width": 0.75, 63 | "right_hard_shoulder_width": 3.00, 64 | }, 65 | "trunk_link": { 66 | "lanes": 2, 67 | "lane_width": 3.75, 68 | "left_hard_shoulder_width": 0.50, 69 | "right_hard_shoulder_width": 1.50, 70 | }, 71 | "primary_link": { 72 | "lanes": 1, 73 | "lane_width": 3.50, 74 | "left_hard_shoulder_width": 0.00, 75 | "right_hard_shoulder_width": 0.75, 76 | }, 77 | "secondary_link": { 78 | "lanes": 1, 79 | "lane_width": 3.50, 80 | "left_hard_shoulder_width": 0.00, 81 | "right_hard_shoulder_width": 0.75, 82 | }, 83 | "tertiary_link": { 84 | "lanes": 1, 85 | "lane_width": 3.50, 86 | "left_hard_shoulder_width": 0.00, 87 | "right_hard_shoulder_width": 0.00, 88 | }, 89 | } 90 | 91 | road_filter = set(highway_attributes.keys()) 92 | 93 | EARTH_MEAN_RADIUS = 6371004.0 94 | 95 | def __init__(self, out, batch): 96 | super().__init__() 97 | self.storage = FeatureStorage(out, batch) 98 | 99 | def way(self, w): 100 | if "highway" not in w.tags: 101 | return 102 | 103 | if w.tags["highway"] not in self.road_filter: 104 | return 105 | 106 | left_hard_shoulder_width = self.highway_attributes[w.tags["highway"]]["left_hard_shoulder_width"] 107 | lane_width = self.highway_attributes[w.tags["highway"]]["lane_width"] 108 | lanes = self.highway_attributes[w.tags["highway"]]["lanes"] 109 | right_hard_shoulder_width = self.highway_attributes[w.tags["highway"]]["right_hard_shoulder_width"] 110 | 111 | if "oneway" not in w.tags: 112 | lanes = lanes * 2 113 | elif w.tags["oneway"] == "no": 114 | lanes = lanes * 2 115 | 116 | if "lanes" in w.tags: 117 | try: 118 | # Roads have at least one lane; guard against data issues. 
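                # For example (hypothetical tags): lanes="3" parses to 3,
                # lanes="-1" is clamped to 1, and lanes="1;2" raises ValueError
                # and falls through to the warning below.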
119 | lanes = max(int(w.tags["lanes"]), 1) 120 | 121 | # Todo: take into account related lane tags 122 | # https://wiki.openstreetmap.org/wiki/Tag:busway%3Dlane 123 | # https://wiki.openstreetmap.org/wiki/Tag:cycleway%3Dlane 124 | # https://wiki.openstreetmap.org/wiki/Key:parking:lane 125 | except ValueError: 126 | print("Warning: invalid feature: https://www.openstreetmap.org/way/{}".format(w.id), file=sys.stderr) 127 | 128 | road_width = left_hard_shoulder_width + lane_width * lanes + right_hard_shoulder_width 129 | 130 | if "width" in w.tags: 131 | try: 132 | # At least one meter wide, for road classes specified above 133 | road_width = max(float(w.tags["width"]), 1.0) 134 | 135 | # Todo: handle optional units such as "2 m" 136 | # https://wiki.openstreetmap.org/wiki/Key:width 137 | except ValueError: 138 | print("Warning: invalid feature: https://www.openstreetmap.org/way/{}".format(w.id), file=sys.stderr) 139 | 140 | geometry = geojson.LineString([(n.lon, n.lat) for n in w.nodes]) 141 | shape = shapely.geometry.shape(geometry) 142 | geometry_buffer = shape.buffer(math.degrees(road_width / 2.0 / self.EARTH_MEAN_RADIUS)) 143 | 144 | if shape.is_valid: 145 | feature = geojson.Feature(geometry=shapely.geometry.mapping(geometry_buffer)) 146 | self.storage.add(feature) 147 | else: 148 | print("Warning: invalid feature: https://www.openstreetmap.org/way/{}".format(w.id), file=sys.stderr) 149 | 150 | def flush(self): 151 | self.storage.flush() 152 | -------------------------------------------------------------------------------- /robosat/tools/serve.py: -------------------------------------------------------------------------------- 1 | import os 2 | import io 3 | import sys 4 | import argparse 5 | 6 | import numpy as np 7 | 8 | import torch 9 | import torch.nn as nn 10 | import torch.backends.cudnn 11 | from torchvision.transforms import Compose, Normalize 12 | 13 | import mercantile 14 | import requests 15 | from PIL import Image 16 | from flask import Flask, send_file, render_template, abort 17 | 18 | from robosat.tiles import fetch_image 19 | from robosat.unet import UNet 20 | from robosat.config import load_config 21 | from robosat.colors import make_palette 22 | from robosat.transforms import ConvertImageMode, ImageToTensor 23 | 24 | """ 25 | Simple tile server running a segmentation model on the fly. 
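
For example (paths, checkpoint, and tile endpoint are illustrative), start it with:

    python3 -m robosat.tools serve --model model-unet.toml --dataset dataset-parking.toml \
        --checkpoint checkpoint.pth --url https://tiles.example.com/{z}/{x}/{y}.webp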
26 | 
27 | Endpoints:
28 |   /zoom/x/y.png  Segmentation mask PNG image for the corresponding tile
29 | 
30 | Note: proof of concept for quick visualization only; limitations:
31 |   Needs to be single-threaded; a request runs prediction on the GPU (singleton); should be batch prediction
32 |   Does not take surrounding tiles into account for prediction; border predictions do not have to match
33 |   Downloads satellite images for each request; should use internal data or at least do some caching
34 | """
35 | 
36 | app = Flask(__name__)
37 | 
38 | session = None
39 | predictor = None
40 | tiles = None
41 | token = None
42 | size = None
43 | 
44 | 
45 | @app.route("/")
46 | def index():
47 |     return render_template("map.html", token=token, size=size)
48 | 
49 | 
50 | @app.route("/<int:z>/<int:x>/<int:y>.png")
51 | def tile(z, x, y):
52 | 
53 |     # Todo: predictor should take care of zoom levels
54 |     if z != 18:
55 |         abort(404)
56 | 
57 |     tile = mercantile.Tile(x, y, z)
58 | 
59 |     url = tiles.format(x=tile.x, y=tile.y, z=tile.z)
60 |     res = fetch_image(session, url)
61 | 
62 |     if not res:
63 |         abort(500)
64 | 
65 |     image = Image.open(res)
66 | 
67 |     mask = predictor.segment(image)
68 | 
69 |     return send_png(mask)
70 | 
71 | 
72 | @app.after_request
73 | def after_request(response):
74 |     header = response.headers
75 |     header["Access-Control-Allow-Origin"] = "*"
76 |     return response
77 | 
78 | 
79 | def add_parser(subparser):
80 |     parser = subparser.add_parser(
81 |         "serve",
82 |         help="serves predicted masks with on-demand tileserver",
83 |         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
84 |     )
85 | 
86 |     parser.add_argument("--model", type=str, required=True, help="path to model configuration file")
87 |     parser.add_argument("--dataset", type=str, required=True, help="path to dataset configuration file")
88 | 
89 |     parser.add_argument("--url", type=str, help="endpoint with {z}/{x}/{y} variables to fetch image tiles from")
90 |     parser.add_argument("--checkpoint", type=str, required=True, help="model checkpoint to load")
91 |     parser.add_argument("--tile_size", type=int, default=512, help="tile size for slippy map tiles")
92 |     parser.add_argument("--host", type=str, default="127.0.0.1", help="host to serve on")
93 |     parser.add_argument("--port", type=int, default=5000, help="port to serve on")
94 | 
95 |     parser.set_defaults(func=main)
96 | 
97 | 
98 | def main(args):
99 |     model = load_config(args.model)
100 |     dataset = load_config(args.dataset)
101 | 
102 |     cuda = model["common"]["cuda"]
103 | 
104 |     if cuda and not torch.cuda.is_available():
105 |         sys.exit("Error: CUDA requested but not available")
106 | 
107 |     global size
108 |     size = args.tile_size
109 | 
110 |     global token
111 |     token = os.getenv("MAPBOX_ACCESS_TOKEN")
112 | 
113 |     if not token:
114 |         sys.exit("Error: map token needed for visualizing results; export MAPBOX_ACCESS_TOKEN")
115 | 
116 |     global session
117 |     session = requests.Session()
118 | 
119 |     global tiles
120 |     tiles = args.url
121 | 
122 |     global predictor
123 |     predictor = Predictor(args.checkpoint, model, dataset)
124 | 
125 |     app.run(host=args.host, port=args.port, threaded=False)
126 | 
127 | 
128 | def send_png(image):
129 |     output = io.BytesIO()
130 |     image.save(output, format="png", optimize=True)
131 |     output.seek(0)
132 |     return send_file(output, mimetype="image/png")
133 | 
134 | 
135 | class Predictor:
136 |     def __init__(self, checkpoint, model, dataset):
137 |         cuda = model["common"]["cuda"]
138 | 
139 |         assert torch.cuda.is_available() or not cuda, "cuda is available when requested"
140 | 
141 |         self.cuda = cuda
142 | 
self.device = torch.device("cuda" if cuda else "cpu") 143 | 144 | self.checkpoint = checkpoint 145 | self.model = model 146 | self.dataset = dataset 147 | 148 | self.net = self.net_from_chkpt_() 149 | 150 | def segment(self, image): 151 | # don't track tensors with autograd during prediction 152 | with torch.no_grad(): 153 | mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225] 154 | 155 | transform = Compose([ConvertImageMode(mode="RGB"), ImageToTensor(), Normalize(mean=mean, std=std)]) 156 | image = transform(image) 157 | 158 | batch = image.unsqueeze(0).to(self.device) 159 | 160 | output = self.net(batch) 161 | 162 | output = output.cpu().data.numpy() 163 | output = output.squeeze(0) 164 | 165 | mask = output.argmax(axis=0).astype(np.uint8) 166 | 167 | mask = Image.fromarray(mask, mode="P") 168 | 169 | palette = make_palette(*self.dataset["common"]["colors"]) 170 | mask.putpalette(palette) 171 | 172 | return mask 173 | 174 | def net_from_chkpt_(self): 175 | def map_location(storage, _): 176 | return storage.cuda() if self.cuda else storage.cpu() 177 | 178 | # https://github.com/pytorch/pytorch/issues/7178 179 | chkpt = torch.load(self.checkpoint, map_location=map_location) 180 | 181 | num_classes = len(self.dataset["common"]["classes"]) 182 | 183 | net = UNet(num_classes).to(self.device) 184 | net = nn.DataParallel(net) 185 | 186 | if self.cuda: 187 | torch.backends.cudnn.benchmark = True 188 | 189 | net.load_state_dict(chkpt["state_dict"]) 190 | net.eval() 191 | 192 | return net 193 | -------------------------------------------------------------------------------- /tests/fixtures/parking/features.geojson: -------------------------------------------------------------------------------- 1 | { 2 | "type": "FeatureCollection", 3 | "features": [ 4 | { 5 | "type": "Feature", 6 | "geometry": { 7 | "type": "Polygon", 8 | "coordinates": [ 9 | [ 10 | [ 11 | -82.8224934, 12 | 34.6787452 13 | ], 14 | [ 15 | -82.8216356, 16 | 34.6787385 17 | ], 18 | [ 19 | -82.8215841, 20 | 34.6778632 21 | ], 22 | [ 23 | -82.8218244, 24 | 34.6775386 25 | ], 26 | [ 27 | -82.8220047, 28 | 34.6773692 29 | ], 30 | [ 31 | -82.8234209, 32 | 34.6773974 33 | ], 34 | [ 35 | -82.8234818, 36 | 34.6774475 37 | ], 38 | [ 39 | -82.8235839, 40 | 34.6775315 41 | ], 42 | [ 43 | -82.8236513, 44 | 34.6781899 45 | ], 46 | [ 47 | -82.8230346, 48 | 34.6784279 49 | ], 50 | [ 51 | -82.8226999, 52 | 34.6785903 53 | ], 54 | [ 55 | -82.8224934, 56 | 34.6787452 57 | ] 58 | ] 59 | ] 60 | }, 61 | "properties": {} 62 | }, 63 | { 64 | "type": "Feature", 65 | "geometry": { 66 | "type": "Polygon", 67 | "coordinates": [ 68 | [ 69 | [ 70 | -106.5503557, 71 | 35.1168049 72 | ], 73 | [ 74 | -106.5503088, 75 | 35.1167621 76 | ], 77 | [ 78 | -106.5501478, 79 | 35.1167522 80 | ], 81 | [ 82 | -106.5500325, 83 | 35.1167511 84 | ], 85 | [ 86 | -106.5500271, 87 | 35.1168959 88 | ], 89 | [ 90 | -106.5500285, 91 | 35.1170813 92 | ], 93 | [ 94 | -106.5500244, 95 | 35.1171098 96 | ], 97 | [ 98 | -106.5499386, 99 | 35.117112 100 | ], 101 | [ 102 | -106.5499476, 103 | 35.117322 104 | ], 105 | [ 106 | -106.5500982, 107 | 35.1173248 108 | ], 109 | [ 110 | -106.5502135, 111 | 35.1174938 112 | ], 113 | [ 114 | -106.5502377, 115 | 35.1175256 116 | ], 117 | [ 118 | -106.5502699, 119 | 35.117541 120 | ], 121 | [ 122 | -106.5504858, 123 | 35.1175453 124 | ], 125 | [ 126 | -106.5506865, 127 | 35.117536 128 | ], 129 | [ 130 | -106.5506741, 131 | 35.1172861 132 | ], 133 | [ 134 | -106.5506729, 135 | 35.1171422 136 | ], 137 | [ 138 | -106.550573, 139 | 35.1171366 140 | ], 
141 |             [
142 |               -106.5505423,
143 |               35.1170818
144 |             ],
145 |             [
146 |               -106.5505412,
147 |               35.1170446
148 |             ],
149 |             [
150 |               -106.5502641,
151 |               35.1170428
152 |             ],
153 |             [
154 |               -106.55023,
155 |               35.1169657
156 |             ],
157 |             [
158 |               -106.5502289,
159 |               35.1168654
160 |             ],
161 |             [
162 |               -106.5503061,
163 |               35.1168412
164 |             ],
165 |             [
166 |               -106.5503557,
167 |               35.1168049
168 |             ]
169 |           ]
170 |         ]
171 |       },
172 |       "properties": {}
173 |     }
174 |   ]
175 | }
176 | 
--------------------------------------------------------------------------------
/robosat/transforms.py:
--------------------------------------------------------------------------------
1 | """PyTorch-compatible transformations.
2 | """
3 | 
4 | import random
5 | 
6 | import torch
7 | import numpy as np
8 | from PIL import Image
9 | 
10 | import torchvision
11 | 
12 | 
13 | # Callable to convert a RGB image into a PyTorch tensor.
14 | ImageToTensor = torchvision.transforms.ToTensor
15 | 
16 | 
17 | class MaskToTensor:
18 |     """Callable to convert a PIL image into a PyTorch tensor.
19 |     """
20 | 
21 |     def __call__(self, image):
22 |         """Converts the image into a tensor.
23 | 
24 |         Args:
25 |             image: the PIL image to convert into a PyTorch tensor.
26 | 
27 |         Returns:
28 |             The converted PyTorch tensor.
29 |         """
30 | 
31 |         return torch.from_numpy(np.array(image, dtype=np.uint8)).long()
32 | 
33 | 
34 | class ConvertImageMode:
35 |     """Callable to convert a PIL image into a specific image mode (e.g. RGB, P)
36 |     """
37 | 
38 |     def __init__(self, mode):
39 |         """Creates a `ConvertImageMode` instance.
40 | 
41 |         Args:
42 |             mode: the PIL image mode string
43 |         """
44 | 
45 |         self.mode = mode
46 | 
47 |     def __call__(self, image):
48 |         """Applies the mode conversion to an image.
49 | 
50 |         Args:
51 |             image: the PIL.Image image to transform.
52 |         """
53 | 
54 |         return image.convert(self.mode)
55 | 
56 | 
57 | class JointCompose:
58 |     """Callable to transform an image and its mask at the same time.
59 |     """
60 | 
61 |     def __init__(self, transforms):
62 |         """Creates a `JointCompose` instance.
63 | 
64 |         Args:
65 |             transforms: a list of joint (images, mask) transformations to apply in order.
66 |         """
67 | 
68 |         self.transforms = transforms
69 | 
70 |     def __call__(self, images, mask):
71 |         """Applies multiple transformations to the images and the mask at the same time.
72 | 
73 |         Args:
74 |             images: the PIL.Image images to transform.
75 |             mask: the PIL.Image mask to transform.
76 | 
77 |         Returns:
78 |             The transformed PIL.Image (images, mask) tuple.
79 |         """
80 | 
81 |         for transform in self.transforms:
82 |             images, mask = transform(images, mask)
83 | 
84 |         return images, mask
85 | 
86 | 
87 | class JointTransform:
88 |     """Callable to compose non-joint transformations into joint-transformations on images and mask.
89 | 
90 |     Note: must not be used with stateful transformations (e.g. rngs) which need to be in sync for image and mask.
91 |     """
92 | 
93 |     def __init__(self, image_transform, mask_transform):
94 |         """Creates a `JointTransform` instance.
95 | 
96 |         Args:
97 |             image_transform: the transformation to run on the images or `None` for no-op.
98 |             mask_transform: the transformation to run on the mask or `None` for no-op.
99 | 
100 |         Note:
101 |             calling the instance returns the (images, mask) tuple with the transformations applied.
102 |         """
103 | 
104 |         self.image_transform = image_transform
105 |         self.mask_transform = mask_transform
106 | 
107 |     def __call__(self, images, mask):
108 |         """Applies the transformations associated with images and their mask.
109 | 
110 |         Args:
111 |             images: the PIL.Image images to transform.
112 |             mask: the PIL.Image mask to transform.
113 | 
114 |         Returns:
115 |             The PIL.Image (images, mask) tuple with images and mask transformed.
116 |         """
117 | 
118 |         if self.image_transform is not None:
119 |             images = [self.image_transform(v) for v in images]
120 | 
121 |         if self.mask_transform is not None:
122 |             mask = self.mask_transform(mask)
123 | 
124 |         return images, mask
125 | 
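# A sketch of composing joint transforms, mirroring how a training pipeline
# might wire them up (parameter values are illustrative):
#
#   transform = JointCompose(
#       [
#           JointTransform(ConvertImageMode("RGB"), ConvertImageMode("P")),
#           JointRandomHorizontalFlip(0.5),
#           JointTransform(ImageToTensor(), MaskToTensor()),
#       ]
#   )
#   images, mask = transform([image], mask)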
126 | 
127 | class JointRandomVerticalFlip:
128 |     """Callable to randomly flip images and their mask top to bottom.
129 |     """
130 | 
131 |     def __init__(self, p):
132 |         """Creates a `JointRandomVerticalFlip` instance.
133 | 
134 |         Args:
135 |             p: the probability for flipping.
136 |         """
137 | 
138 |         self.p = p
139 | 
140 |     def __call__(self, images, mask):
141 |         """Randomly flips images and their mask top to bottom.
142 | 
143 |         Args:
144 |             images: the PIL.Image images to transform.
145 |             mask: the PIL.Image mask to transform.
146 | 
147 |         Returns:
148 |             The PIL.Image (images, mask) tuple with either images and mask flipped or none of them flipped.
149 |         """
150 | 
151 |         if random.random() < self.p:
152 |             return [v.transpose(Image.FLIP_TOP_BOTTOM) for v in images], mask.transpose(Image.FLIP_TOP_BOTTOM)
153 |         else:
154 |             return images, mask
155 | 
156 | 
157 | class JointRandomHorizontalFlip:
158 |     """Callable to randomly flip images and their mask left to right.
159 |     """
160 | 
161 |     def __init__(self, p):
162 |         """Creates a `JointRandomHorizontalFlip` instance.
163 | 
164 |         Args:
165 |             p: the probability for flipping.
166 |         """
167 | 
168 |         self.p = p
169 | 
170 |     def __call__(self, images, mask):
171 |         """Randomly flips images and their mask left to right.
172 | 
173 |         Args:
174 |             images: the PIL.Image images to transform.
175 |             mask: the PIL.Image mask to transform.
176 | 
177 |         Returns:
178 |             The PIL.Image (images, mask) tuple with either images and mask flipped or none of them flipped.
179 |         """
180 | 
181 |         if random.random() < self.p:
182 |             return [v.transpose(Image.FLIP_LEFT_RIGHT) for v in images], mask.transpose(Image.FLIP_LEFT_RIGHT)
183 |         else:
184 |             return images, mask
185 | 
186 | 
187 | class JointRandomRotation:
188 |     """Callable to randomly rotate images and their mask.
189 |     """
190 | 
191 |     def __init__(self, p, degree):
192 |         """Creates a `JointRandomRotation` instance.
193 | 
194 |         Args:
195 |             p: the probability for rotating.
196 |             degree: the rotation in degrees; one of 90, 180, or 270.
197 |         """
198 | 
199 |         self.p = p
200 | 
201 |         methods = {90: Image.ROTATE_90, 180: Image.ROTATE_180, 270: Image.ROTATE_270}
202 | 
203 |         if degree not in methods:
204 |             raise NotImplementedError("We only support multiple of 90 degree rotations for now")
205 | 
206 |         self.method = methods[degree]
207 | 
208 |     def __call__(self, images, mask):
209 |         """Randomly rotates images and their mask.
210 | 
211 |         Args:
212 |             images: the PIL.Image images to transform.
213 |             mask: the PIL.Image mask to transform.
214 | 
215 |         Returns:
216 |             The PIL.Image (images, mask) tuple with either images and mask rotated or none of them rotated.
217 |         """
218 | 
219 |         if random.random() < self.p:
220 |             return [v.transpose(self.method) for v in images], mask.transpose(self.method)
221 |         else:
222 |             return images, mask
223 | 
--------------------------------------------------------------------------------
/robosat/tiles.py:
--------------------------------------------------------------------------------
1 | """Slippy Map Tiles.
2 | 3 | The Slippy Map tile spec works with a directory structure of `z/x/y.png` where 4 | - `z` is the zoom level 5 | - `x` is the left / right index 6 | - `y` is the top / bottom index 7 | 8 | See: https://wiki.openstreetmap.org/wiki/Slippy_map_tilenames 9 | """ 10 | 11 | import csv 12 | import io 13 | import os 14 | 15 | from PIL import Image 16 | import mercantile 17 | 18 | 19 | def pixel_to_location(tile, dx, dy): 20 | """Converts a pixel in a tile to a coordinate. 21 | 22 | Args: 23 | tile: the mercantile tile to calculate the location in. 24 | dx: the relative x offset in range [0, 1]. 25 | dy: the relative y offset in range [0, 1]. 26 | 27 | Returns: 28 | The coordinate for the pixel in the tile. 29 | """ 30 | 31 | assert 0 <= dx <= 1, "x offset is in [0, 1]" 32 | assert 0 <= dy <= 1, "y offset is in [0, 1]" 33 | 34 | west, south, east, north = mercantile.bounds(tile) 35 | 36 | def lerp(a, b, c): 37 | return a + c * (b - a) 38 | 39 | lon = lerp(west, east, dx) 40 | lat = lerp(south, north, dy) 41 | 42 | return lon, lat 43 | 44 | 45 | def fetch_image(session, url, timeout=10): 46 | """Fetches the image representation for a tile. 47 | 48 | Args: 49 | session: the HTTP session to fetch the image from. 50 | url: the tile imagery's url to fetch the image from. 51 | timeout: the HTTP timeout in seconds. 52 | 53 | Returns: 54 | The satellite imagery as bytes or None in case of error. 55 | """ 56 | 57 | try: 58 | resp = session.get(url, timeout=timeout) 59 | resp.raise_for_status() 60 | return io.BytesIO(resp.content) 61 | except Exception: 62 | return None 63 | 64 | 65 | def tiles_from_slippy_map(root): 66 | """Loads files from an on-disk slippy map directory structure. 67 | 68 | Args: 69 | root: the base directory with layout `z/x/y.*`. 70 | 71 | Yields: 72 | The mercantile tiles and file paths from the slippy map directory. 73 | """ 74 | 75 | # The Python string functions (.isdigit, .isdecimal, etc.) handle 76 | # unicode codepoints; we only care about digits convertible to int 77 | def isdigit(v): 78 | try: 79 | _ = int(v) # noqa: F841 80 | return True 81 | except ValueError: 82 | return False 83 | 84 | for z in os.listdir(root): 85 | if not isdigit(z): 86 | continue 87 | 88 | for x in os.listdir(os.path.join(root, z)): 89 | if not isdigit(x): 90 | continue 91 | 92 | for name in os.listdir(os.path.join(root, z, x)): 93 | y = os.path.splitext(name)[0] 94 | 95 | if not isdigit(y): 96 | continue 97 | 98 | tile = mercantile.Tile(x=int(x), y=int(y), z=int(z)) 99 | path = os.path.join(root, z, x, name) 100 | yield tile, path 101 | 102 | 103 | def tiles_from_csv(path): 104 | """Reads tiles from a line-delimited csv file. 105 | 106 | Args: 107 | path: the path to read the csv file from. 108 | 109 | Yields: 110 | The mercantile tiles from the csv file. 111 | """ 112 | 113 | with open(path) as fp: 114 | reader = csv.reader(fp) 115 | 116 | for row in reader: 117 | if not row: 118 | continue 119 | 120 | yield mercantile.Tile(*map(int, row)) 121 | 122 | 123 | def stitch_image(into, into_box, image, image_box): 124 | """Stitches two images together in-place. 125 | 126 | Args: 127 | into: the image to stitch into and modify in-place. 128 | into_box: left, upper, right, lower image coordinates for where to place `image` in `into`. 129 | image: the image to stitch into `into`. 130 | image_box: left, upper, right, lower image coordinates for where to extract the sub-image from `image`. 131 | 132 | Note: 133 | Both boxes must be of the same size.
134 | """ 135 | 136 | into.paste(image.crop(box=image_box), box=into_box) 137 | 138 | 139 | def adjacent_tile(tile, dx, dy, tiles): 140 | """Retrieves an adjacent tile from a tile store. 141 | 142 | Args: 143 | tile: the original tile to get an adjacent tile for. 144 | dx: the offset in tile x direction. 145 | dy: the offset in tile y direction. 146 | tiles: the tile store to get tiles from; must support `__getitem__` with tiles. 147 | 148 | Returns: 149 | The adjacent tile's image or `None` if it does not exist. 150 | """ 151 | 152 | x, y, z = map(int, [tile.x, tile.y, tile.z]) 153 | other = mercantile.Tile(x=x + dx, y=y + dy, z=z) 154 | 155 | try: 156 | path = tiles[other] 157 | return Image.open(path).convert("RGB") 158 | except KeyError: 159 | return None 160 | 161 | 162 | def buffer_tile_image(tile, tiles, overlap, tile_size, nodata=0): 163 | """Buffers a tile image adding borders on all sides based on adjacent tiles. 164 | 165 | Args: 166 | tile: the tile to buffer. 167 | tiles: available tiles; must be a mapping of tiles to their filesystem paths. 168 | overlap: the tile border to add on every side; in pixel. 169 | tile_size: the tile size. 170 | nodata: the color value to use when no adjacent tile is available. 171 | 172 | Returns: 173 | The composite image containing the original tile plus tile overlap on all sides. 174 | It's size is `tile_size` + 2 * `overlap` pixel for each side. 175 | """ 176 | 177 | tiles = dict(tiles) 178 | x, y, z = map(int, [tile.x, tile.y, tile.z]) 179 | 180 | # Todo: instead of nodata we should probably mirror the center image 181 | composite_size = tile_size + 2 * overlap 182 | composite = Image.new(mode="RGB", size=(composite_size, composite_size), color=nodata) 183 | 184 | path = tiles[tile] 185 | center = Image.open(path).convert("RGB") 186 | composite.paste(center, box=(overlap, overlap)) 187 | 188 | top_left = adjacent_tile(tile, -1, -1, tiles) 189 | top_right = adjacent_tile(tile, +1, -1, tiles) 190 | bottom_left = adjacent_tile(tile, -1, +1, tiles) 191 | bottom_right = adjacent_tile(tile, +1, +1, tiles) 192 | 193 | top = adjacent_tile(tile, 0, -1, tiles) 194 | left = adjacent_tile(tile, -1, 0, tiles) 195 | bottom = adjacent_tile(tile, 0, +1, tiles) 196 | right = adjacent_tile(tile, +1, 0, tiles) 197 | 198 | def maybe_stitch(maybe_tile, composite_box, tile_box): 199 | if maybe_tile: 200 | stitch_image(composite, composite_box, maybe_tile, tile_box) 201 | 202 | maybe_stitch(top_left, (0, 0, overlap, overlap), (tile_size - overlap, tile_size - overlap, tile_size, tile_size)) 203 | maybe_stitch( 204 | top_right, (tile_size + overlap, 0, composite_size, overlap), (0, tile_size - overlap, overlap, tile_size) 205 | ) 206 | maybe_stitch( 207 | bottom_left, 208 | (0, composite_size - overlap, overlap, composite_size), 209 | (tile_size - overlap, 0, tile_size, overlap), 210 | ) 211 | maybe_stitch( 212 | bottom_right, 213 | (composite_size - overlap, composite_size - overlap, composite_size, composite_size), 214 | (0, 0, overlap, overlap), 215 | ) 216 | maybe_stitch(top, (overlap, 0, composite_size - overlap, overlap), (0, tile_size - overlap, tile_size, tile_size)) 217 | maybe_stitch(left, (0, overlap, overlap, composite_size - overlap), (tile_size - overlap, 0, tile_size, tile_size)) 218 | maybe_stitch( 219 | bottom, 220 | (overlap, composite_size - overlap, composite_size - overlap, composite_size), 221 | (0, 0, tile_size, overlap), 222 | ) 223 | maybe_stitch( 224 | right, (composite_size - overlap, overlap, composite_size, composite_size - overlap), (0, 
0, overlap, tile_size) 225 | ) 226 | 227 | return composite 228 | -------------------------------------------------------------------------------- /robosat/tools/train.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import argparse 4 | import collections 5 | from contextlib import contextmanager 6 | 7 | from PIL import Image 8 | 9 | import torch 10 | import torch.backends.cudnn 11 | from torch.nn import DataParallel 12 | from torch.optim import Adam 13 | from torch.utils.data import DataLoader 14 | from torchvision.transforms import Resize, CenterCrop, Normalize 15 | 16 | from tqdm import tqdm 17 | 18 | from robosat.transforms import ( 19 | JointCompose, 20 | JointTransform, 21 | JointRandomHorizontalFlip, 22 | JointRandomRotation, 23 | ConvertImageMode, 24 | ImageToTensor, 25 | MaskToTensor, 26 | ) 27 | from robosat.datasets import SlippyMapTilesConcatenation 28 | from robosat.metrics import Metrics 29 | from robosat.losses import CrossEntropyLoss2d, mIoULoss2d, FocalLoss2d, LovaszLoss2d 30 | from robosat.unet import UNet 31 | from robosat.utils import plot 32 | from robosat.config import load_config 33 | from robosat.log import Log 34 | 35 | 36 | @contextmanager 37 | def no_grad(): 38 | with torch.no_grad(): 39 | yield 40 | 41 | 42 | def add_parser(subparser): 43 | parser = subparser.add_parser( 44 | "train", help="trains model on dataset", formatter_class=argparse.ArgumentDefaultsHelpFormatter 45 | ) 46 | 47 | parser.add_argument("--model", type=str, required=True, help="path to model configuration file") 48 | parser.add_argument("--dataset", type=str, required=True, help="path to dataset configuration file") 49 | parser.add_argument("--checkpoint", type=str, required=False, help="path to a model checkpoint (to retrain)") 50 | parser.add_argument("--resume", action="store_true", help="resume training or fine-tuning (if checkpoint)") 51 | parser.add_argument("--workers", type=int, default=0, help="number of workers pre-processing images") 52 | 53 | parser.set_defaults(func=main) 54 | 55 | 56 | def main(args): 57 | model = load_config(args.model) 58 | dataset = load_config(args.dataset) 59 | 60 | device = torch.device("cuda" if model["common"]["cuda"] else "cpu") 61 | 62 | if model["common"]["cuda"] and not torch.cuda.is_available(): 63 | sys.exit("Error: CUDA requested but not available") 64 | 65 | os.makedirs(model["common"]["checkpoint"], exist_ok=True) 66 | 67 | num_classes = len(dataset["common"]["classes"]) 68 | net = UNet(num_classes) 69 | net = DataParallel(net) 70 | net = net.to(device) 71 | 72 | if model["common"]["cuda"]: 73 | torch.backends.cudnn.benchmark = True 74 | 75 | try: 76 | weight = torch.Tensor(dataset["weights"]["values"]) 77 | except KeyError: 78 | if model["opt"]["loss"] in ("CrossEntropy", "mIoU", "Focal"): 79 | sys.exit("Error: the loss function used requires dataset weights values") 80 | 81 | optimizer = Adam(net.parameters(), lr=model["opt"]["lr"]) 82 | 83 | resume = 0 84 | if args.checkpoint: 85 | 86 | def map_location(storage, _): 87 | return storage.cuda() if model["common"]["cuda"] else storage.cpu() 88 | 89 | # https://github.com/pytorch/pytorch/issues/7178 90 | chkpt = torch.load(args.checkpoint, map_location=map_location) 91 | net.load_state_dict(chkpt["state_dict"]) 92 | 93 | if args.resume: 94 | optimizer.load_state_dict(chkpt["optimizer"]) 95 | resume = chkpt["epoch"] 96 | 97 | if model["opt"]["loss"] == "CrossEntropy": 98 | criterion =
CrossEntropyLoss2d(weight=weight).to(device) 99 | elif model["opt"]["loss"] == "mIoU": 100 | criterion = mIoULoss2d(weight=weight).to(device) 101 | elif model["opt"]["loss"] == "Focal": 102 | criterion = FocalLoss2d(weight=weight).to(device) 103 | elif model["opt"]["loss"] == "Lovasz": 104 | criterion = LovaszLoss2d().to(device) 105 | else: 106 | sys.exit("Error: unknown [opt][loss] value!") 107 | 108 | train_loader, val_loader = get_dataset_loaders(model, dataset, args.workers) 109 | 110 | num_epochs = model["opt"]["epochs"] 111 | if resume >= num_epochs: 112 | sys.exit("Error: Epoch {} set in {} already reached by the checkpoint provided".format(num_epochs, args.model)) 113 | 114 | history = collections.defaultdict(list) 115 | log = Log(os.path.join(model["common"]["checkpoint"], "log")) 116 | 117 | log.log("--- Hyper Parameters on Dataset: {} ---".format(dataset["common"]["dataset"])) 118 | log.log("Batch Size:\t {}".format(model["common"]["batch_size"])) 119 | log.log("Image Size:\t {}".format(model["common"]["image_size"])) 120 | log.log("Learning Rate:\t {}".format(model["opt"]["lr"])) 121 | log.log("Loss function:\t {}".format(model["opt"]["loss"])) 122 | if "weight" in locals(): 123 | log.log("Weights:\t {}".format(dataset["weights"]["values"])) 124 | log.log("---") 125 | 126 | for epoch in range(resume, num_epochs): 127 | log.log("Epoch: {}/{}".format(epoch + 1, num_epochs)) 128 | 129 | train_hist = train(train_loader, num_classes, device, net, optimizer, criterion) 130 | log.log( 131 | "Train loss: {:.4f}, mIoU: {:.3f}, {} IoU: {:.3f}, MCC: {:.3f}".format( 132 | train_hist["loss"], 133 | train_hist["miou"], 134 | dataset["common"]["classes"][1], 135 | train_hist["fg_iou"], 136 | train_hist["mcc"], 137 | ) 138 | ) 139 | 140 | for k, v in train_hist.items(): 141 | history["train " + k].append(v) 142 | 143 | val_hist = validate(val_loader, num_classes, device, net, criterion) 144 | log.log( 145 | "Validate loss: {:.4f}, mIoU: {:.3f}, {} IoU: {:.3f}, MCC: {:.3f}".format( 146 | val_hist["loss"], val_hist["miou"], dataset["common"]["classes"][1], val_hist["fg_iou"], val_hist["mcc"] 147 | ) 148 | ) 149 | 150 | for k, v in val_hist.items(): 151 | history["val " + k].append(v) 152 | 153 | visual = "history-{:05d}-of-{:05d}.png".format(epoch + 1, num_epochs) 154 | plot(os.path.join(model["common"]["checkpoint"], visual), history) 155 | 156 | checkpoint = "checkpoint-{:05d}-of-{:05d}.pth".format(epoch + 1, num_epochs) 157 | 158 | states = {"epoch": epoch + 1, "state_dict": net.state_dict(), "optimizer": optimizer.state_dict()} 159 | 160 | torch.save(states, os.path.join(model["common"]["checkpoint"], checkpoint)) 161 | 162 | 163 | def train(loader, num_classes, device, net, optimizer, criterion): 164 | num_samples = 0 165 | running_loss = 0 166 | 167 | metrics = Metrics(range(num_classes)) 168 | 169 | net.train() 170 | 171 | for images, masks, tiles in tqdm(loader, desc="Train", unit="batch", ascii=True): 172 | images = images.to(device) 173 | masks = masks.to(device) 174 | 175 | assert images.size()[2:] == masks.size()[1:], "resolutions for images and masks are in sync" 176 | 177 | num_samples += int(images.size(0)) 178 | 179 | optimizer.zero_grad() 180 | outputs = net(images) 181 | 182 | assert outputs.size()[2:] == masks.size()[1:], "resolutions for predictions and masks are in sync" 183 | assert outputs.size()[1] == num_classes, "classes for predictions and dataset are in sync" 184 | 185 | loss = criterion(outputs, masks) 186 | loss.backward() 187 | 188 | optimizer.step() 189 | 190 |
running_loss += loss.item() 191 | 192 | for mask, output in zip(masks, outputs): 193 | prediction = output.detach() 194 | metrics.add(mask, prediction) 195 | 196 | return { 197 | "loss": running_loss / num_samples, 198 | "miou": metrics.get_miou(), 199 | "fg_iou": metrics.get_fg_iou(), 200 | "mcc": metrics.get_mcc(), 201 | } 202 | 203 | 204 | @no_grad() 205 | def validate(loader, num_classes, device, net, criterion): 206 | num_samples = 0 207 | running_loss = 0 208 | 209 | metrics = Metrics(range(num_classes)) 210 | 211 | net.eval() 212 | 213 | for images, masks, tiles in tqdm(loader, desc="Validate", unit="batch", ascii=True): 214 | images = images.to(device) 215 | masks = masks.to(device) 216 | 217 | assert images.size()[2:] == masks.size()[1:], "resolutions for images and masks are in sync" 218 | 219 | num_samples += int(images.size(0)) 220 | 221 | outputs = net(images) 222 | 223 | assert outputs.size()[2:] == masks.size()[1:], "resolutions for predictions and masks are in sync" 224 | assert outputs.size()[1] == num_classes, "classes for predictions and dataset are in sync" 225 | 226 | loss = criterion(outputs, masks) 227 | 228 | running_loss += loss.item() 229 | 230 | for mask, output in zip(masks, outputs): 231 | metrics.add(mask, output) 232 | 233 | return { 234 | "loss": running_loss / num_samples, 235 | "miou": metrics.get_miou(), 236 | "fg_iou": metrics.get_fg_iou(), 237 | "mcc": metrics.get_mcc(), 238 | } 239 | 240 | 241 | def get_dataset_loaders(model, dataset, workers): 242 | target_size = (model["common"]["image_size"],) * 2 243 | batch_size = model["common"]["batch_size"] 244 | path = dataset["common"]["dataset"] 245 | 246 | mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225] 247 | 248 | transform = JointCompose( 249 | [ 250 | JointTransform(ConvertImageMode("RGB"), ConvertImageMode("P")), 251 | JointTransform(Resize(target_size, Image.BILINEAR), Resize(target_size, Image.NEAREST)), 252 | JointTransform(CenterCrop(target_size), CenterCrop(target_size)), 253 | JointRandomHorizontalFlip(0.5), 254 | JointRandomRotation(0.5, 90), 255 | JointRandomRotation(0.5, 90), 256 | JointRandomRotation(0.5, 90), 257 | JointTransform(ImageToTensor(), MaskToTensor()), 258 | JointTransform(Normalize(mean=mean, std=std), None), 259 | ] 260 | ) 261 | 262 | train_dataset = SlippyMapTilesConcatenation( 263 | [os.path.join(path, "training", "images")], os.path.join(path, "training", "labels"), transform 264 | ) 265 | 266 | val_dataset = SlippyMapTilesConcatenation( 267 | [os.path.join(path, "validation", "images")], os.path.join(path, "validation", "labels"), transform 268 | ) 269 | 270 | assert len(train_dataset) > 0, "at least one tile in training dataset" 271 | assert len(val_dataset) > 0, "at least one tile in validation dataset" 272 | 273 | train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, drop_last=True, num_workers=workers) 274 | val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, drop_last=True, num_workers=workers) 275 | 276 | return train_loader, val_loader 277 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | **Note: RoboSat is neither maintained nor actively developed any longer by Mapbox. See [this issue](https://github.com/mapbox/robosat/issues/184). 2 | The main developers ([@daniel-j-h](https://github.com/daniel-j-h), [@bkowshik](https://github.com/bkowshik)) are no longer with Mapbox.** 3 | 4 |

# RoboSat 5 | 6 | 7 | Generic ecosystem for feature extraction from aerial and satellite imagery 8 | 9 | ![RoboSat pipeline extracting buildings from aerial imagery](./assets/buildings.png) 10 | Berlin aerial imagery, segmentation mask, building outlines, simplified GeoJSON polygons 11 | 12 | 13 |
14 | 15 | 16 | ## Table of Contents 17 | 18 | 1. [Overview](#overview) 19 | 2. [Installation](#installation) 20 | 3. [Usage](#usage) 21 | - [extract](#rs-extract) 22 | - [cover](#rs-cover) 23 | - [download](#rs-download) 24 | - [rasterize](#rs-rasterize) 25 | - [train](#rs-train) 26 | - [export](#rs-export) 27 | - [predict](#rs-predict) 28 | - [masks](#rs-masks) 29 | - [features](#rs-features) 30 | - [merge](#rs-merge) 31 | - [dedupe](#rs-dedupe) 32 | - [serve](#rs-serve) 33 | - [weights](#rs-weights) 34 | - [compare](#rs-compare) 35 | - [subset](#rs-subset) 36 | 4. [Extending](#extending) 37 | - [Bring your own imagery](#bring-your-own-imagery) 38 | - [Bring your own masks](#bring-your-own-masks) 39 | - [Add support for a feature in pre-processing](#add-support-for-a-feature-in-pre-processing) 40 | - [Add support for a feature in post-processing](#add-support-for-a-feature-in-post-processing) 41 | 5. [Contributing](#contributing) 42 | 6. [License](#license) 43 | 44 | 45 | ## Overview 46 | 47 | RoboSat is an end-to-end pipeline written in Python 3 for feature extraction from aerial and satellite imagery. 48 | Features can be anything visually distinguishable in the imagery, for example: buildings, parking lots, roads, or cars. 49 | 50 | Have a look at 51 | - [this OpenStreetMap diary post](https://www.openstreetmap.org/user/daniel-j-h/diary/44145) where we first introduced RoboSat and show some results 52 | - [this OpenStreetMap diary post](https://www.openstreetmap.org/user/daniel-j-h/diary/44321) where we extract building footprints based on drone imagery in Tanzania 53 | - [this OpenStreetMap diary post](https://www.openstreetmap.org/user/daniel-j-h/diary/45042) where we summarize the v1.1 release 54 | - [this OpenStreetMap diary post](https://www.openstreetmap.org/user/daniel-j-h/diary/368726) where we summarize the v1.2 release 55 | - [this OpenStreetMap diary post](https://www.openstreetmap.org/user/daniel-j-h/diary/368771) where we run RoboSat v1.2 on aerial imagery for Bavaria, Germany 56 | - [this OpenStreetMap diary post](https://www.openstreetmap.org/user/maning/diary/44462) where [Maning](https://github.com/maning) runs RoboSat on imagery from the Philippines 57 | 58 | The tools RoboSat comes with can be categorized as follows: 59 | - data preparation: creating a dataset for training feature extraction models 60 | - training and modeling: segmentation models for feature extraction in images 61 | - post-processing: turning segmentation results into cleaned and simple geometries 62 | 63 | Tools work with the [Slippy Map](https://wiki.openstreetmap.org/wiki/Slippy_map_tilenames) tile format to abstract away geo-referenced imagery behind tiles of the same size. 64 | 65 | ![](./assets/pipeline-01.png) 66 | 67 | The data preparation tools help you with getting started creating a dataset for training feature extraction models. 68 | Such a dataset consists of aerial or satellite imagery and corresponding masks for the features you want to extract. 69 | We provide convenient tools to automatically create these datasets, downloading aerial imagery from the [Mapbox](https://www.mapbox.com) Maps API and generating masks from [OpenStreetMap](https://www.openstreetmap.org) geometries, but we are not bound to these sources. 70 | 71 | ![](./assets/pipeline-02.png) 72 | 73 | The modelling tools help you with training fully convolutional neural nets for segmentation. 74 | We recommend using (potentially multiple) GPUs for these tools: we are running RoboSat on AWS p2/p3 instances and GTX 1080 TI GPUs.
75 | After you trained a model you can save its checkpoint and run prediction either on GPUs or CPUs. 76 | 77 | ![](./assets/pipeline-03.png) 78 | 79 | The post-processing tools help you with cleaning up the segmentation model's results. 80 | They are responsible for denoising, simplifying geometries, transforming from pixels in Slippy Map tiles to world coordinates (GeoJSON features), and properly handling tile boundaries. 81 | 82 | If this sounds almost like what you need, see the [extending section](#extending) for more details about extending RoboSat. 83 | If you want to contribute, see the [contributing section](#contributing) for more details about getting involved with RoboSat. 84 | 85 | 86 | ## Installation 87 | 88 | We provide pre-built Docker images for both CPU as well as GPU environments on Docker Hub in [mapbox/robosat](https://hub.docker.com/r/mapbox/robosat/tags/). 89 | 90 | Using a CPU container to show all available sub-commands 91 | 92 | docker run -it --rm -v $PWD:/data --ipc=host --network=host mapbox/robosat:latest-cpu --help 93 | 94 | Using a GPU container (requires [nvidia-docker](https://github.com/NVIDIA/nvidia-docker) on the host) to train a model 95 | 96 | docker run --runtime=nvidia -it --rm -v $PWD:/data --ipc=host mapbox/robosat:latest-gpu train --model /data/model.toml --dataset /data/dataset.toml --workers 4 97 | 98 | Arguments 99 | - `--runtime=nvidia` enables the nvidia-docker runtime for access to host GPUs 100 | - `--ipc=host` is required for shared memory communication between workers 101 | - `--network=host` is required for network communication in the download tool 102 | - `-v $PWD:/data` makes the current directory on the host accessible at `/data` in the container 103 | 104 | For installation from source (requires installing dependencies) see the Dockerfiles in the [`docker/`](docker) directory. 105 | 106 | 107 | ## Usage 108 | 109 | The following describes the tools making up the RoboSat pipeline. 110 | All tools can be invoked via 111 | 112 | ./rs <tool> 113 | 114 | Also see the sub-command help available via 115 | 116 | ./rs --help 117 | ./rs <tool> --help 118 | 119 | Most tools take a dataset or model configuration file. See examples in the [`config`](./config) directory. 120 | You will need to adapt these configuration files to your own dataset, for example setting your tile resolution (e.g. 256x256 pixels). 121 | You will also need to adapt these configuration files to your specific deployment setup, for example using CUDA and setting batch sizes. 122 | 123 | 124 | ### rs extract 125 | 126 | Extracts GeoJSON features from OpenStreetMap to build a training set from. 127 | 128 | The result of `rs extract` is a GeoJSON file with the extracted feature geometries. 129 | 130 | The `rs extract` tool walks OpenStreetMap `.osm.pbf` base map files (e.g. from [Geofabrik](http://download.geofabrik.de)) and gathers feature geometries. 131 | These features are for example polygons for parking lots, buildings, or roads. 132 | 133 | 134 | ### rs cover 135 | 136 | Generates a list of tiles covering GeoJSON features to build a training set from. 137 | 138 | The result of `rs cover` is a file with tiles in `(x, y, z)` [Slippy Map](https://wiki.openstreetmap.org/wiki/Slippy_map_tilenames) tile format covering GeoJSON features. 139 | 140 | The `rs cover` tool reads in the GeoJSON features generated by `rs extract` and generates a list of tiles covering the feature geometries.
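For a quick sanity check, the cover file can also be consumed directly from Python with `robosat.tiles.tiles_from_csv` (see `robosat/tiles.py`). A minimal sketch, assuming a hypothetical `cover.csv` written by `rs cover`:

    from robosat.tiles import tiles_from_csv

    # Each non-empty row in the cover file is an `x,y,z` triple;
    # tiles_from_csv yields them as mercantile.Tile tuples.
    for tile in tiles_from_csv("cover.csv"):
        print(tile.z, tile.x, tile.y)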
141 | 142 | 143 | ### rs download 144 | 145 | Downloads aerial or satellite imagery from a Slippy Map endpoint (e.g. the Mapbox Maps API) based on a list of tiles. 146 | 147 | The result of `rs download` is a Slippy Map directory with aerial or satellite images - the training set's images you will need for the model to learn on. 148 | 149 | The `rs download` tool downloads images for a list of tiles in `(x, y, z)` [Slippy Map](https://wiki.openstreetmap.org/wiki/Slippy_map_tilenames) tile format generated by `rs cover`. 150 | 151 | The `rs download` tool expects a Slippy Map endpoint where placeholders for `{x}`, `{y}`, and `{z}` are formatted with each tile's ids. 152 | For example, for the Mapbox Maps API: `https://api.mapbox.com/v4/mapbox.satellite/{z}/{x}/{y}@2x.webp?access_token=TOKEN`. 153 | 154 | 155 | ### rs rasterize 156 | 157 | Rasterizes GeoJSON features into mask images based on a list of tiles. 158 | 159 | The result of `rs rasterize` is a Slippy Map directory with masks - the training set's masks you will need for the model to learn on. 160 | 161 | The `rs rasterize` tool reads in GeoJSON features and rasterizes them into single-channel masks with a color palette attached for quick visual inspection. 162 | 163 | 164 | ### rs train 165 | 166 | Trains a model on a training set made up of `(image, mask)` pairs. 167 | 168 | The result of `rs train` is a checkpoint containing weights for the trained model. 169 | 170 | The `rs train` tool trains a fully convolutional neural net for semantic segmentation on a dataset with `(image, mask)` pairs generated by `rs download` and `rs rasterize`. 171 | We recommend using a GPU for training: we are working with the AWS p2 instances and GTX 1080 TI GPUs. 172 | 173 | Before you can start training you need the following. 174 | 175 | - You need a dataset which you should split into three parts: training and validation for `rs train` to train on and to calculate validation metrics on, and a hold-out dataset for final model evaluation. The dataset's directory needs to look like the following. 176 | 177 | dataset 178 | ├── training 179 | │   ├── images 180 | │   └── labels 181 | └── validation 182 | ├── images 183 | └── labels 184 | 185 | - You need to calculate label class weights with `rs weights` on the training set's labels. 186 | 187 | - You need to add the path to the dataset's directory and the calculated class weights and statistics to the dataset config. 188 | 189 | 190 | ### rs export 191 | 192 | Exports a trained model in [ONNX](https://onnx.ai/) format for prediction across different backends (like Caffe2, TensorFlow). 193 | 194 | The result of `rs export` is an ONNX GraphProto `.pb` file which can be used with the ONNX ecosystem. 195 | 196 | Note: the `rs predict` tool works with `.pth` checkpoints. In contrast to these `.pth` checkpoints the ONNX models neither depend on PyTorch nor on the Python code for the model class, and can be used e.g. in resource-constrained environments like AWS Lambda. 197 | 198 | 199 | ### rs predict 200 | 201 | Predicts class probabilities for each image tile in a Slippy Map directory structure. 202 | 203 | The result of `rs predict` is a Slippy Map directory with a class probability encoded in a `.png` file per tile. 204 | 205 | The `rs predict` tool loads the checkpoint weights generated by `rs train` and predicts semantic segmentation class probabilities for a Slippy Map dataset consisting of image tiles.
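Tile boundaries deserve attention at prediction time: without context from adjacent tiles, segmentation quality tends to degrade along tile edges. The repository ships a buffering helper for this, `robosat.tiles.buffer_tile_image` (see `robosat/tiles.py`), which composites a tile with borders borrowed from its neighbors. A minimal sketch of the helper on its own, assuming a hypothetical Slippy Map directory and 512 pixel tiles with a 32 pixel overlap:

    from robosat.tiles import tiles_from_slippy_map, buffer_tile_image

    # (tile, path) pairs from an on-disk z/x/y.* directory.
    tiles = list(tiles_from_slippy_map("predict/images"))
    tile, path = tiles[0]

    # Composite of the tile plus 32 px of context from its eight
    # neighbors; missing neighbors are filled with the nodata color.
    buffered = buffer_tile_image(tile, tiles, overlap=32, tile_size=512)
    print(buffered.size)  # (512 + 2 * 32, 512 + 2 * 32)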
206 | 207 | 208 | ### rs masks 209 | 210 | Generates segmentation masks for each class probability `.png` file in a Slippy Map directory structure. 211 | 212 | The result of `rs masks` is a Slippy Map directory with one single-channel image per tile with a color palette attached for quick visual inspection. 213 | 214 | The `rs masks` tool loads in the `.png` tile segmentation class probabilities generated by `rs predict` and turns them into segmentation masks. 215 | You can merge multiple Slippy Map directories with class probabilities into a single mask using this tool in case you want to make use of an ensemble of models. 216 | 217 | 218 | ### rs features 219 | 220 | Extracts simplified GeoJSON features for segmentation masks in a Slippy Map directory structure. 221 | 222 | The result of `rs features` is a GeoJSON file with the extracted simplified features. 223 | 224 | The `rs features` tool loads the segmentation masks generated by `rs masks` and turns them into simplified GeoJSON features. 225 | 226 | 227 | ### rs merge 228 | 229 | Merges close adjacent GeoJSON features into single features. 230 | 231 | The result of `rs merge` is a GeoJSON file with the merged features. 232 | 233 | The `rs merge` tool loads GeoJSON features and depending on a threshold merges adjacent geometries together. 234 | 235 | 236 | ### rs dedupe 237 | 238 | Deduplicates predicted features against existing OpenStreetMap features. 239 | 240 | The result of `rs dedupe` is a GeoJSON file with predicted features which are not in OpenStreetMap. 241 | 242 | The `rs dedupe` tool deduplicates predicted features against OpenStreetMap. 243 | 244 | Note: use `rs extract` to generate a GeoJSON file with OpenStreetMap features to deduplicate against. 245 | 246 | 247 | ### rs serve 248 | 249 | Serves tile masks by providing an on-demand segmentation tileserver. 250 | 251 | The `rs serve` tool implements a Slippy Map raster tileserver requesting satellite tiles and applying the segmentation model on the fly. 252 | 253 | Note: useful for visually inspecting the raw segmentation masks on the fly; for serious use-cases use `rs predict` and the post-processing tools. 254 | 255 | 256 | ### rs weights 257 | 258 | Calculates class weights for a Slippy Map directory with masks. 259 | 260 | The result of `rs weights` is a list of class weights useful for `rs train` to adjust the loss based on the class distribution in the masks. 261 | 262 | The `rs weights` tool computes the pixel-wise class distribution on the training dataset's masks and outputs weights for training. 263 | 264 | 265 | ### rs compare 266 | 267 | Prepares images, labels, and predicted masks side-by-side for visual comparison. 268 | 269 | The result of `rs compare` is a Slippy Map directory with images that have the raw image on the left, the label in the middle and the prediction on the right. 270 | 271 | 272 | ### rs subset 273 | 274 | Filters a Slippy Map directory based on a list of tile ids. 275 | 276 | The result of `rs subset` is a Slippy Map directory filtered by tile ids. 277 | 278 | The main use-case for this tool is hard-negative mining where we want to filter false positives from a prediction run. 279 | 280 | 281 | ## Extending 282 | 283 | There are multiple ways to extend RoboSat for your specific use-cases. 284 | By default we use [Mapbox](https://www.mapbox.com) aerial imagery from the Maps API and feature masks generated from [OpenStreetMap](https://www.openstreetmap.org) geometries. 285 | If you want to bring your own imagery, masks, or features to extract, the following will get you started.
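Because both imagery and masks are plain Slippy Map directories, `robosat.tiles.tiles_from_slippy_map` (see `robosat/tiles.py`) is handy for sanity-checking a custom dataset before training. A minimal sketch, assuming hypothetical `dataset/training/images` and `dataset/training/labels` directories:

    from robosat.tiles import tiles_from_slippy_map

    # Tiles come back as (mercantile.Tile, path) pairs, so alignment
    # checks reduce to simple dictionary lookups on the tile ids.
    images = dict(tiles_from_slippy_map("dataset/training/images"))
    labels = dict(tiles_from_slippy_map("dataset/training/labels"))

    missing = [tile for tile in labels if tile not in images]
    assert not missing, "every label tile needs a matching image tile"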
286 | 287 | ### Bring your own imagery 288 | 289 | RoboSat's main abstraction is the [Slippy Map](https://wiki.openstreetmap.org/wiki/Slippy_map_tilenames) tile format. 290 | As long as your imagery is geo-referenced and you can convert it to a Slippy Map directory structure to point the command lines to, you are good to go. 291 | Make sure imagery and masks are properly aligned. 292 | 293 | ### Bring your own masks 294 | 295 | RoboSat's main abstraction is the [Slippy Map](https://wiki.openstreetmap.org/wiki/Slippy_map_tilenames) tile format. 296 | As long as you can convert your masks to a Slippy Map directory structure to point the command lines to, you are good to go. 297 | Masks have to be single-channel `.png` files with class indices starting from zero. 298 | Make sure imagery and masks are properly aligned. 299 | 300 | ### Add support for a feature in pre-processing 301 | 302 | Pre-processing (`rs extract`) is responsible for turning OpenStreetMap geometries and tags into polygon feature masks. 303 | If you want to add a new feature based on geometries in OpenStreetMap you have to: 304 | - Implement an [osmium](https://docs.osmcode.org/pyosmium/latest/) handler which turns OpenStreetMap geometries into polygons; see [`robosat/osm/`](./robosat/osm/) for existing handlers. 305 | - Import and register your handler in [`robosat/tools/extract.py`](./robosat/tools/extract.py). 306 | 307 | And that's it! From there on the pipeline is fully generic. 308 | 309 | ### Add support for a feature in post-processing 310 | 311 | Post-processing (`rs features`) is responsible for turning segmentation masks into simplified GeoJSON features. 312 | If you want to add custom post-processing for segmentation masks you have to: 313 | - Implement a featurize handler turning masks into GeoJSON features; see [`robosat/features/`](./robosat/features/) for existing handlers. 314 | - Import and register your handler in [`robosat/tools/features.py`](./robosat/tools/features.py). 315 | 316 | And that's it! From there on the pipeline is fully generic. 317 | 318 | 319 | ## Contributing 320 | 321 | We are thankful for contributions and are happy to help; that said, there are some constraints to take into account: 322 | - For non-trivial changes you should open a ticket first to outline and discuss ideas and implementation sketches. If you just send us a pull request with thousands of lines of changes we most likely won't accept your changeset. 323 | - We follow the 80/20 rule where 80% of the effects come from 20% of the causes: we strive for simplicity and maintainability over pixel-perfect results. If you can improve the model's accuracy by two percentage points but have to add thousands of lines of code we most likely won't accept your changeset. 324 | - We take responsibility for changesets going into master: as soon as your changeset gets approved it is on us to maintain and debug it. If your changeset cannot be tested or maintained in the future by the core developers, we most likely won't accept your changeset. 325 | 326 | 327 | ## License 328 | 329 | Copyright (c) 2018 Mapbox 330 | 331 | Distributed under the MIT License (MIT).
332 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # 2 | # This file is autogenerated by pip-compile 3 | # To update, run: 4 | # 5 | # pip-compile --generate-hashes 6 | # 7 | affine==2.2.2 \ 8 | --hash=sha256:e5970e2e53edd75fee60eb2550df365a1c3a58d78755e9e5164e345ac36df322 \ 9 | --hash=sha256:ff0d0f40a90faa651f7bc7fece15bdbb7a0e0658b1e7ba6a03422c21efa7da90 \ 10 | # via rasterio, supermercado 11 | atomicwrites==1.3.0 \ 12 | --hash=sha256:03472c30eb2c5d1ba9227e4c2ca66ab8287fbfbbda3888aa93dc2e28fc6811b4 \ 13 | --hash=sha256:75a9445bac02d8d058d5e1fe689654ba5a6556a1dfd8ce6ec55a0ed79866cfa6 \ 14 | # via pytest 15 | attrs==19.1.0 \ 16 | --hash=sha256:69c0dbf2ed392de1cb5ec704444b08a5ef81680a61cb899dc08127123af36a79 \ 17 | --hash=sha256:f0b870f674851ecbfbbbd364d6b5cbdff9dcedbc7f3f5e18a6891057f21fe399 \ 18 | # via pytest, rasterio 19 | certifi==2019.3.9 \ 20 | --hash=sha256:59b7658e26ca9c7339e00f8f4636cdfe59d34fa37b9b04f6f9e9926b3cece1a5 \ 21 | --hash=sha256:b26104d6835d1f5e49452a26eb2ff87fe7090b89dfcaee5ea2212697e1e1d7ae \ 22 | # via requests 23 | chardet==3.0.4 \ 24 | --hash=sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae \ 25 | --hash=sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691 \ 26 | # via requests 27 | click-plugins==1.1.1 \ 28 | --hash=sha256:46ab999744a9d831159c3411bb0c79346d94a444df9a3a3742e9ed63645f264b \ 29 | --hash=sha256:5d262006d3222f5057fd81e1623d4443e41dcda5dc815c06b442aa3c02889fc8 \ 30 | # via rasterio, supermercado 31 | click==7.0 \ 32 | --hash=sha256:2335065e6395b9e67ca716de5f7526736bfa6ceead690adf616d925bdc622b13 \ 33 | --hash=sha256:5b94b49521f6456670fdb30cd82a4eca9412788a93fa6dd6df72c94d5a8ff2d7 \ 34 | # via click-plugins, cligj, flask, mercantile, rasterio, supermercado 35 | cligj==0.5.0 \ 36 | --hash=sha256:20f24ce9abfde3f758aec3399e6811b936b6772f360846c662c19bf5537b4f14 \ 37 | --hash=sha256:60c93dda4499562eb87509a8ff3535a7441053b766c9c26bcf874a732f939c7c \ 38 | --hash=sha256:6c7d52d529a78712491974f975c33473f430c0f7beb18c0d7a402a743dcb460a \ 39 | # via rasterio, supermercado 40 | cycler==0.10.0 \ 41 | --hash=sha256:1d8a5ae1ff6c5cf9b93e8811e581232ad8920aeec647c37316ceac982b08cb2d \ 42 | --hash=sha256:cd7b2d1018258d7247a71425e9f26463dfb444d411c39569972f4ce586b0c9d8 \ 43 | # via matplotlib 44 | flask==1.0.3 \ 45 | --hash=sha256:ad7c6d841e64296b962296c2c2dabc6543752985727af86a975072dea984b6f3 \ 46 | --hash=sha256:e7d32475d1de5facaa55e3958bc4ec66d3762076b074296aa50ef8fdc5b9df61 47 | geojson==2.4.1 \ 48 | --hash=sha256:b175e00a76d923d6e7409de0784c147adcdd6e04b311b1d405895a4db3612c9d \ 49 | --hash=sha256:b2bfb5c8e6b4b0c55dd139996317145aa8526146b3f8570586f9613c527a648a 50 | idna==2.8 \ 51 | --hash=sha256:c357b3f628cf53ae2c4c05627ecc484553142ca23264e593d327bcde5e9c3407 \ 52 | --hash=sha256:ea8b7f6188e6fa117537c3df7da9fc686d485087abf6ac197f9c46432f7e4a3c \ 53 | # via requests 54 | importlib-metadata==0.17 \ 55 | --hash=sha256:a9f185022cfa69e9ca5f7eabfd5a58b689894cb78a11e3c8c89398a8ccbb8e7f \ 56 | --hash=sha256:df1403cd3aebeb2b1dcd3515ca062eecb5bd3ea7611f18cba81130c68707e879 \ 57 | # via pluggy 58 | itsdangerous==1.1.0 \ 59 | --hash=sha256:321b033d07f2a4136d3ec762eac9f16a10ccd60f53c0c91af90217ace7ba1f19 \ 60 | --hash=sha256:b12271b2047cb23eeb98c8b5622e2e5c5e9abd9784a153e9d8ef9cb4dd09d749 \ 61 | # via flask 62 | jinja2==2.10.1 \ 63 | 
--hash=sha256:065c4f02ebe7f7cf559e49ee5a95fb800a9e4528727aec6f24402a5374c65013 \ 64 | --hash=sha256:14dd6caf1527abb21f08f86c784eac40853ba93edb79552aa1e4b8aef1b61c7b \ 65 | # via flask 66 | kiwisolver==1.1.0 \ 67 | --hash=sha256:05b5b061e09f60f56244adc885c4a7867da25ca387376b02c1efc29cc16bcd0f \ 68 | --hash=sha256:26f4fbd6f5e1dabff70a9ba0d2c4bd30761086454aa30dddc5b52764ee4852b7 \ 69 | --hash=sha256:3b2378ad387f49cbb328205bda569b9f87288d6bc1bf4cd683c34523a2341efe \ 70 | --hash=sha256:400599c0fe58d21522cae0e8b22318e09d9729451b17ee61ba8e1e7c0346565c \ 71 | --hash=sha256:47b8cb81a7d18dbaf4fed6a61c3cecdb5adec7b4ac292bddb0d016d57e8507d5 \ 72 | --hash=sha256:53eaed412477c836e1b9522c19858a8557d6e595077830146182225613b11a75 \ 73 | --hash=sha256:58e626e1f7dfbb620d08d457325a4cdac65d1809680009f46bf41eaf74ad0187 \ 74 | --hash=sha256:5a52e1b006bfa5be04fe4debbcdd2688432a9af4b207a3f429c74ad625022641 \ 75 | --hash=sha256:5c7ca4e449ac9f99b3b9d4693debb1d6d237d1542dd6a56b3305fe8a9620f883 \ 76 | --hash=sha256:682e54f0ce8f45981878756d7203fd01e188cc6c8b2c5e2cf03675390b4534d5 \ 77 | --hash=sha256:79bfb2f0bd7cbf9ea256612c9523367e5ec51d7cd616ae20ca2c90f575d839a2 \ 78 | --hash=sha256:7f4dd50874177d2bb060d74769210f3bce1af87a8c7cf5b37d032ebf94f0aca3 \ 79 | --hash=sha256:8944a16020c07b682df861207b7e0efcd2f46c7488619cb55f65882279119389 \ 80 | --hash=sha256:8aa7009437640beb2768bfd06da049bad0df85f47ff18426261acecd1cf00897 \ 81 | --hash=sha256:939f36f21a8c571686eb491acfffa9c7f1ac345087281b412d63ea39ca14ec4a \ 82 | --hash=sha256:9733b7f64bd9f807832d673355f79703f81f0b3e52bfce420fc00d8cb28c6a6c \ 83 | --hash=sha256:a02f6c3e229d0b7220bd74600e9351e18bc0c361b05f29adae0d10599ae0e326 \ 84 | --hash=sha256:a0c0a9f06872330d0dd31b45607197caab3c22777600e88031bfe66799e70bb0 \ 85 | --hash=sha256:acc4df99308111585121db217681f1ce0eecb48d3a828a2f9bbf9773f4937e9e \ 86 | --hash=sha256:b64916959e4ae0ac78af7c3e8cef4becee0c0e9694ad477b4c6b3a536de6a544 \ 87 | --hash=sha256:d3fcf0819dc3fea58be1fd1ca390851bdb719a549850e708ed858503ff25d995 \ 88 | --hash=sha256:d52e3b1868a4e8fd18b5cb15055c76820df514e26aa84cc02f593d99fef6707f \ 89 | --hash=sha256:db1a5d3cc4ae943d674718d6c47d2d82488ddd94b93b9e12d24aabdbfe48caee \ 90 | --hash=sha256:e3a21a720791712ed721c7b95d433e036134de6f18c77dbe96119eaf7aa08004 \ 91 | --hash=sha256:e8bf074363ce2babeb4764d94f8e65efd22e6a7c74860a4f05a6947afc020ff2 \ 92 | --hash=sha256:f16814a4a96dc04bf1da7d53ee8d5b1d6decfc1a92a63349bb15d37b6a263dd9 \ 93 | --hash=sha256:f2b22153870ca5cf2ab9c940d7bc38e8e9089fa0f7e5856ea195e1cf4ff43d5a \ 94 | --hash=sha256:f790f8b3dff3d53453de6a7b7ddd173d2e020fb160baff578d578065b108a05f \ 95 | # via matplotlib 96 | markupsafe==1.1.1 \ 97 | --hash=sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473 \ 98 | --hash=sha256:09027a7803a62ca78792ad89403b1b7a73a01c8cb65909cd876f7fcebd79b161 \ 99 | --hash=sha256:09c4b7f37d6c648cb13f9230d847adf22f8171b1ccc4d5682398e77f40309235 \ 100 | --hash=sha256:1027c282dad077d0bae18be6794e6b6b8c91d58ed8a8d89a89d59693b9131db5 \ 101 | --hash=sha256:24982cc2533820871eba85ba648cd53d8623687ff11cbb805be4ff7b4c971aff \ 102 | --hash=sha256:29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b \ 103 | --hash=sha256:43a55c2930bbc139570ac2452adf3d70cdbb3cfe5912c71cdce1c2c6bbd9c5d1 \ 104 | --hash=sha256:46c99d2de99945ec5cb54f23c8cd5689f6d7177305ebff350a58ce5f8de1669e \ 105 | --hash=sha256:500d4957e52ddc3351cabf489e79c91c17f6e0899158447047588650b5e69183 \ 106 | --hash=sha256:535f6fc4d397c1563d08b88e485c3496cf5784e927af890fb3c3aac7f933ec66 \ 107 | 
--hash=sha256:62fe6c95e3ec8a7fad637b7f3d372c15ec1caa01ab47926cfdf7a75b40e0eac1 \ 108 | --hash=sha256:6dd73240d2af64df90aa7c4e7481e23825ea70af4b4922f8ede5b9e35f78a3b1 \ 109 | --hash=sha256:717ba8fe3ae9cc0006d7c451f0bb265ee07739daf76355d06366154ee68d221e \ 110 | --hash=sha256:79855e1c5b8da654cf486b830bd42c06e8780cea587384cf6545b7d9ac013a0b \ 111 | --hash=sha256:7c1699dfe0cf8ff607dbdcc1e9b9af1755371f92a68f706051cc8c37d447c905 \ 112 | --hash=sha256:88e5fcfb52ee7b911e8bb6d6aa2fd21fbecc674eadd44118a9cc3863f938e735 \ 113 | --hash=sha256:8defac2f2ccd6805ebf65f5eeb132adcf2ab57aa11fdf4c0dd5169a004710e7d \ 114 | --hash=sha256:98c7086708b163d425c67c7a91bad6e466bb99d797aa64f965e9d25c12111a5e \ 115 | --hash=sha256:9add70b36c5666a2ed02b43b335fe19002ee5235efd4b8a89bfcf9005bebac0d \ 116 | --hash=sha256:9bf40443012702a1d2070043cb6291650a0841ece432556f784f004937f0f32c \ 117 | --hash=sha256:ade5e387d2ad0d7ebf59146cc00c8044acbd863725f887353a10df825fc8ae21 \ 118 | --hash=sha256:b00c1de48212e4cc9603895652c5c410df699856a2853135b3967591e4beebc2 \ 119 | --hash=sha256:b1282f8c00509d99fef04d8ba936b156d419be841854fe901d8ae224c59f0be5 \ 120 | --hash=sha256:b2051432115498d3562c084a49bba65d97cf251f5a331c64a12ee7e04dacc51b \ 121 | --hash=sha256:ba59edeaa2fc6114428f1637ffff42da1e311e29382d81b339c1817d37ec93c6 \ 122 | --hash=sha256:c8716a48d94b06bb3b2524c2b77e055fb313aeb4ea620c8dd03a105574ba704f \ 123 | --hash=sha256:cd5df75523866410809ca100dc9681e301e3c27567cf498077e8551b6d20e42f \ 124 | --hash=sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7 \ 125 | # via jinja2 126 | matplotlib==3.1.0 \ 127 | --hash=sha256:08d9bc2e2acef42965256acd5015dc2c899cbd53e01bf4214c5510c7ea0efd2d \ 128 | --hash=sha256:1e0213f87cc0076f7b0c4c251d7e23601e2419cd98691df79edb95517ba06f0c \ 129 | --hash=sha256:1f31053f660df5f0310118d7f5bd1e8025170e9773f0bebe8fec486d0926adf6 \ 130 | --hash=sha256:399bf6352633aeeb45ca55c6c943fa2738022fb17ae498c32a142ced0b41528d \ 131 | --hash=sha256:409a5894efb810d630d2512449c7a4394de9a4d15fc6394e26a409b17d9cc18c \ 132 | --hash=sha256:5c5ef5cf1bc8f483123102e2615644937af7d4c01d100acc72bf74a044a78717 \ 133 | --hash=sha256:d0052be5cdfa27018bb08194b8812c47cb985d60eb682e1809c76e9600839516 \ 134 | --hash=sha256:e7d6620d145ca9f6c3e88248e5734b6fda430e75e70755b887e48f8e9bc1de2a \ 135 | --hash=sha256:f3d8b6bccc577e4e5ecbd58fdd63cacb8e58f0ed1e97616a7f7a7baaf4b8d036 136 | mercantile==1.0.4 \ 137 | --hash=sha256:0278b393a05d17c3618c5f5da64488bb3707e14046fe36f594d7215db3b376c4 \ 138 | --hash=sha256:db13b7d674a38ea69673898e96f6c6b33e3fd41ccbbfe8ba54860d427fa7d492 \ 139 | --hash=sha256:e56a48313ab8b5ba26d4e601963df02ad989efba58589fce6f90ca9ffac04829 140 | more-itertools==7.0.0 \ 141 | --hash=sha256:2112d2ca570bb7c3e53ea1a35cd5df42bb0fd10c45f0fb97178679c3c03d64c7 \ 142 | --hash=sha256:c3e4748ba1aad8dba30a4886b0b1a2004f9a863837b8654e7059eebf727afa5a \ 143 | # via pytest 144 | numpy==1.16.4 \ 145 | --hash=sha256:0778076e764e146d3078b17c24c4d89e0ecd4ac5401beff8e1c87879043a0633 \ 146 | --hash=sha256:141c7102f20abe6cf0d54c4ced8d565b86df4d3077ba2343b61a6db996cefec7 \ 147 | --hash=sha256:14270a1ee8917d11e7753fb54fc7ffd1934f4d529235beec0b275e2ccf00333b \ 148 | --hash=sha256:27e11c7a8ec9d5838bc59f809bfa86efc8a4fd02e58960fa9c49d998e14332d5 \ 149 | --hash=sha256:2a04dda79606f3d2f760384c38ccd3d5b9bb79d4c8126b67aff5eb09a253763e \ 150 | --hash=sha256:3c26010c1b51e1224a3ca6b8df807de6e95128b0908c7e34f190e7775455b0ca \ 151 | --hash=sha256:52c40f1a4262c896420c6ea1c6fda62cf67070e3947e3307f5562bd783a90336 \ 152 | 
--hash=sha256:6e4f8d9e8aa79321657079b9ac03f3cf3fd067bf31c1cca4f56d49543f4356a5 \ 153 | --hash=sha256:7242be12a58fec245ee9734e625964b97cf7e3f2f7d016603f9e56660ce479c7 \ 154 | --hash=sha256:7dc253b542bfd4b4eb88d9dbae4ca079e7bf2e2afd819ee18891a43db66c60c7 \ 155 | --hash=sha256:94f5bd885f67bbb25c82d80184abbf7ce4f6c3c3a41fbaa4182f034bba803e69 \ 156 | --hash=sha256:a89e188daa119ffa0d03ce5123dee3f8ffd5115c896c2a9d4f0dbb3d8b95bfa3 \ 157 | --hash=sha256:ad3399da9b0ca36e2f24de72f67ab2854a62e623274607e37e0ce5f5d5fa9166 \ 158 | --hash=sha256:b0348be89275fd1d4c44ffa39530c41a21062f52299b1e3ee7d1c61f060044b8 \ 159 | --hash=sha256:b5554368e4ede1856121b0dfa35ce71768102e4aa55e526cb8de7f374ff78722 \ 160 | --hash=sha256:cbddc56b2502d3f87fda4f98d948eb5b11f36ff3902e17cb6cc44727f2200525 \ 161 | --hash=sha256:d79f18f41751725c56eceab2a886f021d70fd70a6188fd386e29a045945ffc10 \ 162 | --hash=sha256:dc2ca26a19ab32dc475dbad9dfe723d3a64c835f4c23f625c2b6566ca32b9f29 \ 163 | --hash=sha256:dd9bcd4f294eb0633bb33d1a74febdd2b9018b8b8ed325f861fffcd2c7660bb8 \ 164 | --hash=sha256:e8baab1bc7c9152715844f1faca6744f2416929de10d7639ed49555a85549f52 \ 165 | --hash=sha256:ec31fe12668af687b99acf1567399632a7c47b0e17cfb9ae47c098644ef36797 \ 166 | --hash=sha256:f12b4f7e2d8f9da3141564e6737d79016fe5336cc92de6814eba579744f65b0a \ 167 | --hash=sha256:f58ac38d5ca045a377b3b377c84df8175ab992c970a53332fa8ac2373df44ff7 168 | opencv-contrib-python-headless==4.1.0.25 \ 169 | --hash=sha256:03383f0adcf264ded23c6fe0b1498d341bdf8d119e622c4b0153483e9b490967 \ 170 | --hash=sha256:2eb815bbe22025a9652a7a30368394dca70afce5037e29f8a9efac409ba01c16 \ 171 | --hash=sha256:434a931b87d2475a8213c37252893cf92155ee81dd196f42b920087e89896e30 \ 172 | --hash=sha256:4fe7d51dab3018e305c4373ac66ca0701ace6e7b69eee7eac9aaed662e6e0300 \ 173 | --hash=sha256:533a711313056a25a9c2491e0a701ffb71fc8f3391393ef4b769b85f50132099 \ 174 | --hash=sha256:5aeb858293934e859e80d3fa161a61990a194666e3eef5962fe5c811413eaebf \ 175 | --hash=sha256:62ca879be3f4e68a16ebbc8be2300f64b025285be0b2c49ad4aff633644cc331 \ 176 | --hash=sha256:665bfdd387bb8231791199a45f2d88d81927df0d3b57c5cd09702e91a1a5cd88 \ 177 | --hash=sha256:7d23e70f59d19772857d79f454db61038b0faaa10fc19b55377172750901f19d \ 178 | --hash=sha256:802aacdb13eb2e0e670511bce3760c462970362137304546c56ca3a1105c626e \ 179 | --hash=sha256:80c3bc0224d67fd789db6f912a37ddc18fc12d28ca775b7daeb50c9840c9b205 \ 180 | --hash=sha256:83079ef6d3ab76c8d9553525ef9378cb0c07bff31ff22b86ef0899560a9e15cd \ 181 | --hash=sha256:91e7e470bceb2b82726be000e3e10aafc99192d94d175d4992ccc6ce5fcfda7c \ 182 | --hash=sha256:9a300e80b275de7a636c70155c600a6fb44839a059a356353358107453f42e8c \ 183 | --hash=sha256:a45e6b293d50045712a2c7d2214533ce2b1f857e4303440508c566a46c1261d3 \ 184 | --hash=sha256:b23386fcf7aaed228e0fab691a4d881c9ba25f91171d61d35d769992741c3844 \ 185 | --hash=sha256:bcff1fd3437ad758c30219b3006f0c01b96684ee02a7cdcc5f981ec8aae40d8f \ 186 | --hash=sha256:c15981be8ce7210cad4636bb1c55e1c0cd8017c6ce87d5c06e8fab9d4d206c15 \ 187 | --hash=sha256:c5163424c0bc0c6d9a952195f01f5f9176defc1fdecdc411ce0dbc1bdca0cd2c \ 188 | --hash=sha256:c8f0fe926b43965e4e2c4f9fbb94cb5f226ed92e9e85f00a94dfad42816cb389 \ 189 | --hash=sha256:d27fad451fb47c12f53ef63d97f2cf35f0f294420447241e0504c6b9e7bfb362 \ 190 | --hash=sha256:dd1a21552f4d4101e14064a5cd00fc34cb9e3622718102b21de7bbfa4e6bbc13 \ 191 | --hash=sha256:efa2d49fd9430534c71f31c3a6110f5b18ea2195f7f2b69e3244f4e4eb821388 \ 192 | --hash=sha256:f28d809b87a75e6f45fddefca13c5070fbd8e23e1c63e5eab161f9763f92ca70 \ 193 | 
--hash=sha256:f67456343a905ad524bba30352a7d8d714fa8ecffb57c5f5cb019f80fca5eaff \ 194 | --hash=sha256:f8383ef16004789635a666a68110c20d10713f21047f6993dd6af3c3fdb91b4f \ 195 | --hash=sha256:fe17070b7de59d197be1ccf4a39f21f3e30414c193088352b9bdfe44e7a6a9ce 196 | osmium==2.15.2 \ 197 | --hash=sha256:05bbd36e9a7e54b9c3fd39753d49f23978e365832a01a78510e83a3426f3d962 \ 198 | --hash=sha256:0cc6f22025b94895beba19bec4f099cc9d9ab528f2e6867aac8705e61c8bb634 \ 199 | --hash=sha256:0d0b9baa07ba4148e686f0727997eedfa5ddb8ae67a2f4d162efcfb9e0fe3f41 \ 200 | --hash=sha256:10bb7d60d95c78d1dcc85c17aa44df2215ccde0131b31b45b3fd3879cb7ee179 \ 201 | --hash=sha256:180b218a966372e0d8d3abf2f643b42c7fe14921bda0b9ea755d2c24b015c230 \ 202 | --hash=sha256:42730929cdcaf78519ff343fc1d6ec9be4c476b0374ae191a70f39c1e75909ab \ 203 | --hash=sha256:4f749ca9d86770ca65c9e1b92aac6c93e360a64c45cc3724924226490a2b7ca4 \ 204 | --hash=sha256:5211108ff3a42214a9e1e1eb67562c89c079655e837cadb936dff65daf50015d \ 205 | --hash=sha256:5423c34dd76b266dd31865a5089eb174886ccf1060e0c881b62971d4d73fe8c1 \ 206 | --hash=sha256:5925688a9f4d48129829b6ab1460f34c0f7fd8f4ee1b72f74da5e5c72ffb4c62 \ 207 | --hash=sha256:599e7ecc7be5060512da2b6803a1a96fc56986db29e83db5b3401a1cd5a6fcc2 \ 208 | --hash=sha256:84ec0affc2522451a386d0fdc4f5490eaa88531374947674e2c95507465275a3 \ 209 | --hash=sha256:8dd730a9f367bff32c813e6c3fc599e83494ddb981961c5c4f4433c6a1a85a37 \ 210 | --hash=sha256:ac4a4e0d8e77d89e93858d9cc24909238756eefff895092d97ac882c86a2777f \ 211 | --hash=sha256:b316fad933a580f5fe6d0b665fe722d9adc24035c71d21088a195f5f97ccf105 \ 212 | --hash=sha256:c0052bec1178431d62f3a5d67911471aa178ab3530c0a832cab0c500e405df16 \ 213 | --hash=sha256:c12445476c0276bc2ddb5a13c98fd6215fceca5aa0a08fcfaf1b630492e7e974 \ 214 | --hash=sha256:c781ae862a6029cf6cc0d2b8892665405a34293e32fbdb7216c3832bbf8bdef6 \ 215 | --hash=sha256:ca5ad170c0809714d5161ff07fa571a4af0599352b035999711a12e32a955b36 \ 216 | --hash=sha256:d495a5191ae1d390dfa23aa055ca74eb386d42689069b7cc95ca9297428d1749 \ 217 | --hash=sha256:f42b9aa176c1d003f04bcb5c20b8d56d678130202bf32e40ce286dccb3e640e8 \ 218 | --hash=sha256:fe481236cb0f4660d406a43b885b535e964cd9409d9567716d2001f7cd19a448 219 | pillow==6.0.0 \ 220 | --hash=sha256:15c056bfa284c30a7f265a41ac4cbbc93bdbfc0dfe0613b9cb8a8581b51a9e55 \ 221 | --hash=sha256:1a4e06ba4f74494ea0c58c24de2bb752818e9d504474ec95b0aa94f6b0a7e479 \ 222 | --hash=sha256:1c3c707c76be43c9e99cb7e3d5f1bee1c8e5be8b8a2a5eeee665efbf8ddde91a \ 223 | --hash=sha256:1fd0b290203e3b0882d9605d807b03c0f47e3440f97824586c173eca0aadd99d \ 224 | --hash=sha256:24114e4a6e1870c5a24b1da8f60d0ba77a0b4027907860188ea82bd3508c80eb \ 225 | --hash=sha256:258d886a49b6b058cd7abb0ab4b2b85ce78669a857398e83e8b8e28b317b5abb \ 226 | --hash=sha256:33c79b6dd6bc7f65079ab9ca5bebffb5f5d1141c689c9c6a7855776d1b09b7e8 \ 227 | --hash=sha256:367385fc797b2c31564c427430c7a8630db1a00bd040555dfc1d5c52e39fcd72 \ 228 | --hash=sha256:3c1884ff078fb8bf5f63d7d86921838b82ed4a7d0c027add773c2f38b3168754 \ 229 | --hash=sha256:44e5240e8f4f8861d748f2a58b3f04daadab5e22bfec896bf5434745f788f33f \ 230 | --hash=sha256:46aa988e15f3ea72dddd81afe3839437b755fffddb5e173886f11460be909dce \ 231 | --hash=sha256:74d90d499c9c736d52dd6d9b7221af5665b9c04f1767e35f5dd8694324bd4601 \ 232 | --hash=sha256:809c0a2ce9032cbcd7b5313f71af4bdc5c8c771cb86eb7559afd954cab82ebb5 \ 233 | --hash=sha256:85d1ef2cdafd5507c4221d201aaf62fc9276f8b0f71bd3933363e62a33abc734 \ 234 | --hash=sha256:8c3889c7681af77ecfa4431cd42a2885d093ecb811e81fbe5e203abc07e0995b \ 235 | 
--hash=sha256:9218d81b9fca98d2c47d35d688a0cea0c42fd473159dfd5612dcb0483c63e40b \ 236 | --hash=sha256:9aa4f3827992288edd37c9df345783a69ef58bd20cc02e64b36e44bcd157bbf1 \ 237 | --hash=sha256:9d80f44137a70b6f84c750d11019a3419f409c944526a95219bea0ac31f4dd91 \ 238 | --hash=sha256:b7ebd36128a2fe93991293f997e44be9286503c7530ace6a55b938b20be288d8 \ 239 | --hash=sha256:c4c78e2c71c257c136cdd43869fd3d5e34fc2162dc22e4a5406b0ebe86958239 \ 240 | --hash=sha256:c6a842537f887be1fe115d8abb5daa9bc8cc124e455ff995830cc785624a97af \ 241 | --hash=sha256:cf0a2e040fdf5a6d95f4c286c6ef1df6b36c218b528c8a9158ec2452a804b9b8 \ 242 | --hash=sha256:cfd28aad6fc61f7a5d4ee556a997dc6e5555d9381d1390c00ecaf984d57e4232 \ 243 | --hash=sha256:dca5660e25932771460d4688ccbb515677caaf8595f3f3240ec16c117deff89a \ 244 | --hash=sha256:de7aedc85918c2f887886442e50f52c1b93545606317956d65f342bd81cb4fc3 \ 245 | --hash=sha256:e6c0bbf8e277b74196e3140c35f9a1ae3eafd818f7f2d3a15819c49135d6c062 246 | pluggy==0.12.0 \ 247 | --hash=sha256:0825a152ac059776623854c1543d65a4ad408eb3d33ee114dff91e57ec6ae6fc \ 248 | --hash=sha256:b9817417e95936bf75d85d3f8767f7df6cdde751fc40aed3bb3074cbcb77757c \ 249 | # via pytest 250 | py==1.8.0 \ 251 | --hash=sha256:64f65755aee5b381cea27766a3a147c3f15b9b6b9ac88676de66ba2ae36793fa \ 252 | --hash=sha256:dc639b046a6e2cff5bbe40194ad65936d6ba360b52b3c3fe1d08a82dd50b5e53 \ 253 | # via pytest 254 | pyparsing==2.4.0 \ 255 | --hash=sha256:1873c03321fc118f4e9746baf201ff990ceb915f433f23b395f5580d1840cb2a \ 256 | --hash=sha256:9b6323ef4ab914af344ba97510e966d64ba91055d6b9afa6b30799340e89cc03 \ 257 | # via matplotlib, snuggs 258 | pyproj==2.1.3 \ 259 | --hash=sha256:0b363909ba0f873db684e75821c215146b010c38cb719346d872d473b890af78 \ 260 | --hash=sha256:0c856964194f4c5e5418e69f2b1fbcc64b4230a7fbc8ba0af06c918f043ca9b8 \ 261 | --hash=sha256:0df13fa24a458026e71599b9af75d47bb7994ff24d9f7a94f47960289a9b0d71 \ 262 | --hash=sha256:193c966982728ea25addc00a0d4a4ecc2ca722ac13f7addb19a6e8f628eda802 \ 263 | --hash=sha256:25d5e394bb91a8a9b3080effdc023ba3a646bf500a28ebaebd4d83ce9584a62a \ 264 | --hash=sha256:38f799621cc3c4c33db367e177340a5f5393ea27994966516d5a8d97b55be230 \ 265 | --hash=sha256:3d890a8e75b55934e37d024aa69c2219ae2b26f57cc5daa10a7ec66e6dab365b \ 266 | --hash=sha256:4c1852652b335ddd9c5a2b760f4ab7a8a448d6831818f3ffa5ffbe260370c8c9 \ 267 | --hash=sha256:56312e0342f48085b452a35d7f67211b62d6663905622de20321e1fb9386039d \ 268 | --hash=sha256:6c33f973b8719f211de073d9b119095ce420a2e96657aef0f2276f97055dd20f \ 269 | --hash=sha256:783720ae26fd40708193c3775231785efc9068aa67f5df1ab4e354ae03dc3794 \ 270 | --hash=sha256:8fd410470f2f3e26983731146863bd939f5b33cc4b572843af836a4a1f43eb44 \ 271 | --hash=sha256:99c52788b01a7bb9a88024bf4d40965c0a66a93d654600b5deacf644775f424d \ 272 | --hash=sha256:9beef4f85a25a86caf73cf2f03918f4c19d7002e2c0f85d12151b3382efd6705 \ 273 | --hash=sha256:a28c4842ef8e6f2bf04e870ba8634020fa6a0a234b46c585aba33de72e261d3e \ 274 | --hash=sha256:cc4ba1f2ffaff2c36fc3c3a50e49892ee5d468eeed7a572ade71367382df4de7 \ 275 | --hash=sha256:d46dc7a2b03724204b8d0daf30fca78d47e9018e10393c2d8b14c5be52ef0135 \ 276 | --hash=sha256:e96ec8763f795fbbc8ac0d5bf2fb6ea39d37f17d73d614aacbd18d2b8f5367d8 \ 277 | --hash=sha256:f1930f3eacd50acbd7a833d5ca0cdf38bb3a7c86f73a45c9b26f9ec0d1d40de0 \ 278 | --hash=sha256:f89b610a9a38d0292e29b06093ffa09365d36259f58aa085b7cf32f2a8ab2b96 \ 279 | --hash=sha256:fac2992495a2a4c29690916a129c6cc58cc93cf689db1e1cab016f2e210f3f4a 280 | pytest==4.5.0 \ 281 | 
--hash=sha256:1a8aa4fa958f8f451ac5441f3ac130d9fc86ea38780dd2715e6d5c5882700b24 \ 282 | --hash=sha256:b8bf138592384bd4e87338cb0f256bf5f615398a649d4bd83915f0e4047a5ca6 283 | python-dateutil==2.8.0 \ 284 | --hash=sha256:7e6584c74aeed623791615e26efd690f29817a27c73085b78e4bad02493df2fb \ 285 | --hash=sha256:c89805f6f4d64db21ed966fda138f8a5ed7a4fdbc1a8ee329ce1b74e3c74da9e \ 286 | # via matplotlib 287 | rasterio==1.0.23 \ 288 | --hash=sha256:060d1b554b96de916599bfaf73dca533aeecd7b395f6b4dbe9124e6c47eb526f \ 289 | --hash=sha256:3ce69eb247dd800952a1cd4ced60a8b07be75c3cfcb5714d0588cc15b0e3ed73 \ 290 | --hash=sha256:45e1a4b3c5c58a0b9f1a6bfd0aa08bdc15da57fa690a1f6f26d1a6a3d3cc7aa5 \ 291 | --hash=sha256:58a3df45ecff9fb4c1e2c75501bdcd1fb5ad17c1c0222ea24381a4345527b4ad \ 292 | --hash=sha256:688e4ca47e4ce756843b16ab99d32fb9ff0a39218face0297a1eb8dd4901c794 \ 293 | --hash=sha256:88ca29dc0ae35ed96186be7a291f6fe44c93ee68c510722eb5fe7f0bdeba613d \ 294 | --hash=sha256:98ee1aba2a43210d48b12e29523b9e570295cc3264c981a12e298e9da91bc3be \ 295 | --hash=sha256:9902a7966418daaccb6f7299b50e31b2188a9ba0c38759fb98393208e2318f39 \ 296 | --hash=sha256:abc051353c2b5238bef201502c67a2fb188a92c2779f67d021f570389f22c77b \ 297 | --hash=sha256:c36eaee43268fd15f4f625c688aef114b94fce69b7bd8a268f5db7d9535d2bb0 \ 298 | --hash=sha256:d1624ab6bb98df71a1399a1f3ca9d9659f19151f6eee352aae4e17d14ce5c7cd 299 | requests==2.22.0 \ 300 | --hash=sha256:11e007a8a2aa0323f5a921e9e6a2d7e4e67d9877e85773fba9ba6419025cbeb4 \ 301 | --hash=sha256:9cf5292fcd0f598c671cfc1e0d7d1a7f13bb8085e9a590f48c010551dc6c4b31 302 | rtree==0.8.3 \ 303 | --hash=sha256:6cb9cf3000963ea6a3db777a597baee2bc55c4fc891e4f1967f262cc96148649 \ 304 | --hash=sha256:8526431aa15d8cea1c07b451ce853b62b1da4cf6b74c07810aba5b81d12efe66 \ 305 | --hash=sha256:a9e67386073f93f22449f396d5993dfe479335169c376f3f7fb04a396391f0dc 306 | scipy==1.3.0 \ 307 | --hash=sha256:03b1e0775edbe6a4c64effb05fff2ce1429b76d29d754aa5ee2d848b60033351 \ 308 | --hash=sha256:09d008237baabf52a5d4f5a6fcf9b3c03408f3f61a69c404472a16861a73917e \ 309 | --hash=sha256:10325f0ffac2400b1ec09537b7e403419dcd25d9fee602a44e8a32119af9079e \ 310 | --hash=sha256:1db9f964ed9c52dc5bd6127f0dd90ac89791daa690a5665cc01eae185912e1ba \ 311 | --hash=sha256:409846be9d6bdcbd78b9e5afe2f64b2da5a923dd7c1cd0615ce589489533fdbb \ 312 | --hash=sha256:4907040f62b91c2e170359c3d36c000af783f0fa1516a83d6c1517cde0af5340 \ 313 | --hash=sha256:6c0543f2fdd38dee631fb023c0f31c284a532d205590b393d72009c14847f5b1 \ 314 | --hash=sha256:826b9f5fbb7f908a13aa1efd4b7321e36992f5868d5d8311c7b40cf9b11ca0e7 \ 315 | --hash=sha256:a7695a378c2ce402405ea37b12c7a338a8755e081869bd6b95858893ceb617ae \ 316 | --hash=sha256:a84c31e8409b420c3ca57fd30c7589378d6fdc8d155d866a7f8e6e80dec6fd06 \ 317 | --hash=sha256:adadeeae5500de0da2b9e8dd478520d0a9945b577b2198f2462555e68f58e7ef \ 318 | --hash=sha256:b283a76a83fe463c9587a2c88003f800e08c3929dfbeba833b78260f9c209785 \ 319 | --hash=sha256:c19a7389ab3cd712058a8c3c9ffd8d27a57f3d84b9c91a931f542682bb3d269d \ 320 | --hash=sha256:c3bb4bd2aca82fb498247deeac12265921fe231502a6bc6edea3ee7fe6c40a7a \ 321 | --hash=sha256:c5ea60ece0c0c1c849025bfc541b60a6751b491b6f11dd9ef37ab5b8c9041921 \ 322 | --hash=sha256:db61a640ca20f237317d27bc658c1fc54c7581ff7f6502d112922dc285bdabee 323 | shapely==1.6.4.post2 \ 324 | --hash=sha256:0378964902f89b8dbc332e5bdfa08e0bc2f7ab39fecaeb17fbb2a7699a44fe71 \ 325 | --hash=sha256:34e7c6f41fb27906ccdf2514ee44a5774b90b39a256b6511a6a57d11ffe64999 \ 326 | --hash=sha256:3ca69d4b12e2b05b549465822744b6a3a1095d8488cc27b2728a06d3c07d0eee \ 
327 | --hash=sha256:3e9388f29bd81fcd4fa5c35125e1fbd4975ee36971a87a90c093f032d0e9de24 \ 328 | --hash=sha256:3ef28e3f20a1c37f5b99ea8cf8dcb58e2f1a8762d65ed2d21fd92bf1d4811182 \ 329 | --hash=sha256:523c94403047eb6cacd7fc1863ebef06e26c04d8a4e7f8f182d49cd206fe787e \ 330 | --hash=sha256:5d22a1a705c2f70f61ccadc696e33d922c1a92e00df8e1d58a6ade14dd7e3b4f \ 331 | --hash=sha256:714b6680215554731389a1bbdae4cec61741aa4726921fa2b2b96a6f578a2534 \ 332 | --hash=sha256:7dfe1528650c3f0dc82f41a74cf4f72018288db9bfb75dcd08f6f04233ec7e78 \ 333 | --hash=sha256:ba58b21b9cf3c33725f7f530febff9ed6a6846f9d0bf8a120fc74683ff919f89 \ 334 | --hash=sha256:c4b87bb61fc3de59fc1f85e71a79b0c709dc68364d9584473697aad4aa13240f \ 335 | --hash=sha256:ebb4d2bee7fac3f6c891fcdafaa17f72ab9c6480f6d00de0b2dc9a5137dfe342 336 | six==1.12.0 \ 337 | --hash=sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c \ 338 | --hash=sha256:d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73 \ 339 | # via cycler, pytest, python-dateutil 340 | snuggs==1.4.6 \ 341 | --hash=sha256:5ac04dadd8ba20e70ab2a0d565fe2e1a7347635aa2aaf3650d1551b1ef941994 \ 342 | --hash=sha256:8b87e5754fd2d0d1295b70bd502419f6f82be98c4bf668552da9d7c5f4387d1f \ 343 | # via rasterio 344 | supermercado==0.0.5 \ 345 | --hash=sha256:c0d7b8ce428681c597d3f3a51df8bc1b347a6cfc3c1d278a7e68fe6b991a5407 346 | toml==0.10.0 \ 347 | --hash=sha256:229f81c57791a41d65e399fc06bf0848bab550a9dfd5ed66df18ce5f05e73d5c \ 348 | --hash=sha256:235682dd292d5899d361a811df37e04a8828a5b1da3115886b73cf81ebc9100e 349 | tqdm==4.32.1 \ 350 | --hash=sha256:0a860bf2683fdbb4812fe539a6c22ea3f1777843ea985cb8c3807db448a0f7ab \ 351 | --hash=sha256:e288416eecd4df19d12407d0c913cbf77aa8009d7fddb18f632aded3bdbdda6b 352 | urllib3==1.25.3 \ 353 | --hash=sha256:b246607a25ac80bedac05c6f282e3cdaf3afb65420fd024ac94435cabe6e18d1 \ 354 | --hash=sha256:dbe59173209418ae49d485b87d1681aefa36252ee85884c31346debd19463232 \ 355 | # via requests 356 | wcwidth==0.1.7 \ 357 | --hash=sha256:3df37372226d6e63e1b1e1eda15c594bca98a22d33a23832a90998faa96bc65e \ 358 | --hash=sha256:f4ebe71925af7b40a864553f761ed559b43544f8f71746c2d756c7fe788ade7c \ 359 | # via pytest 360 | werkzeug==0.15.4 \ 361 | --hash=sha256:865856ebb55c4dcd0630cdd8f3331a1847a819dda7e8c750d3db6f2aa6c0209c \ 362 | --hash=sha256:a0b915f0815982fb2a09161cb8f31708052d0951c3ba433ccc5e1aa276507ca6 \ 363 | # via flask 364 | zipp==0.5.1 \ 365 | --hash=sha256:8c1019c6aad13642199fbe458275ad6a84907634cc9f0989877ccc4a2840139d \ 366 | --hash=sha256:ca943a7e809cc12257001ccfb99e3563da9af99d52f261725e96dfe0f9275bc3 \ 367 | # via importlib-metadata 368 | --------------------------------------------------------------------------------