├── .dockerignore
├── .eslintrc
├── .gitignore
├── EXPERIMENTS.md
├── LICENSE
├── README.md
├── cover.js
├── data
│   ├── spacenet-1
│   │   ├── all.txt
│   │   ├── classes.json
│   │   ├── train.txt
│   │   └── val.txt
│   ├── training-set-10
│   │   ├── classes.json
│   │   ├── sample.geojson
│   │   ├── sample.txt
│   │   ├── train.txt
│   │   └── val.txt
│   ├── training-set-11
│   │   ├── classes.json
│   │   ├── sample.geojson
│   │   ├── sample.txt
│   │   ├── train.txt
│   │   └── val.txt
│   ├── training-set-12
│   │   ├── classes.json
│   │   ├── sample.geojson
│   │   ├── sample.txt
│   │   ├── train.txt
│   │   └── val.txt
│   ├── training-set-6
│   │   ├── classes.json
│   │   ├── sample.geojson
│   │   ├── sample.txt
│   │   ├── test.txt
│   │   ├── train.txt
│   │   └── val.txt
│   ├── training-set-7
│   │   ├── classes.json
│   │   ├── sample.geojson
│   │   ├── sample.txt
│   │   ├── train.txt
│   │   └── val.txt
│   ├── training-set-8
│   │   ├── classes.json
│   │   ├── sample.geojson
│   │   ├── sample.txt
│   │   ├── train.txt
│   │   └── val.txt
│   └── training-set-9
│       ├── classes.json
│       ├── sample.geojson
│       ├── sample.txt
│       ├── train.txt
│       └── val.txt
├── docker
│   ├── caffe-segnet
│   │   ├── Dockerfile.cpu
│   │   ├── Dockerfile.cuda8
│   │   └── Dockerfile.gpu
│   ├── skynet-monitor
│   │   ├── Dockerfile
│   │   └── build.sh
│   ├── skynet-run
│   │   ├── Dockerfile.cpu
│   │   ├── Dockerfile.gpu
│   │   ├── Dockerfile.local-cpu
│   │   ├── Dockerfile.local-cuda8
│   │   ├── Dockerfile.local-gpu
│   │   └── build.sh
│   └── skynet-train
│       ├── Dockerfile-cuda8.0.gpu
│       └── Dockerfile.gpu
├── monitor
│   ├── dump-logs.sh
│   ├── index.html
│   ├── monitor.js
│   └── start.sh
├── nginx.conf
├── package.json
├── post-process
│   ├── tilestitching
│   │   ├── .gitignore
│   │   ├── README.md
│   │   ├── index.js
│   │   ├── lib
│   │   │   ├── binary.js
│   │   │   ├── img.js
│   │   │   ├── tilepaths.js
│   │   │   └── tiles.js
│   │   ├── package.json
│   │   └── yarn.lock
│   └── vectorization
│       ├── .gitignore
│       ├── README.md
│       ├── requirements.txt
│       └── vectorize.py
├── results-viewer
│   ├── README.md
│   ├── access-token.js
│   ├── dist
│   │   ├── view.html
│   │   └── view.js
│   ├── get-tile-url.js
│   ├── map.js
│   └── view.js
├── segnet
│   ├── .gitignore
│   ├── batch_inference.py
│   ├── compute_bn_statistics.py
│   ├── demo.js
│   ├── extract-log-data.sh
│   ├── get-model-metadata
│   ├── inference.py
│   ├── local_inference.py
│   ├── metrics.py
│   ├── queue.py
│   ├── run-test
│   ├── setup-model
│   ├── static
│   │   ├── demo.js
│   │   └── index.html
│   ├── templates
│   │   ├── bayesian_segnet_basic_inference.prototxt
│   │   ├── bayesian_segnet_basic_train.prototxt
│   │   ├── bayesian_segnet_inference.prototxt
│   │   ├── bayesian_segnet_train.prototxt
│   │   ├── segnet_basic_inference.prototxt
│   │   ├── segnet_basic_train.prototxt
│   │   ├── segnet_inference.prototxt
│   │   ├── segnet_train.prototxt
│   │   └── solver.prototxt
│   ├── test_segmentation.py
│   ├── tile_server.py
│   ├── train
│   └── vectorize.py
├── start_instance
├── start_spot_instance
├── user_data.sh
├── util
│   ├── README.md
│   ├── draw_net.py
│   ├── extract_seconds.py
│   ├── extract_seconds.pyc
│   ├── parse_log.sh
│   ├── plot_training_log.py
│   ├── stop-this-instance.sh
│   └── train.sh
└── vectorize.js
/.dockerignore:
--------------------------------------------------------------------------------
1 | .git
2 | node_modules
3 | temp
4 |
--------------------------------------------------------------------------------
/.eslintrc:
--------------------------------------------------------------------------------
1 | {
2 | "extends": ["standard"],
3 | "env": {
4 | "node": true,
5 | "es6": true,
6 | "browser": true
7 | }
8 | }
9 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules
2 | temp
3 | *.pyc
4 |
--------------------------------------------------------------------------------
/EXPERIMENTS.md:
--------------------------------------------------------------------------------
1 | # Experiments
2 |
3 | Here are some of the training datasets and trained models that have come out of
4 | our experiments. Trained weights (`.caffemodel` files) are available in the
5 | [requester-pays](http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html)
6 | S3 bucket `skynet-models` -- see below for specific paths.
7 |
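For example, with the AWS CLI you can fetch one of the manifests like this (as
the requester, you pay the transfer costs):

```sh
aws s3 cp s3://skynet-models/segnet-ts7-1/manifest.txt . --request-payer requester
```
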
8 | # Training Data
9 |
10 | The license for the satellite/aerial imagery we used means that we cannot
11 | redistribute the files ourselves, but the
12 | [skynet-data](https://github.com/developmentseed/skynet-data) scripts should
13 | hopefully make it easier for you to create your own. (If you want to reproduce
14 | one of these exactly, see the `/data` directory in this repo for a list of
15 | specific map tiles used for each training set.)
16 |
17 | | Set | N | Resolution | Labels | Source | Location |
18 | |-----|---|------------|--------|--------|----------|
19 | | [training-set-7](https://github.com/developmentseed/skynet-train/blob/master/data/training-set-7/sample.geojson) | 3954 | z16 | road (stroke, 5px) | Mapbox Satellite | Northeastern USA and some of CAN |
20 | | [training-set-8](https://github.com/developmentseed/skynet-train/blob/master/data/training-set-8/sample.geojson) | 24747 | z17 | water (fill) building (fill) road (stroke, 5px) | Mapbox Satellite | Continental USA |
21 | | [training-set-9](https://github.com/developmentseed/skynet-train/blob/master/data/training-set-9/sample.geojson) | 6747 | z17 | road (stroke, 5px) | Mapbox Satellite | Seattle, USA |
22 | | [training-set-10](https://github.com/developmentseed/skynet-train/blob/master/data/training-set-10/sample.geojson) | 6914 | z17 | water (fill) building (fill) road (stroke, 5px) | Mapbox Satellite | Seattle, USA |
23 | | [training-set-11](https://github.com/developmentseed/skynet-train/blob/master/data/training-set-11/sample.geojson) | 6841 | z17 | building (fill) road (stroke, 5px) | Mapbox Satellite | Seattle, WA, USA |
24 | | [training-set-12](https://github.com/developmentseed/skynet-train/blob/master/data/training-set-12/sample.geojson) | 6450 | z17 | building (fill) road (stroke, 5px) | Mapbox Satellite | Dar es Salaam, TZA |
25 | | [spacenet-1](https://github.com/developmentseed/skynet-train/blob/master/data/spacenet-1) | 3075 | 50cm | building (fill) | [SpaceNet on AWS](https://aws.amazon.com/public-datasets/spacenet/) | Rio De Janeiro, BRA |
26 |
27 |
28 | # Trained Models
29 |
30 | A model with 'tsX' in its name corresponds to 'training-set-X' above.
31 |
32 |
33 | ## segnet-ts7-1
34 |
35 | Trained caffemodel files: s3://skynet-models/segnet-ts7-1/manifest.txt
36 |
37 |
38 |
39 | |Iterations|Correctness|Completeness|View Results|
40 | |----------|-----------|------------|------------|
41 | |5000|0.3256573519036912|0.7868625456307186|[view results](https://skynet-results.s3.amazonaws.com/view.html?baseurl=segnet-ts7-1/test_5000/)|
42 |
43 |
44 | ## segnet-ts7-2
45 |
46 | Trained caffemodel files: s3://skynet-models/segnet-ts7-2/manifest.txt
47 |
48 |
49 |
50 | |Iterations|Correctness|Completeness|View Results|
51 | |----------|-----------|------------|------------|
52 | |60000|0.5058499453227574|0.6716329348245725|[view results](https://skynet-results.s3.amazonaws.com/view.html?baseurl=segnet-ts7-2/test_60000/)|
53 |
54 |
55 | ## segnet-ts9-1
56 |
57 | Trained caffemodel files: s3://skynet-models/segnet-ts9-1/manifest.txt
58 |
59 |
60 | [Compare results](https://skynet-results.s3.amazonaws.com/view.html?baseurl=segnet-ts9-1/test_init-vgg_5000&baseurl=segnet-ts9-1/test_init-vgg_55000)
61 |
62 | |Iterations|Correctness|Completeness|View Results|
63 | |----------|-----------|------------|------------|
64 | |5000|0.518253184652922|0.8573735411419057|[view results](https://skynet-results.s3.amazonaws.com/view.html?baseurl=segnet-ts9-1/test_init-vgg_5000/)|
65 | |55000|0.7707086613419863|0.7196262477292044|[view results](https://skynet-results.s3.amazonaws.com/view.html?baseurl=segnet-ts9-1/test_init-vgg_55000/)|
66 |
67 |
68 | ## segnet-ts10-1
69 |
70 | Trained caffemodel files: s3://skynet-models/segnet-ts10-1/manifest.txt
71 |
72 |
73 | [Compare results](https://skynet-results.s3.amazonaws.com/view.html?baseurl=segnet-ts10-1/test_20K&baseurl=segnet-ts10-1/test_40K)
74 |
75 | |Iterations|Correctness|Completeness|View Results|
76 | |----------|-----------|------------|------------|
77 | |20K|0.427194953622606|0.46094955169178686|[view results](https://skynet-results.s3.amazonaws.com/view.html?baseurl=segnet-ts10-1/test_20K/)|
78 | |40K|0.5728470881115866|0.638696209213571|[view results](https://skynet-results.s3.amazonaws.com/view.html?baseurl=segnet-ts10-1/test_40K/)|
79 |
80 |
81 | ## segnet-ts11-1
82 |
83 | Trained caffemodel files: s3://skynet-models/segnet-ts11-1/manifest.txt
84 |
85 |
86 | [Compare results](https://skynet-results.s3.amazonaws.com/view.html?baseurl=segnet-ts11-1/test_10K&baseurl=segnet-ts11-1/test_30K)
87 |
88 | |Iterations|Correctness|Completeness|View Results|
89 | |----------|-----------|------------|------------|
90 | |10K|0.6246305164870534|0.6131471814748839|[view results](https://skynet-results.s3.amazonaws.com/view.html?baseurl=segnet-ts11-1/test_10K/)|
91 | |30K|0.7283672440907986|0.45904370319881704|[view results](https://skynet-results.s3.amazonaws.com/view.html?baseurl=segnet-ts11-1/test_30K/)|
92 |
93 |
94 | ## segnet-ts12-1
95 |
96 | Trained caffemodel files: s3://skynet-models/segnet-ts12-1/manifest.txt
97 |
98 |
99 | [Compare results](https://skynet-results.s3.amazonaws.com/view.html?baseurl=segnet-ts12-1/test_15K&baseurl=segnet-ts12-1/test_25K&baseurl=segnet-ts12-1/test_40K)
100 |
101 | |Iterations|Correctness|Completeness|View Results|
102 | |----------|-----------|------------|------------|
103 | |15K|0.6706866344254577|0.8394262122454237|[view results](https://skynet-results.s3.amazonaws.com/view.html?baseurl=segnet-ts12-1/test_15K/)|
104 | |25K|0.6006667123465258|0.8542909458638581|[view results](https://skynet-results.s3.amazonaws.com/view.html?baseurl=segnet-ts12-1/test_25K/)|
105 | |40K|0.6372450268591862|0.8168847046209704|[view results](https://skynet-results.s3.amazonaws.com/view.html?baseurl=segnet-ts12-1/test_40K/)|
106 |
107 |
108 | ## spacenet-1.0
109 |
110 | Trained caffemodel files: s3://skynet-models/spacenet-1.0/manifest.txt
111 |
112 |
113 | [Compare results](https://skynet-results.s3.amazonaws.com/view.html?baseurl=spacenet-1.0/snapshots/segnet_iter_5000.results&baseurl=spacenet-1.0/snapshots/segnet_iter_10000.results&baseurl=spacenet-1.0/snapshots/segnet_iter_15000.results&baseurl=spacenet-1.0/snapshots/segnet_iter_20000.results&baseurl=spacenet-1.0/snapshots/segnet_iter_25000.results)
114 |
115 | |Iterations|Correctness|Completeness|View Results|
116 | |----------|-----------|------------|------------|
117 | |5000|0.7884645371187057|0.9409724819069966|[view results](https://skynet-results.s3.amazonaws.com/view.html?baseurl=spacenet-1.0/snapshots/segnet_iter_5000.results/)|
118 | |10000|0.7882820153922176|0.9430313923848284|[view results](https://skynet-results.s3.amazonaws.com/view.html?baseurl=spacenet-1.0/snapshots/segnet_iter_10000.results/)|
119 | |15000|0.7902352382903021|0.9457690149692252|[view results](https://skynet-results.s3.amazonaws.com/view.html?baseurl=spacenet-1.0/snapshots/segnet_iter_15000.results/)|
120 | |20000|0.7918245263758488|0.9426878128090946|[view results](https://skynet-results.s3.amazonaws.com/view.html?baseurl=spacenet-1.0/snapshots/segnet_iter_20000.results/)|
121 | |25000|0.7923963798908541|0.9405211082529236|[view results](https://skynet-results.s3.amazonaws.com/view.html?baseurl=spacenet-1.0/snapshots/segnet_iter_25000.results/)|
122 |
123 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | ---
3 |
4 | The following files include substantial content from
5 | https://github.com/alexgkendall/SegNet-Tutorial by Alex Kendall, licensed for
6 | non-commercial use under [CC-BY-NC
7 | license](http://creativecommons.org/licenses/by-nc/4.0/).
8 |
9 | segnet/README.md
10 | segnet/compute_bn_statistics.py
11 | segnet/test_segmentation.py
12 |
13 | ---
14 |
15 | The following were adapted from https://github.com/BVLC/caffe
16 |
17 | draw_net.py
18 | extract_seconds.py
19 | extract_seconds.pyc
20 | parse_log.sh
21 | plot_training_log.py
22 |
23 | COPYRIGHT
24 |
25 | All contributions by the University of California:
26 | Copyright (c) 2014, 2015, The Regents of the University of California (Regents)
27 | All rights reserved.
28 |
29 | All other contributions:
30 | Copyright (c) 2014, 2015, the respective contributors
31 | All rights reserved.
32 |
33 | Caffe uses a shared copyright model: each contributor holds copyright over
34 | their contributions to Caffe. The project versioning records all such
35 | contribution and copyright details. If a contributor wants to further mark
36 | their specific copyright on a particular contribution, they should indicate
37 | their copyright solely in the commit message of the change when it is
38 | committed.
39 |
40 | LICENSE
41 |
42 | Redistribution and use in source and binary forms, with or without
43 | modification, are permitted provided that the following conditions are met:
44 |
45 | 1. Redistributions of source code must retain the above copyright notice, this
46 | list of conditions and the following disclaimer.
47 | 2. Redistributions in binary form must reproduce the above copyright notice,
48 | this list of conditions and the following disclaimer in the documentation
49 | and/or other materials provided with the distribution.
50 |
51 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
52 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
53 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
54 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
55 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
56 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
57 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
58 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
60 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 |
62 | CONTRIBUTION AGREEMENT
63 |
64 | By contributing to the BVLC/caffe repository through pull-request, comment,
65 | or otherwise, the contributor releases their content to the
66 | license and copyright terms herein.
67 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 | # SegNet training and testing scripts
3 |
4 | These scripts are for use in training and testing the [SegNet neural
5 | network](http://mi.eng.cam.ac.uk/projects/segnet/), particularly with
6 | OpenStreetMap + Satellite Imagery training data generated by
7 | [skynet-data](https://github.com/developmentseed/skynet-data).
8 |
9 | Contributions are very welcome!
10 |
11 | # Quick start
12 |
13 | The quickest and easiest way to use these scripts is via the
14 | `developmentseed/skynet-train` docker image, but note that to make this work
15 | with a GPU (necessary for reasonable training times), you will need a machine
16 | set up to use [`nvidia-docker`](https://github.com/NVIDIA/nvidia-docker). (The
17 | [start_instance](https://github.com/developmentseed/skynet-train/blob/master/start_instance)
18 | script uses `docker-machine` to spin up an AWS EC2 g2 instance and set it up with
19 | nvidia-docker. The [start_spot_instance](https://github.com/developmentseed/skynet-train/blob/master/start_spot_instance)
20 | script does the same thing but creates a [spot](https://aws.amazon.com/ec2/spot/)
21 | instance instead of an on-demand one.)
22 |
23 | 1. Create a training dataset with [skynet-data](https://github.com/developmentseed/skynet-data).
24 | 2. Run:
25 |
26 | ```sh
27 | nvidia-docker run \
28 | -v /path/to/training/dataset:/data \
29 | -v /path/to/training/output:/output \
30 | -e AWS_ACCESS_KEY_ID=... \
31 | -e AWS_SECRET_ACCESS_KEY=... \
32 | developmentseed/skynet-train:gpu \
33 | --sync s3://your-bucket/training/blahbla
34 | ```
35 |
36 | This will kick off a training run with the given data. Every 10000 iterations,
37 | the model will be snapshotted and run on the test data, the training "loss"
38 | will be plotted, and all of this will be uploaded to S3. (Omit the `--sync` argument
39 | and AWS creds to skip the upload.)
40 |
41 | Each batch of test results includes a `view.html` file that shows a bare-bones
42 | viewer allowing you to browse the results on a map and compare model outputs to
43 | the ground truth data. Use it like:
44 | - http://your-bucket-url/...test-dir.../view.html?imagery_source=MAPID&access_token=MAPBOX_ACCESS_TOKEN where `MAPID` points to Mapbox-hosted raster tiles used for training. (Defaults to `mapbox.satellite`.)
45 | - http://your-bucket-url/...test-dir.../view.html?imagery_source=http://yourtiles.com/{z}/{x}/{y} for non-Mapbox imagery tiles
46 |
47 |
48 | Customize the training run with these params:
49 |
50 | ```
51 | --model MODEL # segnet or segnet_basic, defaults to segnet
52 | --output OUTPUT # directory in which to output training assets
53 | --data DATA # training dataset
54 | [--fetch-data FETCH_DATA] # s3 uri from which to download training data into DATA
55 | [--snapshot SNAPSHOT] # snapshot frequency
56 | [--cpu] # sets cpu mode
57 | [--gpu [GPU [GPU ...]]] # set gpu devices to use
58 | [--display-frequency DISPLAY_FREQUENCY] # frequency of logging output (affects granularity of plots)
59 | [--iterations ITERATIONS] # total number of iterations to run
60 | [--crop CROP] # crop training images to CROPxCROP pixels
61 | [--batch-size BATCH_SIZE] # batch size (adjust this up or down based on GPU size. defaults to 6 for segnet and 16 for segnet_basic)
62 | [--sync SYNC]
63 | ```
64 |
65 | ## Monitoring
66 |
67 | On an instance where training is happening, expose a simple monitoring page with:
68 |
69 | ```sh
70 | docker run --rm -it -v /mnt/training:/output -p 80:8080 developmentseed/skynet-monitor
71 | ```
72 |
73 | # Details
74 |
75 | Prerequisites / Dependencies:
76 | - Node and Python
77 | - As of now, training SegNet requires building the [caffe-segnet](https://github.com/alexgkendall/caffe-segnet) fork of Caffe.
78 | - Install node dependencies by running `npm install` in the root directory of this repo.
79 |
80 | ## Set up model definition
81 |
82 | After creating a dataset with the [skynet-data](https://github.com/developmentseed/skynet-data)
83 | scripts, set up the model `prototxt` definition files by running:
84 |
85 | ```
86 | segnet/setup-model --data /path/to/dataset/ --output /path/to/training/workdir
87 | ```
88 |
89 | Also copy `segnet/templates/solver.prototxt` to the training work directory, and
90 | edit it to (a) point to the right paths, and (b) set up the learning
91 | "hyperparameters".
92 |
93 | (NOTE: this is hard to get right at first; when we post links to a couple of
94 | pre-trained models, we'll also include a copy of the solver.prototxt we used as
95 | a reference / starting point.)
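
In the meantime, here is a minimal illustrative `solver.prototxt`; the values
are placeholders to tune for your dataset, not necessarily the ones we used:

```
net: "segnet_train.prototxt"        # model definition produced by setup-model
base_lr: 0.001                      # initial learning rate
lr_policy: "step"
gamma: 1.0
stepsize: 10000000                  # effectively a constant learning rate
momentum: 0.9
weight_decay: 0.0005
max_iter: 40000
display: 20                         # log every 20 iterations
snapshot: 1000                      # snapshot frequency
snapshot_prefix: "snapshots/segnet"
solver_mode: GPU
```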
96 |
97 |
98 | ## Train
99 |
100 | Download the pre-trained VGG weights `VGG_ILSVRC_16_layers.caffemodel` from
101 | http://www.robots.ox.ac.uk/~vgg/research/very_deep/
102 |
103 | From your training work directory, run
104 |
105 | ```
106 | $CAFFE_ROOT/build/tools/caffe train -gpu 0 -solver solver.prototxt \
107 | -weights VGG_ILSVRC_16_layers.caffemodel \
108 | 2>&1 | tee train.log
109 | ```
110 |
111 | You can monitor the training with:
112 |
113 | ```
114 | segnet/util/plot_training_log.py train.log --watch
115 | ```
116 |
117 | This will generate and continually update a plot of the "loss" (i.e., training
118 | error) which should gradually decrease as training progresses.
119 |
120 | ## Testing the Trained Network
121 |
122 | ```
123 | segnet/run-test --output /path/for/test/results/ --train /path/to/segnet_train.prototxt --weights /path/to/snapshots/segnet_blahblah_iter_XXXXX.caffemodel --classes /path/to/dataset/classes.json
124 | ```
125 |
126 | This script essentially carries out the instructions outlined here:
127 | http://mi.eng.cam.ac.uk/projects/segnet/tutorial.html
128 |
129 | ## Inference
130 |
131 | After you have a trained and tested network, you'll often want to use it to predict over a larger area. We've included scripts for running this process locally or on AWS.
132 |
133 | ### Local Inference
134 |
135 | To run predictions locally you'll need:
136 | - Raster imagery (as either a GeoTIFF or a VRT)
137 | - A line-delimited list of [XYZ tile indices](https://wiki.openstreetmap.org/wiki/Slippy_map_tilenames) to predict on (e.g. `49757-74085-17`); these can be generated with [geodex](https://github.com/developmentseed/geodex)
138 | - A skynet model, trained weights, and class definitions ( `.prototxt`, `.caffemodel`, `.json`)
139 |
140 | To run:
141 |
142 | ```sh
143 | docker run -v /path/to/inputs:/inputs -v /path/to/model:/model -v /path/to/output/:/inference \
144 | developmentseed/skynet-run:local-gpu /inputs/raster.tif /inputs/tiles.txt \
145 | --model /model/segnet_deploy.prototxt \
146 | --weights /model/weights.caffemodel \
147 | --classes /model/classes.json \
148 | --output /inference
149 | ```
150 |
151 | If you are running on a CPU, use the `:local-cpu` docker image and add `--cpu-only` as a final flag to the above command.
152 |
153 | The predicted rasters and vectorized GeoJSON outputs will be located in `/inference` (and the corresponding mounted volume).
154 |
155 | ### AWS Inference
156 |
157 | TODO: for now, see command line instructions in `segnet/queue.py` and `segnet/batch_inference.py`
158 |
159 | ## GPU
160 |
161 | These scripts were originally developed for use on an AWS `g2.2xlarge` instance. To support newer GPUs, you may need to:
162 | - use a [newer NVIDIA driver](https://github.com/developmentseed/skynet-train/blob/master/user_data.sh#L22)
163 | - use a newer version of CUDA. To support CUDA8+, you can use the docker images tagged with `:cuda8`. They are built off an updated [`caffe-segnet` fork](https://github.com/TimoSaemann/caffe-segnet-cudnn5) with support for `cuDNN5`.
164 |
--------------------------------------------------------------------------------
/cover.js:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env node
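// Usage: ./cover.js <geojson-file> <min-zoom> [max-zoom]
// Prints one [x, y, z] tile index per line covering the input geometry
// (max-zoom defaults to min-zoom).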
2 |
3 | const fs = require('fs')
4 | const cover = require('tile-cover')
5 |
6 | const data = JSON.parse(fs.readFileSync(process.argv[2]))
7 | const minZoom = process.argv[3]
8 | const maxZoom = process.argv[4] || minZoom
9 | go(data)
10 |
11 | function go (g) {
12 | if (Array.isArray(g)) {
13 | return g.forEach(go)
14 | } else if (g.type === 'FeatureCollection') {
15 | return g.features.forEach(go)
16 | } else if (g.type === 'Feature') {
17 | return go(g.geometry)
18 | }
19 | // else if (g.type === 'MultiPolygon') {
20 | // return g.coordinates.forEach(polygonCoords => {
21 | // const geom = {
22 | // type: 'Polygon',
23 | // coordinates: polygonCoords
24 | // }
25 | // go(geom)
26 | // })
27 | // }
28 |
29 | const tiles = cover.tiles(g, {
30 | min_zoom: parseInt(minZoom, 10),
31 | max_zoom: parseInt(maxZoom, 10)
32 | })
33 | tiles.forEach(tile => console.log(JSON.stringify(tile)))
34 | }
35 |
36 |
--------------------------------------------------------------------------------
/data/spacenet-1/classes.json:
--------------------------------------------------------------------------------
1 | [{
2 | "name": "Building",
3 | "color": "#ff0000",
4 | "stroke-width": "1",
5 | "filter": "[building].match('.+')",
6 | "sourceLayer": "osm"
7 | }]
8 |
--------------------------------------------------------------------------------
/data/training-set-10/classes.json:
--------------------------------------------------------------------------------
1 | [{
2 | "name": "Water",
3 | "color": "#0000ff",
4 | "stroke-width": "5",
5 | "filter": "[waterway].match('.+') or [natural] = 'water' or [natural] = 'bay'"
6 | }, {
7 | "name": "Road",
8 | "color": "#ffffff",
9 | "stroke-width": "5",
10 | "filter": "[highway].match('.+') and not ([tunnel] = 'yes' or [tunnel]='true')"
11 | }, {
12 | "name": "Building",
13 | "color": "#ff0000",
14 | "stroke-width": "1",
15 | "filter": "[building].match('.+')"
16 | }]
17 |
--------------------------------------------------------------------------------
/data/training-set-11/classes.json:
--------------------------------------------------------------------------------
1 | [{
2 | "name": "Road",
3 | "color": "#ffffff",
4 | "stroke-width": "5",
5 | "filter": "[highway].match('.+') and not ([tunnel] = 'yes' or [tunnel]='true')"
6 | }, {
7 | "name": "Building",
8 | "color": "#ff0000",
9 | "stroke-width": "1",
10 | "filter": "[building].match('.+')"
11 | }]
12 |
--------------------------------------------------------------------------------
/data/training-set-12/classes.json:
--------------------------------------------------------------------------------
1 | [{
2 | "name": "Road",
3 | "color": "#ffff00",
4 | "stroke-width": "5",
5 | "filter": "[highway].match('.+') and not ([tunnel] = 'yes' or [tunnel]='true')",
6 | "sourceLayer": "osm"
7 | }, {
8 | "name": "Building",
9 | "color": "#ff0000",
10 | "stroke-width": "1",
11 | "filter": "[building].match('.+')",
12 | "sourceLayer": "osm"
13 | }]
14 |
--------------------------------------------------------------------------------
/data/training-set-6/classes.json:
--------------------------------------------------------------------------------
1 | [{
2 | "name": "Water",
3 | "color": "#0000ff",
4 | "stroke-width": "1",
5 | "filter": "[waterway].match('.+') or [natural] = 'water' or [natural] = 'bay'"
6 | }, {
7 | "name": "Road",
8 | "color": "#ffffff",
9 | "stroke-width": "1",
10 | "filter": "[highway].match('.+') and not ([tunnel] = 'yes' or [tunnel]='true')"
11 | }, {
12 | "name": "Building",
13 | "color": "#ff0000",
14 | "stroke-width": "1",
15 | "filter": "[building].match('.+')"
16 | }]
17 |
--------------------------------------------------------------------------------
/data/training-set-7/classes.json:
--------------------------------------------------------------------------------
1 | [{
2 | "name": "Road",
3 | "color": "#ffff00",
4 | "stroke-width": "5",
5 | "filter": "[highway].match('.+') and not ([tunnel] = 'yes' or [tunnel]='true')"
6 | }]
7 |
--------------------------------------------------------------------------------
/data/training-set-8/classes.json:
--------------------------------------------------------------------------------
1 | [{
2 | "name": "Water",
3 | "color": "#0000ff",
4 | "stroke-width": "5",
5 | "filter": "[waterway].match('.+') or [natural] = 'water' or [natural] = 'bay'"
6 | }, {
7 | "name": "Road",
8 | "color": "#ffffff",
9 | "stroke-width": "5",
10 | "filter": "[highway].match('.+') and not ([tunnel] = 'yes' or [tunnel]='true')"
11 | }, {
12 | "name": "Building",
13 | "color": "#ff0000",
14 | "stroke-width": "1",
15 | "filter": "[building].match('.+')"
16 | }]
17 |
--------------------------------------------------------------------------------
/data/training-set-9/classes.json:
--------------------------------------------------------------------------------
1 | [{
2 | "name": "Road",
3 | "color": "#ffff00",
4 | "stroke-width": "5",
5 | "filter": "[highway].match('.+') and not ([tunnel] = 'yes' or [tunnel]='true')"
6 | }]
7 |
--------------------------------------------------------------------------------
/docker/caffe-segnet/Dockerfile.cpu:
--------------------------------------------------------------------------------
1 | FROM nvidia/cuda:7.0-cudnn2-devel-ubuntu14.04
2 | MAINTAINER anand@developmentseed.org
3 |
4 | RUN apt-get update && apt-get install -y --no-install-recommends \
5 | build-essential \
6 | cmake \
7 | git \
8 | wget \
9 | libatlas-base-dev \
10 | libboost-all-dev \
11 | libgflags-dev \
12 | libgoogle-glog-dev \
13 | libhdf5-serial-dev \
14 | libleveldb-dev \
15 | liblmdb-dev \
16 | libopencv-dev \
17 | libprotobuf-dev \
18 | libsnappy-dev \
19 | protobuf-compiler \
20 | python-dev \
21 | python-numpy \
22 | python-pip \
23 | python-scipy && \
24 | rm -rf /var/lib/apt/lists/*
25 |
26 | ENV CAFFE_ROOT=/opt/caffe
27 | WORKDIR $CAFFE_ROOT
28 |
29 | RUN git clone -b segnet-multi-gpu --depth=1 https://github.com/developmentseed/caffe . && \
30 | for req in $(cat python/requirements.txt) pydot; do pip install $req; done && \
31 | mkdir build && cd build && \
32 | cmake -DCPU_ONLY=1 .. && \
33 | make -j"$(nproc)"
34 |
35 | ENV PYCAFFE_ROOT $CAFFE_ROOT/python
36 | ENV PYTHONPATH $PYCAFFE_ROOT:$PYTHONPATH
37 | ENV PATH $CAFFE_ROOT/build/tools:$PYCAFFE_ROOT:$PATH
38 | RUN echo "$CAFFE_ROOT/build/lib" >> /etc/ld.so.conf.d/caffe.conf && ldconfig
39 |
40 | WORKDIR /workspace
41 |
--------------------------------------------------------------------------------
/docker/caffe-segnet/Dockerfile.cuda8:
--------------------------------------------------------------------------------
1 | FROM nvidia/cuda:8.0-cudnn5-devel-ubuntu14.04
2 | MAINTAINER anand@developmentseed.org
3 |
4 | RUN apt-get update && apt-get install -y --no-install-recommends \
5 | build-essential \
6 | cmake \
7 | git \
8 | wget \
9 | libatlas-base-dev \
10 | libboost-all-dev \
11 | libgflags-dev \
12 | libgoogle-glog-dev \
13 | libhdf5-serial-dev \
14 | libleveldb-dev \
15 | liblmdb-dev \
16 | libopencv-dev \
17 | libprotobuf-dev \
18 | libsnappy-dev \
19 | protobuf-compiler \
20 | python-dev \
21 | python-numpy \
22 | python-pip \
23 | python-scipy && \
24 | rm -rf /var/lib/apt/lists/*
25 |
26 | ENV CAFFE_ROOT=/opt/caffe
27 | WORKDIR $CAFFE_ROOT
28 |
29 | RUN git clone --depth=1 https://github.com/TimoSaemann/caffe-segnet-cudnn5 . && \
30 | for req in $(cat python/requirements.txt) pydot; do pip install $req; done && \
31 | mkdir build && cd build && \
32 | cmake .. && \
33 | make -j"$(nproc)"
34 |
35 | ENV PYCAFFE_ROOT $CAFFE_ROOT/python
36 | ENV PYTHONPATH $PYCAFFE_ROOT:$PYTHONPATH
37 | ENV PATH $CAFFE_ROOT/build/tools:$PYCAFFE_ROOT:$PATH
38 | RUN echo "$CAFFE_ROOT/build/lib" >> /etc/ld.so.conf.d/caffe.conf && ldconfig
39 |
40 | WORKDIR /workspace
41 |
--------------------------------------------------------------------------------
/docker/caffe-segnet/Dockerfile.gpu:
--------------------------------------------------------------------------------
1 | FROM nvidia/cuda:7.0-cudnn2-devel-ubuntu14.04
2 | MAINTAINER anand@developmentseed.org
3 |
4 | RUN apt-get update && apt-get install -y --no-install-recommends \
5 | build-essential \
6 | cmake \
7 | git \
8 | wget \
9 | libatlas-base-dev \
10 | libboost-all-dev \
11 | libgflags-dev \
12 | libgoogle-glog-dev \
13 | libhdf5-serial-dev \
14 | libleveldb-dev \
15 | liblmdb-dev \
16 | libopencv-dev \
17 | libprotobuf-dev \
18 | libsnappy-dev \
19 | protobuf-compiler \
20 | python-dev \
21 | python-numpy \
22 | python-pip \
23 | python-scipy && \
24 | rm -rf /var/lib/apt/lists/*
25 |
26 | ENV CAFFE_ROOT=/opt/caffe
27 | WORKDIR $CAFFE_ROOT
28 |
29 | RUN git clone -b segnet-multi-gpu --depth=1 https://github.com/developmentseed/caffe . && \
30 | for req in $(cat python/requirements.txt) pydot; do pip install $req; done && \
31 | mkdir build && cd build && \
32 | cmake -DUSE_CUDNN=1 .. && \
33 | make -j"$(nproc)"
34 |
35 | ENV PYCAFFE_ROOT $CAFFE_ROOT/python
36 | ENV PYTHONPATH $PYCAFFE_ROOT:$PYTHONPATH
37 | ENV PATH $CAFFE_ROOT/build/tools:$PYCAFFE_ROOT:$PATH
38 | RUN echo "$CAFFE_ROOT/build/lib" >> /etc/ld.so.conf.d/caffe.conf && ldconfig
39 |
40 | WORKDIR /workspace
41 |
--------------------------------------------------------------------------------
/docker/skynet-monitor/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:2.7
2 |
3 | # needed for `column` command
4 | RUN apt-get update && apt-get install -y bsdmainutils
5 |
6 | ADD . /workspace
7 | WORKDIR /workspace
8 |
9 | EXPOSE 8080
10 |
11 | CMD ["monitor/start.sh"]
12 |
--------------------------------------------------------------------------------
/docker/skynet-monitor/build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | docker build -t developmentseed/skynet-monitor -f docker/skynet-monitor/Dockerfile .
4 |
5 |
--------------------------------------------------------------------------------
/docker/skynet-run/Dockerfile.cpu:
--------------------------------------------------------------------------------
1 | FROM developmentseed/caffe-segnet:cpu
2 | ENV DEBIAN_FRONTEND noninteractive
3 | RUN curl -sL https://deb.nodesource.com/setup_6.x | sudo -E bash - && \
4 | sudo apt-get install -y nodejs build-essential libagg-dev libpotrace-dev && \
5 | pip install flask && \
6 | pip install mercantile && \
7 | pip install boto3 && \
8 | pip install git+https://github.com/flupke/pypotrace.git@master
9 |
10 | ADD package.json /workspace/package.json
11 | RUN npm install
12 | ADD . /workspace
13 | EXPOSE 5000
14 |
15 | ENV SKYNET_CPU_ONLY=1
16 |
--------------------------------------------------------------------------------
/docker/skynet-run/Dockerfile.gpu:
--------------------------------------------------------------------------------
1 | FROM developmentseed/caffe-segnet:gpu
2 | ENV DEBIAN_FRONTEND noninteractive
3 | RUN curl -sL https://deb.nodesource.com/setup_6.x | sudo -E bash - && \
4 | sudo apt-get install -y nodejs build-essential libagg-dev libpotrace-dev && \
5 | pip install flask && \
6 | pip install mercantile && \
7 | pip install boto3 && \
8 | pip install git+https://github.com/flupke/pypotrace.git@master
9 |
10 | ADD package.json /workspace/package.json
11 | RUN npm install
12 | ADD . /workspace
13 | EXPOSE 5000
14 |
15 |
--------------------------------------------------------------------------------
/docker/skynet-run/Dockerfile.local-cpu:
--------------------------------------------------------------------------------
1 | FROM developmentseed/caffe-segnet:cpu
2 | ENV DEBIAN_FRONTEND noninteractive
3 | RUN sudo apt-get update && sudo apt-get install curl -y
4 |
5 | # GDAL
6 | RUN sudo apt-get install software-properties-common -y && \
7 | sudo add-apt-repository ppa:ubuntugis/ubuntugis-unstable -y && \
8 | sudo apt-get update && sudo apt-get install gdal-bin python-gdal libgdal1-dev -y
9 |
10 | # Node
11 | RUN curl -sL https://deb.nodesource.com/setup_6.x | sudo -E bash - && \
12 | sudo apt-get install -y nodejs build-essential libagg-dev libpotrace-dev
13 |
14 | # Python
15 | RUN pip install numpy==1.14.2
16 |
17 | RUN pip install flask && \
18 | pip install mercantile && \
19 | pip install rasterio==1.0a12 && \
20 | pip install boto3 && \
21 | pip install pyproj && \
22 | pip install git+https://github.com/flupke/pypotrace.git@master
23 |
24 | ADD package.json /workspace/package.json
25 | RUN npm install
26 | ADD . /workspace
27 | EXPOSE 5000
28 |
--------------------------------------------------------------------------------
/docker/skynet-run/Dockerfile.local-cuda8:
--------------------------------------------------------------------------------
1 | FROM developmentseed/caffe-segnet:cuda8
2 | ENV DEBIAN_FRONTEND noninteractive
3 | RUN sudo apt-get update && sudo apt-get install curl -y
4 |
5 | # GDAL
6 | RUN sudo apt-get install software-properties-common -y && \
7 | sudo add-apt-repository ppa:ubuntugis/ubuntugis-unstable -y && \
8 | sudo apt-get update && sudo apt-get install gdal-bin python-gdal libgdal1-dev -y
9 |
10 | # Node
11 | RUN curl -sL https://deb.nodesource.com/setup_6.x | sudo -E bash - && \
12 | sudo apt-get install -y nodejs build-essential libagg-dev libpotrace-dev
13 |
14 | # Python
15 | RUN pip install numpy==1.14.2
16 |
17 | RUN pip install flask && \
18 | pip install mercantile && \
19 | pip install rasterio==1.0a12 && \
20 | pip install boto3 && \
21 | pip install pyproj && \
22 | pip install git+https://github.com/flupke/pypotrace.git@master
23 |
24 | ADD package.json /workspace/package.json
25 | RUN npm install
26 | ADD . /workspace
27 | EXPOSE 5000
28 |
--------------------------------------------------------------------------------
/docker/skynet-run/Dockerfile.local-gpu:
--------------------------------------------------------------------------------
1 | FROM developmentseed/caffe-segnet:gpu
2 | ENV DEBIAN_FRONTEND noninteractive
3 | RUN sudo apt-get update && sudo apt-get install curl -y
4 |
5 | # GDAL
6 | RUN sudo apt-get install software-properties-common -y && \
7 | sudo add-apt-repository ppa:ubuntugis/ubuntugis-unstable -y && \
8 | sudo apt-get update && sudo apt-get install gdal-bin python-gdal libgdal1-dev -y
9 |
10 | # Node
11 | RUN curl -sL https://deb.nodesource.com/setup_6.x | sudo -E bash - && \
12 | sudo apt-get install -y nodejs build-essential libagg-dev libpotrace-dev
13 |
14 | # Python
15 | RUN pip install numpy==1.14.2
16 |
17 | RUN pip install flask && \
18 | pip install mercantile && \
19 | pip install rasterio==1.0a12 && \
20 | pip install boto3 && \
21 | pip install pyproj && \
22 | pip install git+https://github.com/flupke/pypotrace.git@master
23 |
24 | ADD package.json /workspace/package.json
25 | RUN npm install
26 | ADD . /workspace
27 | EXPOSE 5000
28 |
--------------------------------------------------------------------------------
/docker/skynet-run/build.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -e
4 |
5 | npm run bundle-demo
6 | docker build -t developmentseed/skynet-run:gpu -f docker/skynet-run/Dockerfile.gpu .
7 | docker build -t developmentseed/skynet-run:cpu -f docker/skynet-run/Dockerfile.cpu .
8 |
9 |
--------------------------------------------------------------------------------
/docker/skynet-train/Dockerfile-cuda8.0.gpu:
--------------------------------------------------------------------------------
1 | FROM developmentseed/caffe-segnet:cuda8
2 | MAINTAINER anand@developmentseed.org
3 |
4 | ENV DEBIAN_FRONTEND noninteractive
5 | RUN sudo apt-get update && sudo apt-get install curl -y
6 | RUN curl -sL https://deb.nodesource.com/setup_6.x | sudo -E bash - && \
7 | sudo apt-get install -y nodejs build-essential
8 |
9 | RUN pip install boto3 && \
10 | pip install protobuf && \
11 | pip install cython && \
12 | pip install scikit-image
13 |
14 | # bsdmainutils is for 'paste' and 'column' commands, used in plot_training_log
15 | RUN pip install awscli && \
16 | apt-get install -y bsdmainutils
17 |
18 | ADD package.json /workspace/package.json
19 | RUN npm install
20 |
21 | ADD . /workspace
22 | WORKDIR /workspace
23 |
24 | # Expose demo server port
25 | EXPOSE 5000
26 |
27 | ENTRYPOINT ["python", "-u", "segnet/train"]
28 |
--------------------------------------------------------------------------------
/docker/skynet-train/Dockerfile.gpu:
--------------------------------------------------------------------------------
1 | FROM developmentseed/caffe-segnet:gpu
2 | MAINTAINER anand@developmentseed.org
3 |
4 | ENV DEBIAN_FRONTEND noninteractive
5 | RUN curl -sL https://deb.nodesource.com/setup_6.x | sudo -E bash - && \
6 | sudo apt-get install -y nodejs build-essential
7 |
8 | RUN pip install boto3 && \
9 | pip install protobuf && \
10 | pip install cython && \
11 | pip install scikit-image
12 |
13 | # bsdmainutils is for 'paste' and 'column' commands, used in plot_training_log
14 | RUN pip install awscli && \
15 | apt-get install -y bsdmainutils
16 |
17 | ADD package.json /workspace/package.json
18 | RUN npm install
19 |
20 | ADD . /workspace
21 | WORKDIR /workspace
22 |
23 | # Expose demo server port
24 | EXPOSE 5000
25 |
26 | ENTRYPOINT ["python", "-u", "segnet/train"]
27 |
28 |
--------------------------------------------------------------------------------
/monitor/dump-logs.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | while :
3 | do
4 | segnet/extract-log-data.sh /output > monitor/training.csv
5 | echo "Updated data from logs: $(cat monitor/training.csv | wc -l) observations"
6 | sleep 60
7 | done
8 |
--------------------------------------------------------------------------------
/monitor/index.html:
--------------------------------------------------------------------------------
1 | <!DOCTYPE html>
2 | <html>
3 | <head>
4 | <meta charset="utf-8">
5 | <title>skynet training monitor</title>
6 | </head>
7 | <body>
8 | <p>
9 | Iterations: <span id="iters"></span>
10 | Elapsed (min): <span id="elapsed"></span>
11 | Iterations / min: <span id="iters_per_min"></span>
12 | </p>
13 | <script src="https://d3js.org/d3.v3.min.js"></script>
14 | <script src="monitor.js"></script>
15 | </body>
16 | </html>
--------------------------------------------------------------------------------
/monitor/monitor.js:
--------------------------------------------------------------------------------
1 | /* global d3 */
2 |
3 | var charts = []
4 |
5 | charts.push(createChart(d => +d['#Iters'], d => +d['TrainingLoss'], d => +d['SmoothedLoss']))
6 | charts.push(createChart(d => +d['#Iters'], d => +d['LearningRate']))
7 |
8 | function createChart (xvalue, yvalue, linevalue) {
9 | // Set the dimensions of the canvas / graph
10 | var margin = {top: 30, right: 20, bottom: 30, left: 50}
11 | var width = 600 - margin.left - margin.right
12 | var height = 270 - margin.top - margin.bottom
13 |
14 | // Set the ranges
15 | var x = d3.scale.linear().range([0, width])
16 | var y = d3.scale.linear().range([height, 0])
17 |
18 | // Define the axes
19 | var xAxis = d3.svg.axis().scale(x)
20 | .orient('bottom').ticks(5)
21 |
22 | var yAxis = d3.svg.axis().scale(y)
23 | .orient('left').ticks(5)
24 |
25 | // Define the line
26 | var valueline = d3.svg.line()
27 | .interpolate('basis')
28 | .x(d => x(xvalue(d)))
29 | .y(d => y(linevalue(d)))
30 |
31 | // Adds the svg canvas
32 | var svg = d3.select('body')
33 | .append('svg')
34 | .attr('width', width + margin.left + margin.right)
35 | .attr('height', height + margin.top + margin.bottom)
36 | .append('g')
37 | .attr('transform', 'translate(' + margin.left + ',' + margin.top + ')')
38 |
39 | // Add the X Axis
40 | svg.append('g')
41 | .attr('class', 'x axis')
42 | .attr('transform', 'translate(0,' + height + ')')
43 | // Add the Y Axis
44 | svg.append('g')
45 | .attr('class', 'y axis')
46 |
47 | getData()
48 | setInterval(getData, 60000)
49 |
50 | function update (data) {
51 | svg.selectAll('path').remove()
52 | const circle = svg.selectAll('circle')
53 | .data(data)
54 |
55 | circle.enter().append('circle').attr('class', 'point')
56 | circle
57 | .attr('cx', d => x(xvalue(d)))
58 | .attr('cy', d => y(yvalue(d)))
59 | .attr('r', 1)
60 |
61 | if (linevalue) {
62 | svg.append('path')
63 | .attr('class', 'line')
64 | .attr('d', valueline(data))
65 | }
66 |
67 | svg.select('.x.axis').call(xAxis)
68 | svg.select('.y.axis').call(yAxis)
69 | }
70 |
71 | return {
72 | update: function (data) {
73 | // Scale the range of the data
74 | x.domain(d3.extent(data, xvalue))
75 | y.domain([0, d3.max(data, yvalue)])
76 | update(data)
77 | }
78 | }
79 | }
80 |
81 | function getData () {
82 | d3.csv('training.csv', function (error, data) {
83 | if (error) { return console.error(error) }
84 |
85 | data.sort((da, db) => da['#Iters'] - db['#Iters'])
86 | data.forEach((d, i) => {
87 | let delta = i ? d['Seconds'] - data[i - 1]['Seconds'] : 0
88 | if (delta < 0) {
89 | // this happens at the boundary between log files (every 10000 iterations)
90 | delta = 0
91 | }
92 | d['DeltaSeconds'] = delta
93 | })
94 |
95 | console.log(data)
96 |
97 | const kernel = normaliseKernel([0.1, 0.2, 0.3, 0.2, 0.1]) // gaussian smoothing
98 | convolute(data, kernel, d => +d['TrainingLoss'], 'SmoothedLoss')
99 |
100 | charts.forEach(chart => chart.update(data))
101 | const elapsedMin = d3.sum(data, d => +d['DeltaSeconds']) / 60
102 | const iterations = d3.max(data, d => +d['#Iters'])
103 | document.querySelector('#iters').innerHTML = Math.round(iterations)
104 | document.querySelector('#elapsed').innerHTML = Math.round(elapsedMin)
105 | document.querySelector('#iters_per_min').innerHTML = Math.round(iterations / elapsedMin)
106 | })
107 | }
108 |
109 | // from http://bl.ocks.org/tomgp/6770520
110 | function convolute (data, kernel, accessor, target) {
111 | var kernelCenter = Math.floor(kernel.length / 2)
112 | // var leftSize = kernelCenter
113 | // var rightSize = kernel.length - (kernelCenter - 1)
114 | if (accessor === undefined) {
115 | accessor = function (datum) {
116 | return datum
117 | }
118 | }
119 |
120 | function constrain (i, range) {
121 | if (i < range[0]) {
122 | i = 0
123 | }
124 | if (i > range[1]) {
125 | i = range[1]
126 | }
127 | return i
128 | }
129 |
130 | data.forEach(function (d, i) {
131 | var s = 0
132 | for (var k = 0; k < kernel.length; k++) {
133 | var index = constrain((i + (k - kernelCenter)), [0, data.length - 1])
134 | s += kernel[k] * accessor(data[index])
135 | }
136 | data[i][target] = s
137 | })
138 | }
139 |
140 | function normaliseKernel (a) {
141 | function arraySum (a) {
142 | var s = 0
143 | for (var i = 0; i < a.length; i++) {
144 | s += a[i]
145 | }
146 | return s
147 | }
148 | var sumA = arraySum(a)
149 | var scaleFactor = sumA / 1
150 | a = a.map(function (d) {
151 | return d / scaleFactor
152 | })
153 | return a
154 | }
155 |
--------------------------------------------------------------------------------
/monitor/start.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | monitor/dump-logs.sh &
4 |
5 | cd monitor
6 |
7 | python -m SimpleHTTPServer 8080
8 |
--------------------------------------------------------------------------------
/nginx.conf:
--------------------------------------------------------------------------------
1 |
2 | user nginx;
3 | worker_processes 1;
4 |
5 | error_log /var/log/nginx/error.log warn;
6 | pid /var/run/nginx.pid;
7 |
8 |
9 | events {
10 | worker_connections 1024;
11 | }
12 |
13 |
14 | http {
15 | include /etc/nginx/mime.types;
16 | default_type application/octet-stream;
17 |
18 | log_format main '$remote_addr - $remote_user [$time_local] "$request" '
19 | '$status $body_bytes_sent "$http_referer" '
20 | '"$http_user_agent" "$http_x_forwarded_for"';
21 |
22 | access_log /var/log/nginx/access.log main;
23 |
24 | sendfile on;
25 | #tcp_nopush on;
26 |
27 | keepalive_timeout 65;
28 |
29 | #gzip on;
30 |
31 | include /etc/nginx/conf.d/*.conf;
32 |
33 | autoindex on;
34 | }
35 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "segnet",
3 | "version": "1.0.0",
4 | "description": "(Adapted from http://mi.eng.cam.ac.uk/projects/segnet/tutorial.html) ## Batch Normalization",
5 | "main": "class-weights.js",
6 | "scripts": {
7 | "start-viewer": "budo --dir results-viewer/dist results-viewer/view.js:view.js",
8 | "bundle-viewer": "browserify results-viewer/view.js > results-viewer/dist/view.js",
9 | "bundle-demo": "browserify segnet/demo.js > segnet/static/demo.js",
10 | "build-docker": "docker build -t developmentseed/skynet-train:gpu . -f docker/skynet-train/Dockerfile.gpu"
11 | },
12 | "keywords": [],
13 | "author": "Anand Thakker (http://anandthakker.net/)",
14 | "license": "ISC",
15 | "dependencies": {
16 | "choo": "^2.2.0",
17 | "fs-extra": "^0.30.0",
18 | "geojson-flatten": "^0.2.1",
19 | "geojson-normalize": "0.0.1",
20 | "lodash": "^4.17.2",
21 | "mapbox-gl": "^0.28.0",
22 | "mapbox-gl-styles": "^2.0.2",
23 | "minimist": "^1.2.0",
24 | "polyspine": "^1.0.0",
25 | "tile-cover": "^3.0.1",
26 | "tilebelt": "^1.0.1",
27 | "turf-line-distance": "^3.0.12",
28 | "turf-simplify": "^3.0.12"
29 | },
30 | "devDependencies": {
31 | "browserify": "^13.0.1",
32 | "budo": "^8.4.0",
33 | "es2020": "^1.1.6",
34 | "eslint": "^3.2.2",
35 | "eslint-config-standard": "^5.3.5",
36 | "eslint-plugin-promise": "^2.0.0",
37 | "eslint-plugin-standard": "^2.0.0"
38 | },
39 | "browserify": {
40 | "transform": [
41 | "es2020"
42 | ]
43 | }
44 | }
45 |
--------------------------------------------------------------------------------
/post-process/tilestitching/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules
2 | data
3 | out
4 |
--------------------------------------------------------------------------------
/post-process/tilestitching/README.md:
--------------------------------------------------------------------------------
1 | # stitching
2 |
3 | Stitching `skynet-train` raster results for better downstream vector outputs
4 |
5 | ## requirements
6 | 1. nodejs & yarn
7 | 2. install dependencies with `yarn`
8 | 3. directory of images with the schema `z/x/y.png`
9 |
10 | ## run
11 | ```
12 | node index --dir images --zoom 12
13 | ```
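
Stitched tiles are written to `out/{z}-{x}-{y}.png`; the `out` directory must already exist.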
14 |
15 | ## license
16 | MIT
17 |
--------------------------------------------------------------------------------
/post-process/tilestitching/index.js:
--------------------------------------------------------------------------------
1 | // utilities
2 | const R = require('ramda');
3 | const getAllPaths = require("./lib/tilepaths");
4 | const { pathToTile, covering, getChildren } = require("./lib/tiles");
5 | const fs = require("fs");
6 |
7 | // img
8 | const savePixels = require("save-pixels");
9 | const stitch = require("./lib/img");
10 |
11 | // args
12 | const argv = require("minimist")(process.argv.slice(2));
13 |
14 | // Paths and tileMap
15 | console.log("Grabbing all paths.");
16 | const allPaths = getAllPaths(argv.dir);
17 | const tiles = allPaths.map(path => {
18 | const tile = pathToTile(path);
19 | const hash = `${tile.z}/${tile.x}/${tile.y}`;
20 | return {tile, hash, path};
21 | });
22 | let tileMap = new Map(R.map(R.props(['hash', 'path']), tiles));
23 |
24 | // Compute covering with zoom
25 | const zoom = argv.zoom || 12;
26 | let cover = covering(R.map(R.prop('tile'), tiles), zoom);
27 |
28 | // Stitch tiles
29 | cover.forEach(tile => {
30 | stitch(getChildren(tile, 17), tileMap)
31 | .then(out => {
32 | // Output
33 | console.log("Saving output.");
34 | savePixels(out, "png")
35 | .pipe(fs.createWriteStream(`out/${tile.z}-${tile.x}-${tile.y}.png`))
36 | .on("end", function() {
37 | console.log("Wrapping up.");
38 | })
39 | .on("err", err => {
40 | throw new Error(err);
41 | });
42 | })
43 | .catch(err => {
44 | console.error("ERROR", err);
45 | });
46 | });
47 |
--------------------------------------------------------------------------------
/post-process/tilestitching/lib/binary.js:
--------------------------------------------------------------------------------
1 | const cwise = require('cwise');
2 | const ndarray = require('ndarray');
3 |
4 | var computeBin = cwise({
5 | args: ["array", "array", "array", "array"],
6 | body: function (out, r, g, b) {
7 | out = (r > 128 || g > 128 || b > 128) ? 255 : 0;
8 | }
9 | });
10 |
11 | /* Image (256, 256, 4) -> ndarray (256, 256)*/
12 | module.exports = function (image) {
13 | var shape = image.shape.slice(0);
14 | shape.pop();
15 | var result = ndarray(new Uint8Array(image.size), shape)
16 | computeBin(
17 | result,
18 | image.pick(undefined, undefined, 0),
19 | image.pick(undefined, undefined, 1),
20 | image.pick(undefined, undefined, 2)
21 | );
22 | return result;
23 | }
24 |
--------------------------------------------------------------------------------
/post-process/tilestitching/lib/img.js:
--------------------------------------------------------------------------------
1 | const zeros = require("zeros");
2 | const Promise = require("bluebird");
3 | const getPixels = Promise.promisify(require("get-pixels"));
4 |
5 | const binary = require("./binary");
6 | const { getGrid } = require("./tiles");
7 |
8 | /* Fills an image with a subimage*/
9 | function fill(out, pixels, xoffset, yoffset) {
10 | for (let i = 0; i < 256; i++) {
11 | for (let j = 0; j < 256; j++) {
12 | if (pixels) {
13 | out.set(xoffset + i, yoffset + j, pixels.get(i, j));
14 | } else {
15 | out.set(xoffset + i, yoffset + j, 0);
16 | }
17 | }
18 | }
19 | return out;
20 | }
21 |
22 | /* Takes tiles and a tileMap from tile to image
23 | * and forms a stitched tile */
24 | async function stitch(tiles, tileMap) {
25 | console.log(tiles);
26 | let length = tiles.length;
27 | const grid = getGrid(tiles);
28 |
29 | const width = (grid.x1 + 1 - grid.x0) * 256;
30 | const height = (grid.y1 + 1 - grid.y0) * 256;
31 |
32 | console.log("Zeroing out matrix.");
33 | let out = zeros([width, height], "uint8");
34 |
35 | return Promise.mapSeries(tiles, async (tile, idx) => {
36 | console.log(`${idx}/${length}:`, tile);
37 | const path = tileMap.get(`${tile.z}/${tile.x}/${tile.y}`);
38 | const xoffset = (tile.x - grid.x0) * 256;
39 | const yoffset = (tile.y - grid.y0) * 256;
40 |
41 | let pixels = null;
42 | if (path) {
43 | const image = await getPixels(path);
44 | pixels = binary(image);
45 | }
46 |
47 | // If pixels is null, it will fill with black
48 | return fill(out, pixels, xoffset, yoffset);
49 | }).then(() => out);
50 | }
51 |
52 | module.exports = stitch;
53 |
--------------------------------------------------------------------------------
/post-process/tilestitching/lib/tilepaths.js:
--------------------------------------------------------------------------------
1 | const fs = require("fs");
2 | const path = require("path");
3 |
4 | const listDir = function(dir) {
5 | const files = fs.readdirSync(dir);
6 | return files.filter(file => {
7 | return fs.statSync(path.join(dir, file)).isDirectory();
8 | });
9 | };
10 |
11 | /* Takes a directory and generates a list of tiles*/
12 | module.exports = imagesDir => {
13 | let paths = [];
14 | // First level is the zoom directories
15 | const zooms = fs.readdirSync(imagesDir);
16 | zooms.forEach(zoom => {
17 | const xss = listDir(path.join(imagesDir, zoom));
18 | xss.forEach(x => {
19 | const yss = fs.readdirSync(path.join(imagesDir, zoom, x));
20 | yss.forEach(y => {
21 | if (path.extname(y) === ".png") {
22 | paths.push(path.join(imagesDir, zoom, x, y));
23 | }
24 | });
25 | });
26 | });
27 | return paths;
28 | }
29 |
--------------------------------------------------------------------------------
/post-process/tilestitching/lib/tiles.js:
--------------------------------------------------------------------------------
1 | const R = require("ramda");
2 | const tilebelt = require("@mapbox/tilebelt");
3 |
4 | /* images/.../z/x/y.png --> {x, y, z} */
5 | function pathToTile(path) {
6 | const test = /.*\/([0-9]+)\/([0-9]+)\/([0-9]+)\.png/;
7 | const match = test.exec(path);
8 | if (match) {
9 | return {
10 | z: parseInt(match[1]),
11 | x: parseInt(match[2]),
12 | y: parseInt(match[3])
13 | };
14 | } else {
15 | throw new Error("path not matched");
16 | }
17 | }
18 |
19 | /* [{x, y, z}] => {x0, y0, x1, y1) */
20 | /* Get the min max tile indices */
21 | function getGrid(tiles) {
22 | const xss = R.map(R.prop("x"), tiles);
23 | const yss = R.map(R.prop("y"), tiles);
24 | const x0 = R.reduce(R.min, Infinity, xss);
25 | const y0 = R.reduce(R.min, Infinity, yss);
26 | const x1 = R.reduce(R.max, -Infinity, xss);
27 | const y1 = R.reduce(R.max, -Infinity, yss);
28 |
29 | return { x0, y0, x1, y1 };
30 | }
31 |
32 | /* Given tiles at zoom level x, generate a covering of
33 | * those tiles at zoom level y */
34 | function covering(tiles, zoom) {
35 | let currZoom = tiles[0].z;
36 | let currKeys = tiles
37 | .map(tile => [tile.x, tile.y, tile.z])
38 | .map(JSON.stringify);
39 | let cache = new Set(currKeys);
40 |
41 | for (let i = currZoom; i > zoom; i--) {
42 | let parentTiles = Array.from(cache).map(tileStr => {
43 | let tile = JSON.parse(tileStr);
44 | return JSON.stringify(tilebelt.getParent(tile));
45 | });
46 |
47 | cache = new Set(parentTiles);
48 | }
49 | return Array.from(cache).map(JSON.parse).map(tile => {
50 | return { z: tile[2], y: tile[1], x: tile[0] };
51 | });
52 | }
53 |
54 | /* Given tile at zoom x, get all its children at zoom y */
55 | function getChildren(tile, zoom) {
56 | if (tile.z === zoom)
57 | return tile;
58 | else {
59 | let children = tilebelt.getChildren([tile.x, tile.y, tile.z]);
60 | return R.flatten(children.map(tile =>
61 | getChildren({ x: tile[0], y: tile[1], z: tile[2] }, zoom)));
62 | }
63 | }
64 |
65 | module.exports = {
66 | pathToTile,
67 | getGrid,
68 | covering,
69 | getChildren
70 | };
71 |
--------------------------------------------------------------------------------
/post-process/tilestitching/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "tilestitch-experiments",
3 | "version": "0.0.1",
4 | "main": "index.js",
5 | "author": "Development Seed",
6 | "license": "MIT",
7 | "dependencies": {
8 | "@mapbox/tilebelt": "^1.0.1",
9 | "bluebird": "^3.5.0",
10 | "get-pixels": "^3.3.0",
11 | "luminance": "^1.0.3",
12 | "minimist": "^1.2.0",
13 | "ramda": "^0.23.0",
14 | "save-pixels": "^2.3.4",
15 | "zeros": "^1.0.0"
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/post-process/vectorization/.gitignore:
--------------------------------------------------------------------------------
1 | in
2 | out
3 | *.pyc
4 | node_modules
5 | *.swp
6 |
--------------------------------------------------------------------------------
/post-process/vectorization/README.md:
--------------------------------------------------------------------------------
1 | # Vectorization playground
2 |
3 | This code consists of a library of Python functions and a command line tool for creating GeoJSON (Lat/Lon) linestring coordinates of object skeletons from binary raster imagery. A binary raster image is one that consists of two values (0 or 255). The vectorization process will first generate the skeleton of the objects within the image, and then convert that into coordinates.
4 |
5 | ## Input data
6 |
7 | The command line program vectorize.py currently operates on tiles in PNG format, and expects files to be named in the format: `{zoom}-{x}-{y}.png`. vectorize.py can take in either a single filename or directory name containing a series of png files. Only the first band is used for vectorization.
8 |
9 | The PNG files can be generated using the tilestitching script in this repo. For zoom level 14:
10 |
11 | ```
12 | mkdir zoom14
13 | node ../tilestitching/index.js --dir images --zoom 14
14 | cp -a ../tilestitching/out/*.png zoom14/
15 | ```
16 |
17 |
18 | ## Installation
19 |
20 | The vectorize.py script requires some system dependencies that may need to be installed first; then install the Python requirements using pip.
21 |
22 | ```
23 | # on debian
24 | $ sudo apt-get install libgdal-dev swig
25 |
26 | # on mac
27 | $ brew install gdal swig
28 |
29 | # then, on all systems
30 | $ pip install numpy
31 | $ pip install -r requirements.txt
32 | ```
33 |
34 | Note that NumPy needs to be installed prior to the packages in the requirements.txt.
35 |
36 |
37 | ## Usage
38 |
39 | There are currently no tunable parameters in the vectorization; input files are all that's needed. However, there are some options:
40 |
41 | ```
42 | $ ./vectorize.py -h
43 | usage: vectorize.py [-h] (-f FILENAME | -d DIRECTORY) [--outdir OUTDIR]
44 | [--verbose VERBOSE] [--version]
45 |
46 | Binary image vectorization (v0.1.0)
47 |
48 | optional arguments:
49 | -h, --help show this help message and exit
50 | -f FILENAME, --filename FILENAME
51 | Input PNG tile (default: None)
52 | -d DIRECTORY, --directory DIRECTORY
53 | Input directory (default: None)
54 | --outdir OUTDIR Save intermediate files to this dir (otherwise temp)
55 | (default: )
56 | --verbose VERBOSE 0: Quiet, 1: Debug, 2: Info, 3: Warn, 4: Error, 5:
57 | Critical (default: 2)
58 | --version Print version and exit
59 |
60 | ```
61 |
62 | For example, to run on a directory of zoom 14 tiles:
63 |
64 | $ vectorize.py -d zoom14 --outdir zoom14
65 |
66 | This will put all the outputs next to the input files in the zoom14 directory.
67 |
68 |
69 | ## Output
70 |
71 | Two output files are saved for each input file, either in the current directory or in *outdir* if provided. The main output is a GeoJSON file with the same name as the input file but with a .geojson extension; it contains all of the vectorized linestrings in an EPSG:4326 CRS. The other is an intermediate GeoTIFF file (same basename with a .tif extension) containing the proper projection information and three bands:
72 |
73 | - Band 1 is the original binary image converted into 0 and 1.
74 | - Band 2 is the skeletonized image, where the value is the number of non-zero pixels in the surrounding 3x3 neighborhood (including the pixel itself). For instance, a road endpoint has one neighbor and thus a value of 2; a road midpoint has 2 neighbors and a value of 3; intersections have values of 4 or more. These counts are used by the vectorization algorithm.
75 | - Band 3 contains the residuals, that is any points that were not vectorized. This should be an empty image except for single isolated pixels that are ignored.
76 |
--------------------------------------------------------------------------------
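To make the Band 2 encoding described above concrete: the count comes from convolving the skeleton with a 3x3 kernel of ones, so it includes the center pixel itself. A small NumPy/SciPy sketch of the equivalent computation (the repo uses gippy for this step; scipy here is purely illustrative):

```python
import numpy as np
from scipy.ndimage import convolve

# a 1-pixel-wide horizontal "road" skeleton
skel = np.zeros((3, 5), dtype=int)
skel[1, :] = 1

counts = convolve(skel, np.ones((3, 3), dtype=int), mode='constant')
counts[skel == 0] = 0  # keep counts only on the skeleton, as vectorize.py does

print(counts[1])  # [2 3 3 3 2]: endpoints -> 2, midpoints -> 3
```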
/post-process/vectorization/requirements.txt:
--------------------------------------------------------------------------------
1 | git+https://github.com/gipit/gippy.git@develop
2 | pyproj==1.9.5.1
3 | pygeotile==1.0.0
4 | numpy==1.12.1
5 | six==1.10.0
6 |
--------------------------------------------------------------------------------
/post-process/vectorization/vectorize.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import os
3 | import sys
4 | import json
5 | import argparse
6 | import glob
7 | from datetime import datetime
8 | import gippy
9 | import numpy as np
10 | from osgeo import osr
11 | from pyproj import Proj, transform
12 | from pygeotile.tile import Tile
13 | import logging
14 |
15 |
16 | __version__ = '0.1.1'
17 | logger = logging.getLogger(__name__)
18 | logging.basicConfig(stream=sys.stdout)
19 |
20 |
21 | def lines_to_features(lines):
22 | """ Create features from lines """
23 | gid = 0
24 | features = []
25 | for line in lines:
26 | feature = {
27 | 'type': 'Feature',
28 | 'geometry': {
29 | 'type': 'LineString',
30 | 'coordinates': line
31 | },
32 | 'properties': {
33 | 'id': gid
34 | }
35 | }
36 | features.append(feature)
37 | gid += 1
38 | geojson = {
39 | 'type': 'FeatureCollection',
40 | 'features': features,
41 | }
42 | return geojson
43 |
44 |
45 | def trace_line(arr, endpoint):
46 | """ Trace a line starting with an endpoint """
47 | # loop until another endpoint is reached
48 | pt = endpoint
49 | line = [[pt[0]+0.5, pt[1]+0.5]]
50 | i = 0
51 | while True:
52 | # zero out current point
53 | arr[pt[0], pt[1]] = 0
54 | # extract subarray
55 | xmin, xmax = max(0, pt[0]-1), min(arr.shape[0], pt[0]+2)
56 | ymin, ymax = max(0, pt[1]-1), min(arr.shape[1], pt[1]+2)
57 | subarr = arr[xmin:xmax, ymin:ymax]
58 | # locate next point
59 | loc = np.where(subarr > 1)
60 | if len(loc[0]) == 0:
61 | break
62 | pt = [loc[0][0]+xmin, loc[1][0]+ymin]
63 | line.append([pt[0]+0.5, pt[1]+0.5])
64 | # check if endpoint
65 | val = arr[pt[0], pt[1]]
66 | if val != 3:
67 | arr[pt[0], pt[1]] = 0
68 | xmin, xmax = max(0, pt[0]-1), min(arr.shape[0], pt[0]+2)
69 | ymin, ymax = max(0, pt[1]-1), min(arr.shape[1], pt[1]+2)
70 | subarr = arr[xmin:xmax, ymin:ymax]
71 | # decrement any remaining pixels in local region
72 | for x in range(xmin, xmax):
73 | for y in range(ymin, ymax):
74 | arr[x, y] = max(0, arr[x, y]-1)
75 | break
76 | i = i+1
77 | return line
78 |
79 |
80 | def vectorize(img):
81 | """ Vectorize a raster skeleton """
82 | # get convolution of 3x3 with skeleton
83 | kernel = np.ones((3, 3))
84 | skel = img[0].skeletonize().read()
85 | skelconv = img[0].skeletonize().convolve(kernel, boundary=False).read()
86 | skelconv[skel == 0] = 0
87 | img[1].write(skelconv)
88 | # img[2].write(skelconv)
89 |
90 | lines = []
91 | # Create list of 2D points
92 | pts = [list(coord_arr) for coord_arr in np.where(skelconv == 2)]
93 | pts = np.vstack(pts).T
94 |
95 | while len(pts) > 0:
96 | # start with an endpoint and trace the entire line, pt by pt
97 | pt = [pts[0][0], pts[0][1]]
98 | line = trace_line(skelconv, pt)
99 | lines.append(line)
100 |
101 | # Recalculate point list since 1 line has been recorded and removed
102 | pts = [list(coord_arr) for coord_arr in np.where(skelconv == 2)]
103 | pts = np.vstack(pts).T
104 | img[2].write(skelconv)
105 | return lines
106 |
107 |
108 | def open_tile(filename, outdir='./'):
109 | """ Open a tile image and assign projection and geotransform """
110 | img = gippy.GeoImage(filename)
111 | z, x, y = map(int, img.basename().split('-'))
112 | tile = Tile.from_tms(tms_x=x, tms_y=y, zoom=z)
113 | img[0] = (img[0] == 255)
114 | fout = os.path.join(outdir, img.basename() + '.tif')
115 | geoimg = img.save(fout, options={'COMPRESS': 'DEFLATE'})
116 | geoimg.set_srs('EPSG:3857')
117 | minpt = tile.bounds[0].meters
118 | maxpt = tile.bounds[1].meters
119 | affine = np.array(
120 | [
121 | minpt[0], (maxpt[0]-minpt[0])/geoimg.xsize(), 0.0,
122 | abs(minpt[1]), 0.0, -(maxpt[1]-minpt[1])/geoimg.ysize()
123 | ])
124 | geoimg.set_affine(affine)
125 | return geoimg
126 |
127 |
128 | def main(filename, outdir='./'):
129 | start0 = datetime.now()
130 | geoimg = open_tile(filename, outdir=outdir)
131 | logger.debug('Open tile (%s)' % (datetime.now() - start0))
132 |
133 | start = datetime.now()
134 | lines = vectorize(geoimg)
135 | logger.debug('Vectorize tile (%s)' % (datetime.now() - start))
136 |
137 | # geolocate
138 | start = datetime.now()
139 | srs = osr.SpatialReference(geoimg.srs()).ExportToProj4()
140 | projin = Proj(srs)
141 | projout = Proj(init='epsg:4326')
142 | newlines = []
143 | for line in lines:
144 | newline = []
145 | for point in line:
146 | pt = geoimg.geoloc(point[1], point[0])
147 | # convert to lat-lon
148 | pt = transform(projin, projout, pt.x(), pt.y())
149 | newline.append(pt)
150 | newlines.append(newline)
151 | logger.debug('Transform coordinates (%s)' % (datetime.now() - start))
152 |
153 | geojson = lines_to_features(newlines)
154 | fout = os.path.join(outdir, geoimg.basename() + '.geojson')
155 | with open(fout, 'w') as f:
156 | f.write(json.dumps(geojson))
157 | logger.info('Completed vectorization in %s' % (datetime.now() - start0))
158 |
159 |
160 | def parse_args(args):
161 | """ Parse arguments for the NDWI algorithm """
162 | desc = 'Binary image vectorization (v%s)' % __version__
163 | dhf = argparse.ArgumentDefaultsHelpFormatter
164 | parser = argparse.ArgumentParser(description=desc, formatter_class=dhf)
165 |
166 | group = parser.add_mutually_exclusive_group(required=True)
167 | group.add_argument('-f', '--filename', help='Input PNG tile')
168 | group.add_argument('-d', '--directory', help='Input directory')
169 |
170 | parser.add_argument('--outdir', help='Save intermediate files to this dir (otherwise temp)', default='')
171 | h = '0: Quiet, 1: Debug, 2: Info, 3: Warn, 4: Error, 5: Critical'
172 | parser.add_argument('--verbose', help=h, default=2, type=int)
173 |
174 | parser.add_argument('--version', help='Print version and exit', action='version', version=__version__)
175 |
176 | return parser.parse_args(args)
177 |
178 |
179 | def cli():
180 | args = parse_args(sys.argv[1:])
181 | logger.setLevel(args.verbose * 10)
182 | if args.directory is None:
183 | filenames = [args.filename]
184 | else:
185 | filenames = glob.glob(os.path.join(args.directory, '*.png'))
186 | for f in filenames:
187 | main(f, outdir=args.outdir)
188 |
189 |
190 | if __name__ == "__main__":
191 | cli()
192 |
--------------------------------------------------------------------------------
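The geolocation loop in `main` above amounts to an EPSG:3857 to EPSG:4326 transform of each traced point. A standalone pyproj sketch of that step, using the same pyproj 1.x API as the script (the coordinates are illustrative):

```python
from pyproj import Proj, transform

web_mercator = Proj(init='epsg:3857')
lonlat = Proj(init='epsg:4326')

# a point in web-mercator meters -> (longitude, latitude)
lon, lat = transform(web_mercator, lonlat, -8237642.0, 4970241.0)
print(lon, lat)  # roughly -74.0, 40.7 (New York area)
```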
/results-viewer/README.md:
--------------------------------------------------------------------------------
1 | In addition to the usage shown in the main [README](https://github.com/developmentseed/skynet-train#quick-start), this viewer can also be used as a standalone tool. The only requirement is that an `index.json` file be added to the `dist` folder with the following format:
2 |
3 | ```js
4 | {
5 | "images": [
6 | {
7 | "index": 0,
8 | "prediction": "0_prediction.png", // location of prediction image
9 | "metrics": {
10 | "correctness_score": 0.43474827245804543,
11 | "completeness_score": 0.79609929078014185
12 | },
13 | "groundtruth": "0_groundtruth.png", // location of label image ("groundtruth")
14 | "input": "0_input.png" // location of input image (satellite imagery in this case)
15 | },
16 | ...
17 | ]
18 | }
19 | ```
20 |
21 | The `dist` folder can be served with [your preferred command](https://gist.github.com/willurd/5720255) and you can access the results at `http://localhost:8000/view.html?access_token=your_access_token`
22 |
--------------------------------------------------------------------------------
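For example, Python's built-in HTTP server is enough to serve the `dist` folder: run `python -m http.server 8000` (or `python -m SimpleHTTPServer 8000` on Python 2) from inside `dist`.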
/results-viewer/access-token.js:
--------------------------------------------------------------------------------
1 | module.exports = function getAccessToken (opts) {
2 | var accessToken =
3 | opts.access_token ||
4 | localStorage.getItem('accessToken')
5 | if (accessToken) {
6 | localStorage.setItem('accessToken', accessToken)
7 | }
8 | return accessToken
9 | }
10 |
--------------------------------------------------------------------------------
/results-viewer/dist/view.html:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/results-viewer/get-tile-url.js:
--------------------------------------------------------------------------------
1 | const getAccessToken = require('./access-token')
2 |
3 | module.exports = function (opts, z, x, y) {
4 | let url
5 | if (opts.imagery_source && /^[^\/]+:\/\//.test(opts.imagery_source)) {
6 | url = opts.imagery_source
7 | } else {
8 | url = '//b.tiles.mapbox.com/v4/{mapid}/{z}/{x}/{y}.png?access_token={token}'
9 | .replace('{mapid}', opts.imagery_source || 'mapbox.satellite')
10 | .replace('{token}', getAccessToken(opts))
11 | }
12 |
13 | return url
14 | .replace('{z}', z)
15 | .replace('{x}', x)
16 | .replace('{y}', y)
17 | }
18 |
--------------------------------------------------------------------------------
/results-viewer/map.js:
--------------------------------------------------------------------------------
1 | const mapboxgl = require('mapbox-gl')
2 | const labelLayers = require('mapbox-gl-styles/styles/basic-v9.json').layers
3 | .filter((l) => /poi|place|country/.test(l.id))
4 | const getAccessToken = require('./access-token')
5 |
6 | module.exports = function (opts) {
7 | let style
8 | if (opts.imagery_source) {
9 | let source
10 | if (!/^[^\/]+:\/\//.test(opts.imagery_source)) {
11 | source = { url: 'mapbox://' + opts.imagery_source }
12 | } else {
13 | source = { tiles: [ opts.imagery_source ] }
14 | }
15 | Object.assign(source, { type: 'raster', tileSize: 256 })
16 | style = {
17 | version: 8,
18 | sources: {
19 | mapbox: { url: 'mapbox://mapbox.mapbox-streets-v7', type: 'vector' },
20 | imagery: source
21 | },
22 | sprite: 'mapbox://sprites/mapbox/basic-v9',
23 | glyphs: 'mapbox://fonts/mapbox/{fontstack}/{range}.pbf',
24 | layers: [
25 | {id: 'background', type: 'background', paint: { 'background-color': '#121212' }},
26 | {id: 'imagery', type: 'raster', source: 'imagery'}
27 | ].concat(labelLayers)
28 | }
29 | } else {
30 | style = 'mapbox://styles/mapbox/satellite-streets-v9'
31 | }
32 |
33 | mapboxgl.accessToken = getAccessToken(opts)
34 | const map = window._map = new mapboxgl.Map({
35 | container: opts.container || 'map',
36 | style: style,
37 | center: [0, 0],
38 | zoom: 1,
39 | hash: true
40 | })
41 |
42 | if (opts.showTileBoundaries) {
43 | map.showTileBoundaries = true
44 | }
45 |
46 | return map
47 | }
48 |
49 |
--------------------------------------------------------------------------------
/results-viewer/view.js:
--------------------------------------------------------------------------------
1 | const qs = require('querystring')
2 | const http = require('choo/http')
3 | const choo = require('choo')
4 | const tilebelt = require('tilebelt')
5 | const createMap = require('./map')
6 | const getSatelliteTileURL = require('./get-tile-url')
7 |
8 | const query = qs.parse(window.location.search.substring(1))
9 | const accessToken = require('./access-token')(query)
10 | let baseurls = query.baseurl || ''
11 | if (!Array.isArray(baseurls)) { baseurls = [baseurls] }
12 | // add trailing slash
13 | baseurls = baseurls.map(b => (b === '' || b.endsWith('/')) ? b : `${b}/`)
14 |
15 | let map = accessToken && !query.hasOwnProperty('no-map') && createMap(query)
16 | .on('load', function () {
17 | map.addSource('tile', { type: 'geojson', data: { type: 'FeatureCollection', features: [] } })
18 | map.addLayer({
19 | 'id': 'tile',
20 | 'source': 'tile',
21 | 'type': 'line',
22 | 'paint': {
23 | 'line-color': 'red',
24 | 'line-width': 4
25 | }
26 | })
27 | })
28 |
29 | const app = choo()
30 |
31 | app.model({
32 | namespace: 'app',
33 | state: { results: [], limit: 50, sort: 'index' },
34 | subscriptions: [
35 | function (send) { send('http:get_json') } // grab json data at startup
36 | ],
37 | reducers: {
38 | 'setTestOutput': (action, state) => {
39 | return Object.assign({}, state, {
40 | results: state.results.concat({
41 | baseurl: action.payload.baseurl,
42 | items: action.payload.images,
43 | metrics: {
44 | correctness: action.payload.correctness,
45 | completeness: action.payload.completeness
46 | }
47 | })
48 | })
49 | },
50 | 'loadMore': logged((action, state) => ({ limit: state.limit + 50 }), 'loadMore'),
51 | 'sort': (action, state) => ({ sort: action.key })
52 | },
53 | effects: {
54 | 'error': (state, event) => console.error(`error: ${event.payload}`),
55 | 'print': (state, event) => console.log(`http: ${event.payload}`)
56 | }
57 | })
58 |
59 | app.model({
60 | namespace: 'http',
61 | effects: {
62 | 'get_json': getJson
63 | }
64 | })
65 |
66 | const view = (params, state, send) => {
67 | const results = [].concat(state.app.results)
68 | .sort((r, s) => baseurls.indexOf(r.baseurl) - baseurls.indexOf(s.baseurl))
69 |
70 | const sort = state.app.sort.split(':')
71 | let items
72 | if (!results.length) {
73 | items = []
74 | } else if (sort[0] === 'index') {
75 | items = [].concat(results[0].items)
76 | } else {
77 | items = results[0].items
78 | .filter(function (item) {
79 | return item.metrics[sort[0]] >= 0
80 | })
81 | .sort(function (a, b) {
82 | var diff = a.metrics[sort[0]] - b.metrics[sort[0]]
83 | return sort[1] === 'ascending' ? diff : -diff
84 | })
85 | }
86 |
87 | const resultNames = stripCommon(results.map(r => r.baseurl))
88 |
89 | const colCount = 2 + baseurls.length
90 | const colMargin = 2
91 | const colStyle = [
92 | `width: calc((100% - ${colMargin * colCount}px) / ${colCount})`,
93 | `margin-left: ${colMargin}px`
94 | ].join('; ')
95 |
96 | let missingAccessToken = ''
97 | if (!query.hasOwnProperty('no-map') && !accessToken) {
98 | const url = window.location.href.replace(/#.*$/, '')
99 | missingAccessToken = choo.view`
100 |
101 | To view results on the map, add a Mapbox access token to the URL like so:
102 | ${url}${/\?/.test(url) ? '&' : '?'}access_token=ACCESSTOKEN
103 |
`
104 | }
105 |
106 | return choo.view`
107 |
108 |
send('app:sort', { key: 'correctness_score:descending' })}>Most Correct
109 |
send('app:sort', { key: 'correctness_score:ascending' })}>Least Correct
110 |
send('app:sort', { key: 'completeness_score:descending' })}>Most Complete
111 |
send('app:sort', { key: 'completeness_score:ascending' })}>Least Complete
112 |
send('app:sort', { key: 'index' })}>Reset
113 | ${missingAccessToken}
114 |
115 |
129 | ${items
130 | .slice(0, state.app.limit)
131 | .map(item => {
132 | var tile = getTile(item)
133 | var image = tile ? getSatelliteTileURL(query, tile[0], tile[1], tile[2])
134 | : (baseurls[0] + item.input)
135 | return choo.view`
136 |
137 |
140 |
141 |
142 |
143 | ${results.map(result => {
144 | const it = result.items.find(i => tile.join('') === getTile(i).join(''))
145 | return choo.view`
146 |
147 |
148 |
149 | Complete: ${it.metrics.completeness_score.toFixed(3)}
150 | Correct: ${it.metrics.correctness_score.toFixed(3)}
151 |
152 |
`
153 | })}
154 |
155 | `
156 | })
157 | }
158 |
159 | ${state.app.limit < items.length
160 | ? choo.view`
send('app:loadMore')}>Load More `
161 | : ''}
162 |
163 | `
164 | }
165 |
166 | app.router((route) => [
167 | route('/', logged(view, 'view'))
168 | ])
169 |
170 | document.querySelector('#app').appendChild(app.start())
171 |
172 | function getTile (item) {
173 | var match = /(\d*)-(\d*)-(\d*).png/.exec(item.test_data)
174 | return match && match.slice(1, 4)
175 | }
176 |
177 | function onClick (event) {
178 | if (!map) { return }
179 |
180 | let node = event.currentTarget
181 | while (node && !(node.dataset && node.dataset.tile)) {
182 | if (node.parentNode === node) { break }
183 | node = node.parentNode
184 | }
185 |
186 | var tile = node.dataset.tile.split(',').map(Number)
187 | tile = [tile[1], tile[2], tile[0]]
188 | var [w, s, e, n] = tilebelt.tileToBBOX(tile)
189 | var z = +tile[2]
190 |
191 | var coordinates = [
192 | [w, n],
193 | [e, n],
194 | [e, s],
195 | [w, s]
196 | ]
197 |
198 | map.jumpTo({center: [ (w + e) / 2, (s + n) / 2 ], zoom: z - 1, speed: 2})
199 | map.getSource('tile').setData({
200 | type: 'Feature',
201 | properties: {},
202 | geometry: {
203 | type: 'Polygon',
204 | coordinates: [ coordinates.concat([[w, n]]) ]
205 | }
206 | })
207 |
208 | showOverlay(event.currentTarget.src, coordinates)
209 | }
210 |
211 | function showOverlay (url, coords) {
212 | console.log('show overlay', url, coords)
213 | if (map.getLayer('class-overlay')) {
214 | map.removeLayer('class-overlay')
215 | }
216 | if (map.getSource('class-overlay')) {
217 | map.removeSource('class-overlay')
218 | }
219 |
220 | map.addSource('class-overlay', {
221 | type: 'image',
222 | url: url,
223 | coordinates: coords
224 | })
225 |
226 | map.addLayer({
227 | 'id': 'class-overlay',
228 | 'source': 'class-overlay',
229 | 'type': 'raster',
230 | 'paint': { 'raster-opacity': 0.5 }
231 | })
232 | }
233 |
234 | function getJson (state, action, send) {
235 | baseurls.forEach((baseurl, i) => {
236 | http.get(baseurl + 'index.json', { json: true }, function (err, res, body) {
237 | if (err) return send('app:error', { payload: err.message })
238 | if (res.statusCode !== 200 || !body) {
239 | return send('app:error', { payload: 'something went wrong' })
240 | }
241 | if (typeof body === 'string') {
242 | body = JSON.parse(body.replace(/NaN/g, '-1'))
243 | }
244 | body.baseurl = baseurl
245 | send('app:setTestOutput', { payload: body })
246 | })
247 | })
248 | }
249 |
250 | function logged (view, tag) {
251 | return function () {
252 | console.log(tag || '', arguments)
253 | return view.apply(this, Array.prototype.slice.call(arguments))
254 | }
255 | }
256 |
257 | /**
258 | * Given an array of strings, return a new array wherein the common prefix
259 | * and common postfix for the given strings is removed.
260 | *
261 | * Example: stripCommon(['abcHello Worldxyz', 'abc123xyz', 'abcxyz']) ===
262 | * ['Hello World', '123', '']
263 | *
264 | * One small exception: do only treat numerical digits (and "K") as 'common'
265 | * if the whole string of them is common -- otherwise
266 | * ['abc_123_5000', 'abc_123_55000'] would become ['', '5'], whereas we really
267 | * want ['5000', '55000']
268 | */
269 | function stripCommon (strings) {
270 | if (strings.length <= 1) return strings
271 | let digits = []
272 | let pre = 0
273 | while (pre < strings[0].length) {
274 | let chars = strings.map(s => s.charAt(pre))
275 | if (chars.some(c => !c)) { break }
276 | if (chars.some(c => c !== chars[0])) {
277 | break
278 | }
279 | if (/[\dK]/.test(chars[0])) {
280 | digits.push(chars[0])
281 | } else {
282 | digits = []
283 | }
284 | pre++
285 | }
286 | console.log(digits)
287 | strings = strings.map(s => digits.join('') + s.slice(Math.max(pre, 0)))
288 |
289 | let post = 0
290 | digits = []
291 | while (post < strings[0].length) {
292 | let chars = strings.map(s => s.charAt(s.length - post - 1))
293 | if (chars.some(c => !c.length)) { break }
294 | if (chars.some(c => c !== chars[0])) {
295 | break
296 | }
297 | if (/[\dK]/.test(chars[0])) {
298 | digits.unshift(chars[0])
299 | } else {
300 | digits = []
301 | }
302 | post++
303 | }
304 | return strings.map(s => s.slice(0, s.length - post) + digits.join(''))
305 | }
306 |
307 |
--------------------------------------------------------------------------------
/segnet/.gitignore:
--------------------------------------------------------------------------------
1 | env
2 |
--------------------------------------------------------------------------------
/segnet/batch_inference.py:
--------------------------------------------------------------------------------
1 | # https://github.com/BVLC/caffe/issues/861
2 | import matplotlib
3 | matplotlib.use('Agg')
4 |
5 | import re
6 | import os
7 | import os.path
8 | import sys
9 | import time
10 | import random
11 | import requests
12 | import numpy as np
13 | from PIL import Image
14 | import click
15 | import json
16 | import StringIO
17 | import subprocess
18 | import tempfile
19 | from boto3.session import Session
20 | caffe_root = os.getenv('CAFFE_ROOT', '/home/ubuntu/caffe-segnet/')
21 | sys.path.insert(0, caffe_root + 'python')
22 | import caffe
23 |
24 | from inference import predict
25 | from queue import receive
26 | from vectorize import vectorize
27 |
28 | aws_session = Session()
29 | s3 = aws_session.client('s3')
30 | dirname = os.path.dirname(os.path.realpath(__file__))
31 | polys_to_lines = os.path.join(dirname, '../vectorize.js')
32 |
33 |
34 | class TileNotFoundError(Exception):
35 | def __init__(self, msg):
36 | Exception.__init__(self, msg)
37 |
38 |
39 | def parse_s3_uri(s3uri):
40 | match = re.search('s3://([^/]*)/(.*)$', s3uri)
41 | if not match:
42 | return None
43 | return (match.group(1), match.group(2))
44 |
45 |
46 | def resolve_s3(s3uri, temp=False):
47 | parsed = parse_s3_uri(s3uri)
48 | if not parsed:
49 | return s3uri
50 | (bucket, key) = parsed
51 | if temp:
52 | target = '/tmp/' + os.path.basename(key)
53 | else:
54 | target = '/model/' + os.path.basename(key)
55 | if not os.path.isfile(target):
56 | print('downloading ' + s3uri + ' to ' + target)
57 | s3.download_file(bucket, key, target)
58 | else:
59 | print(s3uri + ' appears to have already been downloaded to ' + target +
60 | '; using local copy.')
61 | return target
62 |
63 |
64 | def setup_net(model, weights, gpu, cpu_only):
65 | model_file = resolve_s3(model)
66 | weights_file = resolve_s3(weights)
67 | if not os.path.isfile(weights_file) and os.path.isdir('/model'):
68 | caffemodels = filter(lambda x: x.endswith('.caffemodel'), os.listdir('/model'))
69 | if len(caffemodels) == 0:
70 | raise Exception('No .caffemodel files found in /model.')
71 | weights_file = '/model/%s' % caffemodels[0]
72 |
73 | # read model definition
74 | model = open(model_file, 'r').read()
75 | # create net
76 | if cpu_only:
77 | caffe.set_mode_cpu()
78 | else:
79 | caffe.set_mode_gpu()
80 | caffe.set_device(gpu)
81 |
82 | return caffe.Net(model_file.encode('utf8'),
83 | weights_file.encode('utf8'),
84 | caffe.TEST)
85 |
86 |
87 | def make_prediction(net, colors, im, outfile, threshold=0.5):
88 |     bands = len(im.getbands())
89 |     imdata = np.array(im.getdata()).reshape(im.size[0], im.size[1], bands)
90 |     predicted = predict(net, colors, threshold, imdata)
91 |     predicted.save(outfile, 'PNG')
92 |
93 |
94 | def get_image_tile(url, x, y, z):
95 | image_url = url.replace('{x}', str(x)).replace('{y}', str(y)).replace('{z}', str(z))
96 |
97 | # First check if image is on S3
98 | if 's3://' in image_url:
99 | img_to_load = resolve_s3(image_url, temp=True)
100 |
101 | # Otherwise, pull it from other source
102 | else:
103 | resp = requests.get(image_url)
104 | if not resp.ok:
105 | raise TileNotFoundError({'status': resp.status_code, 'content': resp.content})
106 | img_to_load = StringIO.StringIO(resp.content)
107 |
108 | return Image.open(img_to_load).convert('RGB')
109 |
110 |
111 | def upload_centerlines(filename, output_bucket, prefix):
112 | uid = ''.join(random.choice('abcdef0123456789') for _ in range(6))
113 | key = '%s/centerlines.%s-%s.geojson' % (prefix, time.time(), uid)
114 | click.echo('Uploading geojson %s' % key)
115 | s3.upload_file(filename, output_bucket, key, ExtraArgs={
116 | 'ContentType': 'application/ndjson'
117 | })
118 |
119 |
120 | @click.command()
121 | @click.argument('queue_name')
122 | @click.option('--model', type=str, default='/model/segnet_deploy.prototxt')
123 | @click.option('--weights', type=str, default='/model/weights.caffemodel')
124 | @click.option('--classes', type=str, default='/model/classes.json')
125 | @click.option('--gpu', type=int, default=0)
126 | @click.option('--cpu-only', is_flag=True, default=False)
127 | def run_batch(queue_name, model, weights, classes, gpu, cpu_only):
128 | net = setup_net(model, weights, gpu, cpu_only)
129 | classes_file = resolve_s3(classes)
130 |
131 | # read classes metadata
132 | with open(classes_file) as classes:
133 | colors = map(lambda x: x['color'][1:], json.load(classes))
134 | colors.append('000000')
135 | colors = map(lambda rgbstr: tuple(map(ord, rgbstr.decode('hex'))), colors)
136 |
137 | count = 0
138 | centerlines = tempfile.NamedTemporaryFile(suffix='.geojson', delete=False)
139 | click.echo('geojson output: %s' % centerlines.name)
140 |
141 | for message in receive(queue_name):
142 | try:
143 | click.echo('processing: %s' % message.body)
144 | (output_bucket, prefix, image_tiles, z, x, y) = json.loads(message.body)
145 |
146 | image = get_image_tile(image_tiles, x, y, z)
147 |
148 | # run prediction
149 | predicted = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
150 | make_prediction(net, colors, image, predicted)
151 | predicted.close()
152 |
153 | # upload raster prediction image
154 | key = '%s/%s/%s/%s.png' % (prefix, z, x, y)
155 | s3.upload_file(predicted.name, output_bucket, key, ExtraArgs={
156 | 'ContentType': 'image/png'
157 | })
158 |
159 | # trace raster -> polygons
160 | polygonized = tempfile.NamedTemporaryFile(suffix='.geojson', delete=False)
161 | polygonized.write(json.dumps(vectorize(predicted.name)))
162 | polygonized.close()
163 |
164 | # upload polygon geojson for this tile
165 | key = '%s/%s/%s/%s.polygons.geojson' % (prefix, z, x, y)
166 | s3.upload_file(polygonized.name, output_bucket, key, ExtraArgs={
167 | 'ContentType': 'application/json'
168 | })
169 |
170 | # polygons => centerlines
171 | polyspine_args = map(str, [polys_to_lines, polygonized.name, x, y, z, 0.2])
172 | exitcode = subprocess.call(polyspine_args, stdout=centerlines)
173 |
174 | # clean up tempfiles
175 | os.remove(predicted.name)
176 | os.remove(polygonized.name)
177 |
178 | if exitcode != 0:
179 | raise Exception('Vectorize exited nonzero')
180 |
181 | # upload centerlines geojson to S3 every so often
182 | count += 1
183 | if count % 5000 == 0:
184 | centerlines.close()
185 | upload_centerlines(centerlines.name, output_bucket, prefix)
186 | # clear the file out and continue writing
187 | centerlines = open(centerlines.name, 'w+b')
188 |
189 | # remove message from the queue
190 | message.delete()
191 | except TileNotFoundError:
192 | click.echo('Imagery tile not found.')
193 | message.delete()
194 | except Exception as err:
195 | click.echo(err)
196 | try:
197 | message.delete()
198 | except Exception:
199 | pass
200 |
201 | centerlines.close()
202 | upload_centerlines(centerlines.name, output_bucket, prefix)
203 |
204 |
205 | if __name__ == '__main__':
206 | run_batch(auto_envvar_prefix='SKYNET')
207 |
--------------------------------------------------------------------------------
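Each queue message consumed by `run_batch` above is a JSON array of six fields. A minimal sketch of producing and parsing one (the bucket, prefix, and imagery URL are placeholders):

```python
import json

# fields: output bucket, key prefix, imagery URL template, then z, x, y
body = json.dumps(
    ['my-output-bucket', 'my-tileset', 's3://imagery/{z}/{x}/{y}.png', 15, 9372, 12534])

(output_bucket, prefix, image_tiles, z, x, y) = json.loads(body)
assert (z, x, y) == (15, 9372, 12534)
```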
/segnet/compute_bn_statistics.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | # Modified from https://github.com/alexgkendall/SegNet-Tutorial
4 |
5 | # > The Batch Normalisation layers in SegNet shift the input feature maps
6 | # > according to their mean and variance statistics for each mini batch during
7 | # > training. At test time we must use the statistics for the entire dataset.
8 | # (http://mi.eng.cam.ac.uk/projects/segnet/tutorial.html)
9 |
10 | import matplotlib
11 | matplotlib.use('Agg') # https://github.com/BVLC/caffe/issues/861
12 |
13 | import os
14 | import numpy as np
15 | from skimage.io import ImageCollection
16 | from argparse import ArgumentParser
17 |
18 |
19 | caffe_root = os.getenv('CAFFE_ROOT', '/home/ubuntu/caffe-segnet/')
20 | import sys
21 | sys.path.insert(0, os.path.join(caffe_root, 'python'))
22 |
23 | import caffe
24 | from caffe.proto import caffe_pb2
25 | from google.protobuf import text_format
26 |
27 |
28 | def extract_dataset(net_message):
29 | assert net_message.layer[0].type == "DenseImageData"
30 | source = net_message.layer[0].dense_image_data_param.source
31 | with open(source) as f:
32 | data = f.read().split()
33 | ims = ImageCollection(data[::2])
34 | labs = ImageCollection(data[1::2])
35 | assert len(ims) == len(labs) > 0
36 | return ims, labs
37 |
38 |
39 | def make_testable(train_model_path):
40 | # load the train net prototxt as a protobuf message
41 | with open(train_model_path) as f:
42 | train_str = f.read()
43 | train_net = caffe_pb2.NetParameter()
44 | text_format.Merge(train_str, train_net)
45 |
46 | # add the mean, var top blobs to all BN layers
47 | for layer in train_net.layer:
48 | if layer.type == "BN" and len(layer.top) == 1:
49 | layer.top.append(layer.top[0] + "-mean")
50 | layer.top.append(layer.top[0] + "-var")
51 |
52 | # remove the test data layer if present
53 | if train_net.layer[1].name == "data" and train_net.layer[1].include:
54 | train_net.layer.remove(train_net.layer[1])
55 | if train_net.layer[0].include:
56 | # remove the 'include {phase: TRAIN}' layer param
57 | train_net.layer[0].include.remove(train_net.layer[0].include[0])
58 | return train_net
59 |
60 |
61 | def make_test_files(testable_net_path, train_weights_path, num_iterations,
62 | in_h, in_w):
63 | # load the train net prototxt as a protobuf message
64 | with open(testable_net_path) as f:
65 | testable_str = f.read()
66 | testable_msg = caffe_pb2.NetParameter()
67 | text_format.Merge(testable_str, testable_msg)
68 |
69 | bn_layers = [l.name for l in testable_msg.layer if l.type == "BN"]
70 | bn_blobs = [l.top[0] for l in testable_msg.layer if l.type == "BN"]
71 | bn_means = [l.top[1] for l in testable_msg.layer if l.type == "BN"]
72 | bn_vars = [l.top[2] for l in testable_msg.layer if l.type == "BN"]
73 |
74 | net = caffe.Net(testable_net_path, train_weights_path, caffe.TEST)
75 |
76 | # init our blob stores with the first forward pass
77 | res = net.forward()
78 | bn_avg_mean = {bn_mean: np.squeeze(res[bn_mean]).copy() for bn_mean in bn_means}
79 | bn_avg_var = {bn_var: np.squeeze(res[bn_var]).copy() for bn_var in bn_vars}
80 |
81 | # iterate over the rest of the training set
82 | for i in xrange(1, num_iterations):
83 | res = net.forward()
84 | for bn_mean in bn_means:
85 | bn_avg_mean[bn_mean] += np.squeeze(res[bn_mean])
86 | for bn_var in bn_vars:
87 | bn_avg_var[bn_var] += np.squeeze(res[bn_var])
88 | print 'progress: {}/{}'.format(i, num_iterations)
89 |
90 | # compute average means and vars
91 | for bn_mean in bn_means:
92 | bn_avg_mean[bn_mean] /= num_iterations
93 | for bn_var in bn_vars:
94 | bn_avg_var[bn_var] /= num_iterations
95 |
96 | for bn_blob, bn_var in zip(bn_blobs, bn_vars):
97 | m = np.prod(net.blobs[bn_blob].data.shape) / np.prod(bn_avg_var[bn_var].shape)
98 | bn_avg_var[bn_var] *= (m / (m - 1))
99 |
100 | # calculate the new scale and shift blobs for all the BN layers
101 | scale_data = {bn_layer: np.squeeze(net.params[bn_layer][0].data)
102 | for bn_layer in bn_layers}
103 | shift_data = {bn_layer: np.squeeze(net.params[bn_layer][1].data)
104 | for bn_layer in bn_layers}
105 |
106 | var_eps = 1e-9
107 | new_scale_data = {}
108 | new_shift_data = {}
109 | for bn_layer, bn_mean, bn_var in zip(bn_layers, bn_means, bn_vars):
110 | gamma = scale_data[bn_layer]
111 | beta = shift_data[bn_layer]
112 | Ex = bn_avg_mean[bn_mean]
113 | Varx = bn_avg_var[bn_var]
114 | new_gamma = gamma / np.sqrt(Varx + var_eps)
115 | new_beta = beta - (gamma * Ex / np.sqrt(Varx + var_eps))
116 |
117 | new_scale_data[bn_layer] = new_gamma
118 | new_shift_data[bn_layer] = new_beta
119 | print "New data:"
120 | print new_scale_data.keys()
121 | print new_shift_data.keys()
122 |
123 | # assign computed new scale and shift values to net.params
124 | for bn_layer in bn_layers:
125 | net.params[bn_layer][0].data[...] = new_scale_data[bn_layer].reshape(
126 | net.params[bn_layer][0].data.shape
127 | )
128 | net.params[bn_layer][1].data[...] = new_shift_data[bn_layer].reshape(
129 | net.params[bn_layer][1].data.shape
130 | )
131 |
132 | # build a test net prototxt
133 | test_msg = testable_msg
134 | # replace data layers with 'input' net param
135 | data_layers = [l for l in test_msg.layer if l.type.endswith("Data")]
136 | for data_layer in data_layers:
137 | test_msg.layer.remove(data_layer)
138 | test_msg.input.append("data")
139 | test_msg.input_dim.append(1)
140 | test_msg.input_dim.append(3)
141 | test_msg.input_dim.append(in_h)
142 | test_msg.input_dim.append(in_w)
143 | # Set BN layers to INFERENCE so they use the new stat blobs
144 | # and remove mean, var top blobs.
145 | for l in test_msg.layer:
146 | if l.type == "BN":
147 | if len(l.top) > 1:
148 | dead_tops = l.top[1:]
149 | for dl in dead_tops:
150 | l.top.remove(dl)
151 | l.bn_param.bn_mode = caffe_pb2.BNParameter.INFERENCE
152 | # replace output loss, accuracy layers with a softmax
153 | dead_outputs = [l for l in test_msg.layer if l.type in ["SoftmaxWithLoss", "Accuracy"]]
154 | out_bottom = dead_outputs[0].bottom[0]
155 | for dead in dead_outputs:
156 | test_msg.layer.remove(dead)
157 | test_msg.layer.add(
158 | name="prob", type="Softmax", bottom=[out_bottom], top=['prob']
159 | )
160 | return net, test_msg
161 |
162 |
163 | def make_parser():
164 | p = ArgumentParser()
165 | p.add_argument('train_model')
166 | p.add_argument('weights')
167 | p.add_argument('out_dir')
168 | p.add_argument('--gpu', type=int, default=None)
169 | return p
170 |
171 |
172 | if __name__ == '__main__':
173 | caffe.set_mode_gpu()
174 | p = make_parser()
175 | args = p.parse_args()
176 |
177 | if args.gpu is not None:
178 | caffe.set_device(args.gpu)
179 |
180 | # build and save testable net
181 | if not os.path.exists(args.out_dir):
182 | os.makedirs(args.out_dir)
183 | print "Building BN calc net..."
184 | testable_msg = make_testable(args.train_model)
185 | BN_calc_path = os.path.join(
186 | args.out_dir, '__for_calculating_BN_stats_' + os.path.basename(args.train_model)
187 | )
188 | with open(BN_calc_path, 'w') as f:
189 | f.write(text_format.MessageToString(testable_msg))
190 |
191 | # use testable net to calculate BN layer stats
192 | print "Calculate BN stats..."
193 | train_ims, train_labs = extract_dataset(testable_msg)
194 | train_size = len(train_ims)
195 | minibatch_size = testable_msg.layer[0].dense_image_data_param.batch_size
196 |     num_iterations = (train_size + minibatch_size - 1) // minibatch_size  # ceil
197 |     in_h, in_w = (360, 480)
198 | test_net, test_msg = make_test_files(BN_calc_path, args.weights, num_iterations,
199 | in_h, in_w)
200 |
201 | # save deploy prototxt
202 | #print "Saving deployment prototext file..."
203 | #test_path = os.path.join(args.out_dir, "deploy.prototxt")
204 | #with open(test_path, 'w') as f:
205 | # f.write(text_format.MessageToString(test_msg))
206 |
207 | print "Saving test net weights..."
208 | test_net.save(os.path.join(args.out_dir, "test_weights.caffemodel"))
209 | print "done"
210 |
211 |
--------------------------------------------------------------------------------
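The core arithmetic above folds the dataset-wide statistics into each BN layer's scale and shift, so that at inference time a plain affine transform reproduces explicit normalization. A NumPy check of that identity (the values are made up):

```python
import numpy as np

gamma, beta = 1.5, 0.2           # learned BN scale and shift
mean, var, eps = 3.0, 4.0, 1e-9  # dataset statistics for this layer

new_gamma = gamma / np.sqrt(var + eps)
new_beta = beta - gamma * mean / np.sqrt(var + eps)

x = np.array([1.0, 3.0, 5.0])    # raw activations
normalized = gamma * (x - mean) / np.sqrt(var + eps) + beta
folded = new_gamma * x + new_beta
assert np.allclose(normalized, folded)
```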
/segnet/demo.js:
--------------------------------------------------------------------------------
1 | const qs = require('querystring')
2 | const http = require('choo/http')
3 | const tilebelt = require('tilebelt')
4 | const createMap = require('../results-viewer/map')
5 | const getSatelliteTileURL = require('../results-viewer/get-tile-url')
6 |
7 | const query = qs.parse(window.location.search.substring(1))
8 |
9 | const predictions = window.location.href.replace(/static.*$/, 'index.json')
10 |
11 | let map = createMap(query).on('load', function () {
12 | map.addSource('prediction', {
13 | type: 'raster',
14 | tileSize: 256,
15 | url: predictions
16 | })
17 | map.addLayer({
18 | id: 'prediction',
19 | type: 'raster',
20 | source: 'prediction',
21 | paint: {
22 | 'raster-opacity': 0.5
23 | }
24 | })
25 | })
26 |
27 |
--------------------------------------------------------------------------------
/segnet/extract-log-data.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # Dump data parsed from caffe logs to stdout
4 |
5 | set -eu
6 |
7 | SKYNET_TRAIN=${SKYNET_TRAIN:-"."}
8 |
9 | MODEL=$1
10 | NAME=${2:-$(basename $MODEL)}
11 | TO_CSV="sed -re s/[[:blank:]]+/,/g"
12 | PREPEND_NAME="sed s/^/$NAME,/"
13 |
14 |
15 | FIRST=true
16 | for log in $MODEL/train_*.log; do
17 | $SKYNET_TRAIN/util/parse_log.sh $log
18 | data="$(basename $log).train"
19 |
20 | # header
21 | if $FIRST; then head -n 1 $data | $TO_CSV | sed s/^/Model,/; fi
22 | # data
23 | tail -n +2 $data | $TO_CSV | $PREPEND_NAME
24 |
25 | rm $data
26 | rm "$(basename $log).test"
27 | FIRST=false
28 | done
29 |
30 |
--------------------------------------------------------------------------------
/segnet/get-model-metadata:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # Download from S3 all of the non-huge data from model training:
4 | # - caffe prototxt files
5 | # - training logs
6 | # - test output (metrics only, not the images)
7 |
8 | # Usage: get-model-metadata bucketname/prefix path/to/local/dir [model_name]
9 |
10 | set -eu
11 |
12 | INPUT=$1
13 | OUTPUT=$2
14 | # optionally accept a specific model as third arg
15 | if [[ $# -gt 2 ]]; then
16 | INPUT="$INPUT/$3"
17 | OUTPUT="$OUTPUT/$3"
18 | fi
19 |
20 | aws s3 cp --recursive s3://$INPUT/ $OUTPUT \
21 | --exclude '*.caffemodel' \
22 | --exclude '*.solverstate' \
23 | --exclude '*.png' --include 'train-loss-vs-iters.png' \
24 | --exclude '*.inference/*'
25 |
26 |
--------------------------------------------------------------------------------
/segnet/inference.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import scipy
3 | import time
4 |
5 |
6 | def predict(net, label_colors, threshold, image=None):
7 | num_classes = len(label_colors)
8 |
9 | if image is not None:
10 | image = image.transpose((2, 0, 1))
11 | net.blobs['data'].data[0] = image
12 |
13 | start = time.time()
14 | net.forward()
15 | print('Prediction time', time.time() - start)
16 |
17 | predicted = net.blobs['prob'].data
18 | output = np.squeeze(predicted[0, :, :, :])
19 |
20 | # only use the max-probability non-background class if its probability is
21 | # above some threshold
22 | ind = np.argmax(output, axis=0)
23 | fg = output[:-1, :, :] # foreground classes only
24 | bg = np.full(ind.shape, num_classes - 1)
25 | ind = np.where(np.max(fg, axis=0) > threshold, ind, bg)
26 |
27 | max_prob = np.max(output, axis=0)
28 | return labels_to_image(ind, label_colors, max_prob)
29 |
30 |
31 | def labels_to_image(labels, label_colors, alpha=None):
32 | num_classes = len(label_colors)
33 | # construct output image
34 | r = labels.copy()
35 | g = labels.copy()
36 | b = labels.copy()
37 | a = np.zeros(labels.shape)
38 | label_colors = np.array(label_colors)
39 | for l in range(0, num_classes):
40 | r[labels == l] = label_colors[l, 0]
41 | g[labels == l] = label_colors[l, 1]
42 | b[labels == l] = label_colors[l, 2]
43 |
44 | if (alpha is not None):
45 | a[labels != num_classes - 1] = alpha[labels != num_classes - 1] * 255
46 | else:
47 | a[:] = 255
48 |
49 | rgb = np.zeros((labels.shape[0], labels.shape[1], 4))
50 | rgb[:, :, 0] = r
51 | rgb[:, :, 1] = g
52 | rgb[:, :, 2] = b
53 | rgb[:, :, 3] = a
54 |
55 | return scipy.misc.toimage(rgb, cmin=0.0, cmax=255, mode='RGBA')
56 |
--------------------------------------------------------------------------------
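A tiny NumPy example of the thresholding rule in `predict` above: the argmax class is kept only where the best foreground probability clears the threshold; otherwise the pixel falls back to the background (last) class:

```python
import numpy as np

# probabilities for 2 foreground classes + background, over a 1x4 image
output = np.array([
    [[0.7, 0.4, 0.2, 0.1]],   # class 0
    [[0.1, 0.3, 0.3, 0.2]],   # class 1
    [[0.2, 0.3, 0.5, 0.7]],   # background (last class)
])
num_classes = output.shape[0]

ind = np.argmax(output, axis=0)
fg = output[:-1, :, :]                   # foreground classes only
bg = np.full(ind.shape, num_classes - 1)
labels = np.where(np.max(fg, axis=0) > 0.5, ind, bg)
print(labels)  # [[0 2 2 2]]
```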
/segnet/local_inference.py:
--------------------------------------------------------------------------------
1 | # https://github.com/BVLC/caffe/issues/861
2 | import matplotlib
3 | matplotlib.use('Agg')
4 |
5 | import json
6 | import os
7 | from os import path as op
8 | import sys
9 | import subprocess
10 |
11 | caffe_root = os.getenv('CAFFE_ROOT', '/home/ubuntu/caffe-segnet/')
12 | sys.path.insert(0, caffe_root + 'python')
13 | import caffe
14 | import click
15 | import numpy as np
16 | import rasterio
17 | from mercantile import bounds
18 | from pyproj import Proj, transform
19 | from PIL import Image
20 |
21 | from inference import predict
22 | from vectorize import vectorize
23 |
24 | dirname = op.dirname(op.realpath(__file__))
25 | polys_to_lines = op.join(dirname, '../vectorize.js')
26 |
27 | class TileNotFoundError(Exception):
28 | def __init__(self, msg):
29 | Exception.__init__(self, msg)
30 |
31 |
32 | def setup_net(model_file, weights_file, gpu, cpu_only):
33 | if not op.isfile(weights_file) and op.isdir('/model'):
34 | caffemodels = filter(lambda x: x.endswith('.caffemodel'), os.listdir('/model'))
35 | if len(caffemodels) == 0:
36 | raise Exception('No .caffemodel files found in /model.')
37 | weights_file = '/model/%s' % caffemodels[0]
38 |
39 | # read model definition
40 | model = open(model_file, 'r').read()
41 | # create net
42 | if cpu_only:
43 | caffe.set_mode_cpu()
44 | else:
45 | caffe.set_mode_gpu()
46 | caffe.set_device(gpu)
47 |
48 | return caffe.Net(model_file.encode('utf8'),
49 | weights_file.encode('utf8'),
50 | caffe.TEST)
51 |
52 |
53 | def make_prediction(net, colors, im, threshold, outfile):
54 | bands = len(im.getbands())
55 | imdata = np.array(im.getdata()).reshape(im.size[0], im.size[1], bands)
56 | predicted = predict(net, colors, threshold, imdata)
57 | predicted.save(outfile, 'PNG')
58 |
59 |
60 | def get_image_tile(raster, x, y, z):
61 | try:
62 | bound = bounds(x, y, z)
63 |
64 | with rasterio.open(raster) as src:
65 | x_res, y_res = src.transform[0], src.transform[4]
66 | p1 = Proj({'init': 'epsg:4326'})
67 | p2 = Proj(**src.crs)
68 |
69 | # project tile boundaries from lat/lng to source CRS
70 | tile_ul_proj = transform(p1, p2, bound.west, bound.north)
71 | tile_lr_proj = transform(p1, p2, bound.east, bound.south)
72 | # get origin point from the TIF
73 | tif_ul_proj = (src.bounds.left, src.bounds.top)
74 |
75 | # use the above information to calculate the pixel indices of the window
76 | top = int((tile_ul_proj[1] - tif_ul_proj[1]) / y_res)
77 | left = int((tile_ul_proj[0] - tif_ul_proj[0]) / x_res)
78 | bottom = int((tile_lr_proj[1] - tif_ul_proj[1]) / y_res)
79 | right = int((tile_lr_proj[0] - tif_ul_proj[0]) / x_res)
80 |
81 | window = ((top, bottom), (left, right))
82 |
83 | # read the first three bands (assumed RGB) of the TIF into an array
84 | data = np.empty(shape=(3, 256, 256)).astype(src.profile['dtype'])
85 | for k in (1, 2, 3):
86 | src.read(k, window=window, out=data[k - 1], boundless=True)
87 |
88 | return Image.fromarray(np.moveaxis(data, 0, -1), mode='RGB')
89 | except Exception as err:
90 | raise TileNotFoundError(err)
91 |
92 |
93 | @click.command()
94 | @click.argument('raster')
95 | @click.argument('tiles')
96 | @click.option('--model', type=str, default='/model/segnet_deploy.prototxt')
97 | @click.option('--weights', type=str, default='/model/weights.caffemodel')
98 | @click.option('--classes', type=str, default='/model/classes.json')
99 | @click.option('--output', type=str, default='/inference')
100 | @click.option('--gpu', type=int, default=0)
101 | @click.option('--cpu-only', is_flag=True, default=False)
102 | @click.option('--threshold', type=float, default=0.5)
103 | def run_batch(raster, tiles, model, weights, classes, output, gpu, cpu_only, threshold):
104 | net = setup_net(model, weights, gpu, cpu_only)
105 |
106 | # read classes metadata
107 | with open(classes) as c:
108 | colors = map(lambda x: x['color'][1:], json.load(c))
109 | colors.append('000000')
110 | colors = map(lambda rgbstr: tuple(map(ord, rgbstr.decode('hex'))), colors)
111 |
112 | centerlines_file = op.join(output, 'complete.geojson')
113 | centerlines = open(centerlines_file, 'w')
114 |
115 | with open(tiles) as tile_list:
116 | for tile in tile_list:
117 | try:
118 | click.echo('processing: %s' % tile.strip())
119 | x, y, z = [int(t) for t in tile.strip().split('-')]
120 | image = get_image_tile(raster, x, y, z)
121 | image.save(op.join(output, '%s_real.png' % tile.strip()))
122 |
123 | # run prediction
124 | predicted_file = op.join(output, '%s.png' % tile.strip())
125 | make_prediction(net, colors, image, threshold, predicted_file)
126 |
127 | # trace raster -> polygons
128 | polygonized_file = op.join(output, '%s.geojson' % tile.strip())
129 | with open(polygonized_file, 'w') as p:
130 | p.write(json.dumps(vectorize(predicted_file)))
131 |
132 | # polygons => centerlines
133 | polyspine_args = map(str, [polys_to_lines, polygonized_file, x, y, z, 0.2])
134 | exitcode = subprocess.call(polyspine_args, stdout=centerlines)
135 |
136 | if exitcode != 0:
137 | raise Exception('Vectorize exited nonzero')
138 |
139 | except TileNotFoundError:
140 | click.echo('Imagery tile not found.')
141 | except Exception as err:
142 | click.echo(err)
143 |
144 | centerlines.close()
145 |
146 |
147 | if __name__ == '__main__':
148 | run_batch()
149 |
--------------------------------------------------------------------------------
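The windowing logic in `get_image_tile` above reduces to converting a projected point into pixel offsets from the raster's origin. A condensed sketch (assumes a north-up raster; the numbers are illustrative):

```python
def to_pixel(pt_x, pt_y, origin_x, origin_y, x_res, y_res):
    # y_res is negative for north-up rasters, so rows increase southward
    col = int((pt_x - origin_x) / x_res)
    row = int((pt_y - origin_y) / y_res)
    return row, col

# raster origin at (500000, 4100000) with 2 m pixels
row, col = to_pixel(500100.0, 4099900.0, 500000.0, 4100000.0, 2.0, -2.0)
print(row, col)  # 50 50
```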
/segnet/metrics.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | # + + + + + + + + + + + + + + + + +
5 | # + + + + + + + + + + + + + + + + +
6 | # + + + + + + + + + + + + + + + + +
7 | # + + + @ + + + + + + + + + + + + +
8 | # + + + + + + + + + + + + + + + + +
9 | # + + + + + + + + + + + + + + + + +
10 | # + + + + + + + + + + + + + + + + +
11 | def complete_and_correct(output, label, r, threshold):
12 | num_classes = output.shape[0]
13 | bg = num_classes - 1
14 | predicted_pixels = np.zeros((num_classes,))
15 | actual_pixels = np.zeros((num_classes,))
16 |
17 | # only use the max-probability non-background class if its probability is
18 | # above the threshold
19 | ind = np.argmax(output, axis=0)
20 | fg = output[:-1, :, :] # foreground classes only
21 | prediction = np.where(np.max(fg, axis=0) > threshold, ind, num_classes - 1)
22 |
23 | # for each class, the number of pixels where that class is predicted and
24 | # the groundtruth includes a pixel with that class within r pixels.
25 | # (i.e., number of true positives)
26 | correct = np.zeros((num_classes - 1,))
27 |
28 | # for each class, the number of pixels where that class is the true value
29 | # and the predicted output includes a pixel with that class within r pixels
30 | # (i.e., actual_pixels - number of false negatives)
31 | complete = np.zeros((num_classes - 1,))
32 |
33 | # todo -- deal with image boundaries
34 | for x in range(r + 1, label.shape[0] - r - 1):
35 | for y in range(r + 1, label.shape[1] - r - 1):
36 | # assess completeness contribution of this pixel
37 | a_class = label[x, y]
38 |             p_win = prediction[x - r: x + r + 1, y - r: y + r + 1]
39 | actual_pixels[a_class] += 1
40 | if a_class != bg and (p_win == a_class).any():
41 | complete[a_class] += 1
42 | # assess correctness contribution of this pixel
43 | p_class = prediction[x, y]
44 |             a_win = label[x - r: x + r + 1, y - r: y + r + 1]
45 | predicted_pixels[p_class] += 1
46 | if p_class != bg and (a_win == p_class).any():
47 | correct[p_class] += 1
48 |
49 | return {
50 | 'pixels_correct': correct.tolist(),
51 | 'pixels_predicted': predicted_pixels.tolist(),
52 | 'pixels_actual': actual_pixels.tolist(),
53 | 'pixels_complete': complete.tolist(),
54 | 'completeness_score': np.sum(complete) / np.sum(actual_pixels[:-1]),
55 | 'correctness_score': np.sum(correct) / np.sum(predicted_pixels[:-1])
56 | }
57 |
--------------------------------------------------------------------------------
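A small worked example of the buffered matching above, with one foreground class and r = 1: a prediction offset by a single pixel still scores fully complete and correct, which is the point of the buffer (a 1-D sketch only; the real code uses 2-D windows):

```python
import numpy as np

BG = 1                                    # background = last class
label = np.array([BG, 0, 0, 0, BG, BG])   # road pixels at indices 1..3
pred = np.array([BG, BG, 0, 0, 0, BG])    # same road, shifted right by one

r = 1
complete = sum((pred[max(0, i - r): i + r + 1] == 0).any()
               for i in range(len(label)) if label[i] == 0)
correct = sum((label[max(0, i - r): i + r + 1] == 0).any()
              for i in range(len(pred)) if pred[i] == 0)

print(complete / 3.0, correct / 3.0)  # 1.0 1.0 despite the 1-pixel offset
```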
/segnet/queue.py:
--------------------------------------------------------------------------------
1 | import json
2 | import click
3 | from boto3.session import Session
4 | session = Session()
5 |
6 | sqs = session.resource('sqs')
7 | s3 = session.client('s3')
8 |
9 |
10 | def send(queue, messages, dryrun):
11 | messages = [{'Id': str(i), 'MessageBody': m} for i, m in enumerate(messages)]
12 | if dryrun:
13 | print('DRYRUN %s' % json.dumps({'Entries': messages}))
14 | return
15 | q = sqs.get_queue_by_name(QueueName=queue)
16 | resp = q.send_messages(Entries=messages)
17 | assert len(resp.get('Failed', [])) == 0, str(resp)
18 |
19 |
20 | def receive(queue):
21 | q = sqs.get_queue_by_name(QueueName=queue)
22 | while True:
23 | for message in q.receive_messages(MaxNumberOfMessages=10, VisibilityTimeout=60):
24 | yield message
25 |
26 |
27 | @click.command()
28 | @click.argument('queue_name')
29 | @click.argument('output_bucket')
30 | @click.argument('tileset')
31 | @click.argument('image_tiles')
32 | @click.argument('input', default='-')
33 | @click.option('--tile-url', type=str)
34 | @click.option('--dryrun', is_flag=True, default=False)
35 | def populate(queue_name, output_bucket, tileset, image_tiles, input, tile_url, dryrun):
36 | """
37 | Populate the given SQS queue with tasks of the form [bucket, tileset, image_tiles, z, x, y],
38 |     to be consumed by batch_inference.py.
39 |
40 | Works well with cover.js. E.g.:
41 |
42 | ./cover.js file.geojson 15 16 | python segnet/queue.py my_queue my_bucket my_tileset "http://api.mapbox.com/v4/mapbox.satellite/{z}/{x}/{y}.png?access_token={MAPBOX_ACCESS_TOKEN}"
43 | """
44 |
45 | try:
46 | input = click.open_file(input).readlines()
47 | except IOError:
48 | input = [input]
49 | batch = []
50 | count = 0
51 | for tile in input:
52 | tile = (x, y, z) = json.loads(tile.strip())
53 | count += 1
54 | batch.append(json.dumps((output_bucket, tileset, image_tiles, z, x, y)))
55 | if (len(batch) == 10):
56 | send(queue_name, batch, dryrun)
57 | batch = []
58 |
59 | if len(batch) > 0:
60 | send(queue_name, batch, dryrun)
61 |
62 | if not tile_url:
63 | tile_url = 'https://%s.s3.amazonaws.com/%s/{z}/{x}/{y}.png' % (output_bucket, tileset)
64 | tilejson = json.dumps({
65 | 'tilejson': '2.0.0',
66 | 'tiles': [tile_url]
67 | })
68 | key = '%s/index.json' % tileset
69 | click.echo('%s tiles queued. Pushing TileJSON to %s/%s' % (count, output_bucket, tileset))
70 | click.echo(tilejson)
71 | if not dryrun:
72 | s3.put_object(Body=tilejson, Bucket=output_bucket, Key=key, ContentType='application/json')
73 |
74 |
75 | if __name__ == '__main__':
76 | populate()
77 |
--------------------------------------------------------------------------------
/segnet/run-test:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env node
2 |
3 | /*
4 | * Convenience script for testing net from a particular snapshot during
5 | * training.
6 | *
7 | * 1. Compute the 'batch normalization' statistics from the training data, and
8 | * create a new copy of the trained weights with this normalization applied to
9 | * them. (See compute_bn_statistics.py for details.)
10 | * If the given weights were `snapshots/segnet_iter_X.caffemodel`, the
11 | * normalized weights will be at `snapshots/segnet_iter_X.inference/test_weights.caffemodel`
12 | *
13 | * 2. Run forward inference on the test data, outputting prediction images and
14 | * completeness/correctness metrics into `snapshots/segnet_iter_X.results/`.
15 | * (See test_segmentation.py for details.)
16 | */
17 |
18 | var fs = require('fs-extra')
19 | var path = require('path')
20 | var spawnSync = require('child_process').spawnSync
21 | var argv = require('minimist')(process.argv.slice(2), {
22 | alias: {
23 | output: 'o',
24 | train: 't',
25 | weights: 'w',
26 | classes: 'c'
27 | }
28 | })
29 |
30 | var computeStats = path.join(__dirname, 'compute_bn_statistics.py')
31 | var runTest = path.join(__dirname, 'test_segmentation.py')
32 |
33 | if (!argv.train || !argv.weights || !argv.classes) {
34 | console.error('Usage: ' + process.argv.slice(0, 2).join(' ') + ' --train train.prototxt --weights weights.caffemodel --classes class-defs.json --output /path/to/results/output')
35 | console.log(argv)
36 | process.exit(1)
37 | }
38 |
39 | var train = path.resolve(argv.train)
40 | var weights = path.resolve(argv.weights)
41 | var classes = path.resolve(argv.classes)
42 | if (!argv.output) { // default the output dir from the weights filename
43 | argv.output = weights.replace('.caffemodel', '.results')
44 | }
45 | var output = path.resolve(argv.output)
46 | if (!fs.existsSync(output)) {
47 | fs.mkdirSync(output)
48 | }
49 |
50 | // guess the inference.prototxt filename
51 | var inference = argv.inference || train.replace('_train.prototxt', '_inference.prototxt')
52 |
53 | // dir for outputting the BN-stats-infused weights
54 | var inferenceDir = weights.replace('.caffemodel', '.inference')
55 | fs.mkdirSync(inferenceDir)
56 |
57 | console.log('Computing batch normalization stats.')
58 | spawnSync('python', [
59 | computeStats,
60 | train,
61 | weights,
62 | inferenceDir
63 | ].concat(argv.gpu ? ['--gpu', argv.gpu] : []), { stdio: 'inherit' })
64 |
65 | console.log('Running inference.')
66 | spawnSync('python', [
67 | runTest,
68 | '--model', inference,
69 | '--weights', path.join(inferenceDir, 'test_weights.caffemodel'),
70 | '--classes', classes,
71 | '--output', output
72 | ].concat(argv.gpu ? ['--gpu', argv.gpu] : []), { stdio: 'inherit' })
73 |
74 | fs.copySync(path.join(__dirname, '../results-viewer/dist'), output)
75 |
76 | console.log('Done!')
77 |
78 |
--------------------------------------------------------------------------------
/segnet/setup-model:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env node
2 | var fs = require('fs')
3 | var path = require('path')
4 |
5 | // Set up the caffe model prototxt files for a given training data set
6 | module.exports = setupModel
7 | function setupModel (model, batchSize, trainingData, testData, crop, stats, output) {
8 | var cw = classWeights(stats).map(function (weight) {
9 | return 'class_weighting: ' + weight
10 | })
11 |
12 | batchSize = isNaN(batchSize) ? (model === 'segnet' ? 6 : 16) : batchSize
13 |
14 | renderModel('train', model, cw, output)
15 | renderModel('inference', model, cw, output)
16 | renderModel('deploy', model, cw, output)
17 |
18 |   // phase: 'train', 'inference', or 'deploy' ('deploy' reuses the inference template)
19 | // model: 'segnet' or 'segnet_basic'
20 | function renderModel (phase, model, classWeights, outputDir) {
21 | var template = model + '_' + (phase === 'train' ? 'train' : 'inference') + '.prototxt'
22 | template = path.join(__dirname, 'templates', template)
23 | var batch = phase === 'train' ? batchSize : 1
24 | var inputLayer = phase === 'deploy' ? `input: "data"
25 | input_shape: {
26 | dim: 1
27 | dim: 3
28 | dim: 256
29 | dim: 256
30 | }` : `layer {
31 | name: "data"
32 | type: "DenseImageData"
33 | top: "data"
34 | top: "label"
35 | dense_image_data_param {
36 | source: "${phase === 'train' ? trainingData : testData}"
37 | batch_size: ${batch}
38 | shuffle: ${phase === 'train' ? 'true' : 'false'}
39 | ${crop ? 'crop_height: ' + crop : ''}
40 | ${crop ? 'crop_width: ' + crop : ''}
41 | }
42 | }`
43 |
44 | var tmpl = fs.readFileSync(template, 'utf-8')
45 | .replace(/INPUT/g, inputLayer)
46 | .replace(/CLASS_COUNT/g, classWeights.length)
47 | .replace(/(\s+)CLASS_WEIGHTING/g, (_, space) => {
48 | return classWeights.map(w => space + w).join('')
49 | })
50 |
51 | var file = path.join(outputDir, model + '_' + phase + '.prototxt')
52 | fs.writeFileSync(file, tmpl)
53 | }
54 |
55 | // compute class weighting according to SegNet paper: http://arxiv.org/abs/1511.00561
56 | function classWeights (stats) {
57 | var data = fs.readFileSync(stats, 'utf-8').split('\n')
58 | .slice(1)
59 | .filter(Boolean)
60 | .map(x => x.split(',').map(Number))
61 | var frequencies = data.map(x => x[1] / (x[2] * 65536))
62 | var sorted = [].concat(frequencies).sort((a, b) => (a - b))
63 | var n = sorted.length
64 |     var median = n % 2 === 0 ? (sorted[n / 2] + sorted[n / 2 - 1]) / 2 : sorted[(n - 1) / 2]
65 | return frequencies.map(x => (median / x))
66 | }
67 | }
68 |
69 | if (require.main === module) {
70 | var args = require('minimist')(process.argv.slice(2), {
71 | alias: {
72 | d: 'data',
73 | o: 'output',
74 | m: 'model',
75 | b: 'batch-size'
76 | }
77 | })
78 | var trainingData = args['train'] || (args['data'] + '/' + 'train.txt')
79 | var testData = args['test'] || (args['data'] + '/' + 'val.txt')
80 | var stats = args['label-stats'] || (args['data'] + '/labels/label-stats.csv')
81 | var model = args['model']
82 | var output = args['output']
83 | var crop = args['crop']
84 | if (!(output && model && stats && trainingData && testData)) {
85 |     console.log('Usage: setup-model --model segnet|segnet_basic --train /path/to/train.txt --test /path/to/val.txt --label-stats /path/to/labelstats.csv --output /path/to/output/dir')
86 |     console.log('label-stats refers to the label stats csv generated by skynet-data scripts')
87 | process.exit(1)
88 | }
89 | setupModel(model, args.b, trainingData, testData, crop, stats, output)
90 | }
91 |
--------------------------------------------------------------------------------
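As the comment in setup-model notes, classWeights implements the median-frequency balancing from the SegNet paper (arXiv:1511.00561): each class is weighted by median(freq) / freq(class), where a class's frequency is its pixel count divided by the total pixels of the tiles containing it. A Python re-statement of the same computation, assuming the skynet-data label-stats CSV layout the script reads (a header row, then rows of label, pixel count, tile count, with 256x256-pixel tiles):

    import csv

    def class_weights(stats_csv):
        """Median-frequency balancing (SegNet, arXiv:1511.00561)."""
        with open(stats_csv) as f:
            rows = [r for r in csv.reader(f)][1:]      # skip the header row
        # freq = class pixel count / total pixels in tiles containing the class
        freqs = [float(r[1]) / (float(r[2]) * 256 * 256) for r in rows if r]
        s = sorted(freqs)
        n = len(s)
        median = (s[n // 2] + s[(n - 1) // 2]) / 2.0   # handles odd and even n
        return [median / f for f in freqs]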
/segnet/static/index.html:
--------------------------------------------------------------------------------
(The contents of this HTML file were stripped during extraction; the original markup is not recoverable here.)
--------------------------------------------------------------------------------
/segnet/templates/bayesian_segnet_basic_inference.prototxt:
--------------------------------------------------------------------------------
1 | name: "segnet"
2 | INPUT
3 | layer {
4 | name: "norm"
5 | type: "LRN"
6 | bottom: "data"
7 | top: "norm"
8 | lrn_param {
9 | local_size: 5
10 | alpha: 0.0001
11 | beta: 0.75
12 | }
13 | }
14 | layer {
15 | name: "conv1"
16 | type: "Convolution"
17 | bottom: "norm"
18 | top: "conv1"
19 | param {
20 | lr_mult: 1
21 | decay_mult: 1
22 | }
23 | param {
24 | lr_mult: 2
25 | decay_mult: 0
26 | }
27 | convolution_param {
28 | num_output: 64
29 | kernel_size: 7
30 | pad: 3
31 | weight_filler {
32 | type: "msra"
33 | }
34 | bias_filler {
35 | type: "constant"
36 | }
37 | }
38 | }
39 | layer {
40 | bottom: "conv1"
41 | top: "conv1"
42 | name: "conv1_bn"
43 | type: "BN"
44 | bn_param {
45 | bn_mode: INFERENCE
46 | scale_filler {
47 | type: "constant"
48 | value: 1
49 | }
50 | shift_filler {
51 | type: "constant"
52 | value: 0.001
53 | }
54 | }
55 | }
56 | layer {
57 | name: "relu1"
58 | type: "ReLU"
59 | bottom: "conv1"
60 | top: "conv1"
61 | }
62 | layer {
63 | name: "pool1"
64 | type: "Pooling"
65 | bottom: "conv1"
66 | top: "pool1"
67 | top: "pool1_mask"
68 | pooling_param {
69 | pool: MAX
70 | kernel_size: 2
71 | stride: 2
72 | }
73 | }
74 | layer {
75 | name: "conv2"
76 | type: "Convolution"
77 | bottom: "pool1"
78 | top: "conv2"
79 | param {
80 | lr_mult: 1
81 | decay_mult: 1
82 | }
83 | param {
84 | lr_mult: 2
85 | decay_mult: 0
86 | }
87 | convolution_param {
88 | num_output: 64
89 | kernel_size: 7
90 | pad: 3
91 | weight_filler {
92 | type: "msra"
93 | }
94 | bias_filler {
95 | type: "constant"
96 | }
97 | }
98 | }
99 | layer {
100 | bottom: "conv2"
101 | top: "conv2"
102 | name: "conv2_bn"
103 | type: "BN"
104 | bn_param {
105 | bn_mode: INFERENCE
106 | scale_filler {
107 | type: "constant"
108 | value: 1
109 | }
110 | shift_filler {
111 | type: "constant"
112 | value: 0.001
113 | }
114 | }
115 | }
116 | layer {
117 | name: "relu2"
118 | type: "ReLU"
119 | bottom: "conv2"
120 | top: "conv2"
121 | }
122 | layer {
123 | name: "pool2"
124 | type: "Pooling"
125 | bottom: "conv2"
126 | top: "pool2"
127 | top: "pool2_mask"
128 | pooling_param {
129 | pool: MAX
130 | kernel_size: 2
131 | stride: 2
132 | }
133 | }
134 | layer {
135 | name: "conv3"
136 | type: "Convolution"
137 | bottom: "pool2"
138 | top: "conv3"
139 | param {
140 | lr_mult: 1
141 | decay_mult: 1
142 | }
143 | param {
144 | lr_mult: 2
145 | decay_mult: 0
146 | }
147 | convolution_param {
148 | num_output: 64
149 | kernel_size: 7
150 | pad: 3
151 | weight_filler {
152 | type: "msra"
153 | }
154 | bias_filler {
155 | type: "constant"
156 | }
157 | }
158 | }
159 | layer {
160 | bottom: "conv3"
161 | top: "conv3"
162 | name: "conv3_bn"
163 | type: "BN"
164 | bn_param {
165 | bn_mode: INFERENCE
166 | scale_filler {
167 | type: "constant"
168 | value: 1
169 | }
170 | shift_filler {
171 | type: "constant"
172 | value: 0.001
173 | }
174 | }
175 | }
176 | layer {
177 | name: "relu3"
178 | type: "ReLU"
179 | bottom: "conv3"
180 | top: "conv3"
181 | }
182 | layer {
183 | name: "pool3"
184 | type: "Pooling"
185 | bottom: "conv3"
186 | top: "pool3"
187 | top: "pool3_mask"
188 | pooling_param {
189 | pool: MAX
190 | kernel_size: 2
191 | stride: 2
192 | }
193 | }
194 | layer {
195 | name: "encdrop3"
196 | type: "Dropout"
197 | bottom: "pool3"
198 | top: "pool3"
199 | dropout_param {
200 | dropout_ratio: 0.5
201 | sample_weights_test: true
202 | }
203 | }
204 | layer {
205 | name: "conv4"
206 | type: "Convolution"
207 | bottom: "pool3"
208 | top: "conv4"
209 | param {
210 | lr_mult: 1
211 | decay_mult: 1
212 | }
213 | param {
214 | lr_mult: 2
215 | decay_mult: 0
216 | }
217 | convolution_param {
218 | num_output: 64
219 | kernel_size: 7
220 | pad: 3
221 | weight_filler {
222 | type: "msra"
223 | }
224 | bias_filler {
225 | type: "constant"
226 | }
227 | }
228 | }
229 | layer {
230 | bottom: "conv4"
231 | top: "conv4"
232 | name: "conv4_bn"
233 | type: "BN"
234 | bn_param {
235 | bn_mode: INFERENCE
236 | scale_filler {
237 | type: "constant"
238 | value: 1
239 | }
240 | shift_filler {
241 | type: "constant"
242 | value: 0.001
243 | }
244 | }
245 | }
246 | layer {
247 | name: "relu4"
248 | type: "ReLU"
249 | bottom: "conv4"
250 | top: "conv4"
251 | }
252 | layer {
253 | name: "pool4"
254 | type: "Pooling"
255 | bottom: "conv4"
256 | top: "pool4"
257 | top: "pool4_mask"
258 | pooling_param {
259 | pool: MAX
260 | kernel_size: 2
261 | stride: 2
262 | }
263 | }
264 | layer {
265 | name: "encdrop4"
266 | type: "Dropout"
267 | bottom: "pool4"
268 | top: "pool4"
269 | dropout_param {
270 | dropout_ratio: 0.5
271 | sample_weights_test: true
272 | }
273 | }
274 | layer {
275 | name: "upsample4"
276 | type: "Upsample"
277 | bottom: "pool4"
278 | bottom: "pool4_mask"
279 | top: "upsample4"
280 | upsample_param {
281 | scale: 2
282 | }
283 | }
284 | layer {
285 | name: "conv_decode4"
286 | type: "Convolution"
287 | bottom: "upsample4"
288 | top: "conv_decode4"
289 | param {
290 | lr_mult: 1
291 | decay_mult: 1
292 | }
293 | param {
294 | lr_mult: 2
295 | decay_mult: 0
296 | }
297 | convolution_param {
298 | num_output: 64
299 | kernel_size: 7
300 | pad: 3
301 | weight_filler {
302 | type: "msra"
303 | }
304 | bias_filler {
305 | type: "constant"
306 | }
307 | }
308 | }
309 | layer {
310 | bottom: "conv_decode4"
311 | top: "conv_decode4"
312 | name: "conv_decode4_bn"
313 | type: "BN"
314 | bn_param {
315 | bn_mode: INFERENCE
316 | scale_filler {
317 | type: "constant"
318 | value: 1
319 | }
320 | shift_filler {
321 | type: "constant"
322 | value: 0.001
323 | }
324 | }
325 | }
326 | layer {
327 | name: "decdrop4"
328 | type: "Dropout"
329 | bottom: "conv_decode4"
330 | top: "conv_decode4"
331 | dropout_param {
332 | dropout_ratio: 0.5
333 | sample_weights_test: true
334 | }
335 | }
336 | layer {
337 | name: "upsample3"
338 | type: "Upsample"
339 | bottom: "conv_decode4"
340 | bottom: "pool3_mask"
341 | top: "upsample3"
342 | upsample_param {
343 | scale: 2
344 | }
345 | }
346 | layer {
347 | name: "conv_decode3"
348 | type: "Convolution"
349 | bottom: "upsample3"
350 | top: "conv_decode3"
351 | param {
352 | lr_mult: 1
353 | decay_mult: 1
354 | }
355 | param {
356 | lr_mult: 2
357 | decay_mult: 0
358 | }
359 | convolution_param {
360 | num_output: 64
361 | kernel_size: 7
362 | pad: 3
363 | weight_filler {
364 | type: "msra"
365 | }
366 | bias_filler {
367 | type: "constant"
368 | }
369 | }
370 | }
371 | layer {
372 | bottom: "conv_decode3"
373 | top: "conv_decode3"
374 | name: "conv_decode3_bn"
375 | type: "BN"
376 | bn_param {
377 | bn_mode: INFERENCE
378 | scale_filler {
379 | type: "constant"
380 | value: 1
381 | }
382 | shift_filler {
383 | type: "constant"
384 | value: 0.001
385 | }
386 | }
387 | }
388 | layer {
389 | name: "decdrop3"
390 | type: "Dropout"
391 | bottom: "conv_decode3"
392 | top: "conv_decode3"
393 | dropout_param {
394 | dropout_ratio: 0.5
395 | sample_weights_test: true
396 | }
397 | }
398 | layer {
399 | name: "upsample2"
400 | type: "Upsample"
401 | bottom: "conv_decode3"
402 | bottom: "pool2_mask"
403 | top: "upsample2"
404 | upsample_param {
405 | scale: 2
406 | }
407 | }
408 | layer {
409 | name: "conv_decode2"
410 | type: "Convolution"
411 | bottom: "upsample2"
412 | top: "conv_decode2"
413 | param {
414 | lr_mult: 1
415 | decay_mult: 1
416 | }
417 | param {
418 | lr_mult: 2
419 | decay_mult: 0
420 | }
421 | convolution_param {
422 | num_output: 64
423 | kernel_size: 7
424 | pad: 3
425 | weight_filler {
426 | type: "msra"
427 | }
428 | bias_filler {
429 | type: "constant"
430 | }
431 | }
432 | }
433 | layer {
434 | bottom: "conv_decode2"
435 | top: "conv_decode2"
436 | name: "conv_decode2_bn"
437 | type: "BN"
438 | bn_param {
439 | bn_mode: INFERENCE
440 | scale_filler {
441 | type: "constant"
442 | value: 1
443 | }
444 | shift_filler {
445 | type: "constant"
446 | value: 0.001
447 | }
448 | }
449 | }
450 | layer {
451 | name: "upsample1"
452 | type: "Upsample"
453 | bottom: "conv_decode2"
454 | bottom: "pool1_mask"
455 | top: "upsample1"
456 | upsample_param {
457 | scale: 2
458 | }
459 | }
460 | layer {
461 | name: "conv_decode1"
462 | type: "Convolution"
463 | bottom: "upsample1"
464 | top: "conv_decode1"
465 | param {
466 | lr_mult: 1
467 | decay_mult: 1
468 | }
469 | param {
470 | lr_mult: 2
471 | decay_mult: 0
472 | }
473 | convolution_param {
474 | num_output: 64
475 | kernel_size: 7
476 | pad: 3
477 | weight_filler {
478 | type: "msra"
479 | }
480 | bias_filler {
481 | type: "constant"
482 | }
483 | }
484 | }
485 | layer {
486 | bottom: "conv_decode1"
487 | top: "conv_decode1"
488 | name: "conv_decode1_bn"
489 | type: "BN"
490 | bn_param {
491 | bn_mode: INFERENCE
492 | scale_filler {
493 | type: "constant"
494 | value: 1
495 | }
496 | shift_filler {
497 | type: "constant"
498 | value: 0.001
499 | }
500 | }
501 | }
502 | layer {
503 | name: "dense_softmax_inner_prod"
504 | type: "Convolution"
505 | bottom: "conv_decode1"
506 | top: "dense_softmax_inner_prod"
507 | param {
508 | lr_mult: 1
509 | decay_mult: 1
510 | }
511 | param {
512 | lr_mult: 2
513 | decay_mult: 0
514 | }
515 | convolution_param {
516 | num_output: CLASS_COUNT
517 | kernel_size: 1
518 | weight_filler {
519 | type: "msra"
520 | }
521 | bias_filler {
522 | type: "constant"
523 | }
524 | }
525 | }
526 | layer {
527 | name: "prob"
528 | type: "Softmax"
529 | bottom: "dense_softmax_inner_prod"
530 | top: "prob"
531 | softmax_param {engine: CAFFE}
532 | }
533 |
--------------------------------------------------------------------------------
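What makes this template "Bayesian" is that its Dropout layers keep sampling at test time (sample_weights_test: true), so repeated forward passes over the same input give a Monte Carlo estimate of the per-pixel class distribution and of the model's uncertainty. A hedged sketch of that averaging loop, assuming a caffe net loaded from this prototxt with its input data already set:

    import numpy as np

    def mc_dropout_predict(net, n_samples=8):
        """Average several stochastic forward passes of a Bayesian SegNet.

        Returns (mean class probabilities, per-pixel predictive variance).
        Assumes the net's Dropout layers have sample_weights_test: true,
        so each forward() draws a fresh dropout mask.
        """
        samples = []
        for _ in range(n_samples):
            net.forward()
            samples.append(net.blobs['prob'].data[0].copy())  # (classes, H, W)
        samples = np.stack(samples)
        return samples.mean(axis=0), samples.var(axis=0)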
/segnet/templates/bayesian_segnet_basic_train.prototxt:
--------------------------------------------------------------------------------
1 | name: "segnet"
2 | INPUT
3 | layer {
4 | name: "norm"
5 | type: "LRN"
6 | bottom: "data"
7 | top: "norm"
8 | lrn_param {
9 | local_size: 5
10 | alpha: 0.0001
11 | beta: 0.75
12 | }
13 | }
14 | layer {
15 | name: "conv1"
16 | type: "Convolution"
17 | bottom: "norm"
18 | top: "conv1"
19 | param {
20 | lr_mult: 1
21 | decay_mult: 1
22 | }
23 | param {
24 | lr_mult: 2
25 | decay_mult: 0
26 | }
27 | convolution_param {
28 | num_output: 64
29 | kernel_size: 7
30 | pad: 3
31 | weight_filler {
32 | type: "msra"
33 | }
34 | bias_filler {
35 | type: "constant"
36 | }
37 | }
38 | }
39 | layer {
40 | bottom: "conv1"
41 | top: "conv1"
42 | name: "conv1_bn"
43 | type: "BN"
44 | bn_param {
45 | scale_filler {
46 | type: "constant"
47 | value: 1
48 | }
49 | shift_filler {
50 | type: "constant"
51 | value: 0.001
52 | }
53 | }
54 | }
55 | layer {
56 | name: "relu1"
57 | type: "ReLU"
58 | bottom: "conv1"
59 | top: "conv1"
60 | }
61 | layer {
62 | name: "pool1"
63 | type: "Pooling"
64 | bottom: "conv1"
65 | top: "pool1"
66 | top: "pool1_mask"
67 | pooling_param {
68 | pool: MAX
69 | kernel_size: 2
70 | stride: 2
71 | }
72 | }
73 | layer {
74 | name: "conv2"
75 | type: "Convolution"
76 | bottom: "pool1"
77 | top: "conv2"
78 | param {
79 | lr_mult: 1
80 | decay_mult: 1
81 | }
82 | param {
83 | lr_mult: 2
84 | decay_mult: 0
85 | }
86 | convolution_param {
87 | num_output: 64
88 | kernel_size: 7
89 | pad: 3
90 | weight_filler {
91 | type: "msra"
92 | }
93 | bias_filler {
94 | type: "constant"
95 | }
96 | }
97 | }
98 | layer {
99 | bottom: "conv2"
100 | top: "conv2"
101 | name: "conv2_bn"
102 | type: "BN"
103 | bn_param {
104 | scale_filler {
105 | type: "constant"
106 | value: 1
107 | }
108 | shift_filler {
109 | type: "constant"
110 | value: 0.001
111 | }
112 | }
113 | }
114 | layer {
115 | name: "relu2"
116 | type: "ReLU"
117 | bottom: "conv2"
118 | top: "conv2"
119 | }
120 | layer {
121 | name: "pool2"
122 | type: "Pooling"
123 | bottom: "conv2"
124 | top: "pool2"
125 | top: "pool2_mask"
126 | pooling_param {
127 | pool: MAX
128 | kernel_size: 2
129 | stride: 2
130 | }
131 | }
132 | layer {
133 | name: "conv3"
134 | type: "Convolution"
135 | bottom: "pool2"
136 | top: "conv3"
137 | param {
138 | lr_mult: 1
139 | decay_mult: 1
140 | }
141 | param {
142 | lr_mult: 2
143 | decay_mult: 0
144 | }
145 | convolution_param {
146 | num_output: 64
147 | kernel_size: 7
148 | pad: 3
149 | weight_filler {
150 | type: "msra"
151 | }
152 | bias_filler {
153 | type: "constant"
154 | }
155 | }
156 | }
157 | layer {
158 | bottom: "conv3"
159 | top: "conv3"
160 | name: "conv3_bn"
161 | type: "BN"
162 | bn_param {
163 | scale_filler {
164 | type: "constant"
165 | value: 1
166 | }
167 | shift_filler {
168 | type: "constant"
169 | value: 0.001
170 | }
171 | }
172 | }
173 | layer {
174 | name: "relu3"
175 | type: "ReLU"
176 | bottom: "conv3"
177 | top: "conv3"
178 | }
179 | layer {
180 | name: "pool3"
181 | type: "Pooling"
182 | bottom: "conv3"
183 | top: "pool3"
184 | top: "pool3_mask"
185 | pooling_param {
186 | pool: MAX
187 | kernel_size: 2
188 | stride: 2
189 | }
190 | }
191 | layer {
192 | name: "encdrop3"
193 | type: "Dropout"
194 | bottom: "pool3"
195 | top: "pool3"
196 | dropout_param {
197 | dropout_ratio: 0.5
198 | }
199 | }
200 | layer {
201 | name: "conv4"
202 | type: "Convolution"
203 | bottom: "pool3"
204 | top: "conv4"
205 | param {
206 | lr_mult: 1
207 | decay_mult: 1
208 | }
209 | param {
210 | lr_mult: 2
211 | decay_mult: 0
212 | }
213 | convolution_param {
214 | num_output: 64
215 | kernel_size: 7
216 | pad: 3
217 | weight_filler {
218 | type: "msra"
219 | }
220 | bias_filler {
221 | type: "constant"
222 | }
223 | }
224 | }
225 | layer {
226 | bottom: "conv4"
227 | top: "conv4"
228 | name: "conv4_bn"
229 | type: "BN"
230 | bn_param {
231 | scale_filler {
232 | type: "constant"
233 | value: 1
234 | }
235 | shift_filler {
236 | type: "constant"
237 | value: 0.001
238 | }
239 | }
240 | }
241 | layer {
242 | name: "relu4"
243 | type: "ReLU"
244 | bottom: "conv4"
245 | top: "conv4"
246 | }
247 | layer {
248 | name: "pool4"
249 | type: "Pooling"
250 | bottom: "conv4"
251 | top: "pool4"
252 | top: "pool4_mask"
253 | pooling_param {
254 | pool: MAX
255 | kernel_size: 2
256 | stride: 2
257 | }
258 | }
259 | layer {
260 | name: "encdrop4"
261 | type: "Dropout"
262 | bottom: "pool4"
263 | top: "pool4"
264 | dropout_param {
265 | dropout_ratio: 0.5
266 | }
267 | }
268 | layer {
269 | name: "upsample4"
270 | type: "Upsample"
271 | bottom: "pool4"
272 | bottom: "pool4_mask"
273 | top: "upsample4"
274 | upsample_param {
275 | scale: 2
276 | }
277 | }
278 | layer {
279 | name: "conv_decode4"
280 | type: "Convolution"
281 | bottom: "upsample4"
282 | top: "conv_decode4"
283 | param {
284 | lr_mult: 1
285 | decay_mult: 1
286 | }
287 | param {
288 | lr_mult: 2
289 | decay_mult: 0
290 | }
291 | convolution_param {
292 | num_output: 64
293 | kernel_size: 7
294 | pad: 3
295 | weight_filler {
296 | type: "msra"
297 | }
298 | bias_filler {
299 | type: "constant"
300 | }
301 | }
302 | }
303 | layer {
304 | bottom: "conv_decode4"
305 | top: "conv_decode4"
306 | name: "conv_decode4_bn"
307 | type: "BN"
308 | bn_param {
309 | scale_filler {
310 | type: "constant"
311 | value: 1
312 | }
313 | shift_filler {
314 | type: "constant"
315 | value: 0.001
316 | }
317 | }
318 | }
319 | layer {
320 | name: "decdrop4"
321 | type: "Dropout"
322 | bottom: "conv_decode4"
323 | top: "conv_decode4"
324 | dropout_param {
325 | dropout_ratio: 0.5
326 | }
327 | }
328 | layer {
329 | name: "upsample3"
330 | type: "Upsample"
331 | bottom: "conv_decode4"
332 | bottom: "pool3_mask"
333 | top: "upsample3"
334 | upsample_param {
335 | scale: 2
336 | }
337 | }
338 | layer {
339 | name: "conv_decode3"
340 | type: "Convolution"
341 | bottom: "upsample3"
342 | top: "conv_decode3"
343 | param {
344 | lr_mult: 1
345 | decay_mult: 1
346 | }
347 | param {
348 | lr_mult: 2
349 | decay_mult: 0
350 | }
351 | convolution_param {
352 | num_output: 64
353 | kernel_size: 7
354 | pad: 3
355 | weight_filler {
356 | type: "msra"
357 | }
358 | bias_filler {
359 | type: "constant"
360 | }
361 | }
362 | }
363 | layer {
364 | bottom: "conv_decode3"
365 | top: "conv_decode3"
366 | name: "conv_decode3_bn"
367 | type: "BN"
368 | bn_param {
369 | scale_filler {
370 | type: "constant"
371 | value: 1
372 | }
373 | shift_filler {
374 | type: "constant"
375 | value: 0.001
376 | }
377 | }
378 | }
379 | layer {
380 | name: "decdrop3"
381 | type: "Dropout"
382 | bottom: "conv_decode3"
383 | top: "conv_decode3"
384 | dropout_param {
385 | dropout_ratio: 0.5
386 | }
387 | }
388 | layer {
389 | name: "upsample2"
390 | type: "Upsample"
391 | bottom: "conv_decode3"
392 | bottom: "pool2_mask"
393 | top: "upsample2"
394 | upsample_param {
395 | scale: 2
396 | }
397 | }
398 | layer {
399 | name: "conv_decode2"
400 | type: "Convolution"
401 | bottom: "upsample2"
402 | top: "conv_decode2"
403 | param {
404 | lr_mult: 1
405 | decay_mult: 1
406 | }
407 | param {
408 | lr_mult: 2
409 | decay_mult: 0
410 | }
411 | convolution_param {
412 | num_output: 64
413 | kernel_size: 7
414 | pad: 3
415 | weight_filler {
416 | type: "msra"
417 | }
418 | bias_filler {
419 | type: "constant"
420 | }
421 | }
422 | }
423 | layer {
424 | bottom: "conv_decode2"
425 | top: "conv_decode2"
426 | name: "conv_decode2_bn"
427 | type: "BN"
428 | bn_param {
429 | scale_filler {
430 | type: "constant"
431 | value: 1
432 | }
433 | shift_filler {
434 | type: "constant"
435 | value: 0.001
436 | }
437 | }
438 | }
439 | layer {
440 | name: "upsample1"
441 | type: "Upsample"
442 | bottom: "conv_decode2"
443 | bottom: "pool1_mask"
444 | top: "upsample1"
445 | upsample_param {
446 | scale: 2
447 | }
448 | }
449 | layer {
450 | name: "conv_decode1"
451 | type: "Convolution"
452 | bottom: "upsample1"
453 | top: "conv_decode1"
454 | param {
455 | lr_mult: 1
456 | decay_mult: 1
457 | }
458 | param {
459 | lr_mult: 2
460 | decay_mult: 0
461 | }
462 | convolution_param {
463 | num_output: 64
464 | kernel_size: 7
465 | pad: 3
466 | weight_filler {
467 | type: "msra"
468 | }
469 | bias_filler {
470 | type: "constant"
471 | }
472 | }
473 | }
474 | layer {
475 | bottom: "conv_decode1"
476 | top: "conv_decode1"
477 | name: "conv_decode1_bn"
478 | type: "BN"
479 | bn_param {
480 | scale_filler {
481 | type: "constant"
482 | value: 1
483 | }
484 | shift_filler {
485 | type: "constant"
486 | value: 0.001
487 | }
488 | }
489 | }
490 | layer {
491 | name: "dense_softmax_inner_prod"
492 | type: "Convolution"
493 | bottom: "conv_decode1"
494 | top: "dense_softmax_inner_prod"
495 | param {
496 | lr_mult: 1
497 | decay_mult: 1
498 | }
499 | param {
500 | lr_mult: 2
501 | decay_mult: 0
502 | }
503 | convolution_param {
504 | num_output: CLASS_COUNT
505 | kernel_size: 1
506 | weight_filler {
507 | type: "msra"
508 | }
509 | bias_filler {
510 | type: "constant"
511 | }
512 | }
513 | }
514 | layer {
515 | name: "loss"
516 | type: "SoftmaxWithLoss"
517 | bottom: "dense_softmax_inner_prod"
518 | bottom: "label"
519 | top: "loss"
520 | softmax_param {engine: CAFFE}
521 | loss_param: {
522 | weight_by_label_freqs: true
523 | CLASS_WEIGHTING
524 | }
525 | }
526 | layer {
527 | name: "accuracy"
528 | type: "Accuracy"
529 | bottom: "dense_softmax_inner_prod"
530 | bottom: "label"
531 | top: "accuracy"
532 | top: "per_class_accuracy"
533 | }
534 |
--------------------------------------------------------------------------------
/segnet/templates/segnet_basic_inference.prototxt:
--------------------------------------------------------------------------------
1 | name: "segnet"
2 | INPUT
3 | layer {
4 | name: "norm"
5 | type: "LRN"
6 | bottom: "data"
7 | top: "norm"
8 | lrn_param {
9 | local_size: 5
10 | alpha: 0.0001
11 | beta: 0.75
12 | }
13 | }
14 | layer {
15 | name: "conv1"
16 | type: "Convolution"
17 | bottom: "norm"
18 | top: "conv1"
19 | param {
20 | lr_mult: 1
21 | decay_mult: 1
22 | }
23 | param {
24 | lr_mult: 2
25 | decay_mult: 0
26 | }
27 | convolution_param {
28 | num_output: 64
29 | kernel_size: 7
30 | pad: 3
31 | weight_filler {
32 | type: "msra"
33 | }
34 | bias_filler {
35 | type: "constant"
36 | }
37 | }
38 | }
39 | layer {
40 | bottom: "conv1"
41 | top: "conv1"
42 | name: "conv1_bn"
43 | type: "BN"
44 | bn_param {
45 | bn_mode: INFERENCE
46 | scale_filler {
47 | type: "constant"
48 | value: 1
49 | }
50 | shift_filler {
51 | type: "constant"
52 | value: 0.001
53 | }
54 | }
55 | }
56 | layer {
57 | name: "relu1"
58 | type: "ReLU"
59 | bottom: "conv1"
60 | top: "conv1"
61 | }
62 | layer {
63 | name: "pool1"
64 | type: "Pooling"
65 | bottom: "conv1"
66 | top: "pool1"
67 | top: "pool1_mask"
68 | pooling_param {
69 | pool: MAX
70 | kernel_size: 2
71 | stride: 2
72 | }
73 | }
74 | layer {
75 | name: "conv2"
76 | type: "Convolution"
77 | bottom: "pool1"
78 | top: "conv2"
79 | param {
80 | lr_mult: 1
81 | decay_mult: 1
82 | }
83 | param {
84 | lr_mult: 2
85 | decay_mult: 0
86 | }
87 | convolution_param {
88 | num_output: 64
89 | kernel_size: 7
90 | pad: 3
91 | weight_filler {
92 | type: "msra"
93 | }
94 | bias_filler {
95 | type: "constant"
96 | }
97 | }
98 | }
99 | layer {
100 | bottom: "conv2"
101 | top: "conv2"
102 | name: "conv2_bn"
103 | type: "BN"
104 | bn_param {
105 | bn_mode: INFERENCE
106 | scale_filler {
107 | type: "constant"
108 | value: 1
109 | }
110 | shift_filler {
111 | type: "constant"
112 | value: 0.001
113 | }
114 | }
115 | }
116 | layer {
117 | name: "relu2"
118 | type: "ReLU"
119 | bottom: "conv2"
120 | top: "conv2"
121 | }
122 | layer {
123 | name: "pool2"
124 | type: "Pooling"
125 | bottom: "conv2"
126 | top: "pool2"
127 | top: "pool2_mask"
128 | pooling_param {
129 | pool: MAX
130 | kernel_size: 2
131 | stride: 2
132 | }
133 | }
134 | layer {
135 | name: "conv3"
136 | type: "Convolution"
137 | bottom: "pool2"
138 | top: "conv3"
139 | param {
140 | lr_mult: 1
141 | decay_mult: 1
142 | }
143 | param {
144 | lr_mult: 2
145 | decay_mult: 0
146 | }
147 | convolution_param {
148 | num_output: 64
149 | kernel_size: 7
150 | pad: 3
151 | weight_filler {
152 | type: "msra"
153 | }
154 | bias_filler {
155 | type: "constant"
156 | }
157 | }
158 | }
159 | layer {
160 | bottom: "conv3"
161 | top: "conv3"
162 | name: "conv3_bn"
163 | type: "BN"
164 | bn_param {
165 | bn_mode: INFERENCE
166 | scale_filler {
167 | type: "constant"
168 | value: 1
169 | }
170 | shift_filler {
171 | type: "constant"
172 | value: 0.001
173 | }
174 | }
175 | }
176 | layer {
177 | name: "relu3"
178 | type: "ReLU"
179 | bottom: "conv3"
180 | top: "conv3"
181 | }
182 | layer {
183 | name: "pool3"
184 | type: "Pooling"
185 | bottom: "conv3"
186 | top: "pool3"
187 | top: "pool3_mask"
188 | pooling_param {
189 | pool: MAX
190 | kernel_size: 2
191 | stride: 2
192 | }
193 | }
194 | layer {
195 | name: "conv4"
196 | type: "Convolution"
197 | bottom: "pool3"
198 | top: "conv4"
199 | param {
200 | lr_mult: 1
201 | decay_mult: 1
202 | }
203 | param {
204 | lr_mult: 2
205 | decay_mult: 0
206 | }
207 | convolution_param {
208 | num_output: 64
209 | kernel_size: 7
210 | pad: 3
211 | weight_filler {
212 | type: "msra"
213 | }
214 | bias_filler {
215 | type: "constant"
216 | }
217 | }
218 | }
219 | layer {
220 | bottom: "conv4"
221 | top: "conv4"
222 | name: "conv4_bn"
223 | type: "BN"
224 | bn_param {
225 | bn_mode: INFERENCE
226 | scale_filler {
227 | type: "constant"
228 | value: 1
229 | }
230 | shift_filler {
231 | type: "constant"
232 | value: 0.001
233 | }
234 | }
235 | }
236 | layer {
237 | name: "relu4"
238 | type: "ReLU"
239 | bottom: "conv4"
240 | top: "conv4"
241 | }
242 | layer {
243 | name: "pool4"
244 | type: "Pooling"
245 | bottom: "conv4"
246 | top: "pool4"
247 | top: "pool4_mask"
248 | pooling_param {
249 | pool: MAX
250 | kernel_size: 2
251 | stride: 2
252 | }
253 | }
254 | layer {
255 | name: "upsample4"
256 | type: "Upsample"
257 | bottom: "pool4"
258 | bottom: "pool4_mask"
259 | top: "upsample4"
260 | upsample_param {
261 | scale: 2
262 | }
263 | }
264 | layer {
265 | name: "conv_decode4"
266 | type: "Convolution"
267 | bottom: "upsample4"
268 | top: "conv_decode4"
269 | param {
270 | lr_mult: 1
271 | decay_mult: 1
272 | }
273 | param {
274 | lr_mult: 2
275 | decay_mult: 0
276 | }
277 | convolution_param {
278 | num_output: 64
279 | kernel_size: 7
280 | pad: 3
281 | weight_filler {
282 | type: "msra"
283 | }
284 | bias_filler {
285 | type: "constant"
286 | }
287 | }
288 | }
289 | layer {
290 | bottom: "conv_decode4"
291 | top: "conv_decode4"
292 | name: "conv_decode4_bn"
293 | type: "BN"
294 | bn_param {
295 | bn_mode: INFERENCE
296 | scale_filler {
297 | type: "constant"
298 | value: 1
299 | }
300 | shift_filler {
301 | type: "constant"
302 | value: 0.001
303 | }
304 | }
305 | }
306 | layer {
307 | name: "upsample3"
308 | type: "Upsample"
309 | bottom: "conv_decode4"
310 | bottom: "pool3_mask"
311 | top: "upsample3"
312 | upsample_param {
313 | scale: 2
314 | }
315 | }
316 | layer {
317 | name: "conv_decode3"
318 | type: "Convolution"
319 | bottom: "upsample3"
320 | top: "conv_decode3"
321 | param {
322 | lr_mult: 1
323 | decay_mult: 1
324 | }
325 | param {
326 | lr_mult: 2
327 | decay_mult: 0
328 | }
329 | convolution_param {
330 | num_output: 64
331 | kernel_size: 7
332 | pad: 3
333 | weight_filler {
334 | type: "msra"
335 | }
336 | bias_filler {
337 | type: "constant"
338 | }
339 | }
340 | }
341 | layer {
342 | bottom: "conv_decode3"
343 | top: "conv_decode3"
344 | name: "conv_decode3_bn"
345 | type: "BN"
346 | bn_param {
347 | bn_mode: INFERENCE
348 | scale_filler {
349 | type: "constant"
350 | value: 1
351 | }
352 | shift_filler {
353 | type: "constant"
354 | value: 0.001
355 | }
356 | }
357 | }
358 | layer {
359 | name: "upsample2"
360 | type: "Upsample"
361 | bottom: "conv_decode3"
362 | bottom: "pool2_mask"
363 | top: "upsample2"
364 | upsample_param {
365 | scale: 2
366 | }
367 | }
368 | layer {
369 | name: "conv_decode2"
370 | type: "Convolution"
371 | bottom: "upsample2"
372 | top: "conv_decode2"
373 | param {
374 | lr_mult: 1
375 | decay_mult: 1
376 | }
377 | param {
378 | lr_mult: 2
379 | decay_mult: 0
380 | }
381 | convolution_param {
382 | num_output: 64
383 | kernel_size: 7
384 | pad: 3
385 | weight_filler {
386 | type: "msra"
387 | }
388 | bias_filler {
389 | type: "constant"
390 | }
391 | }
392 | }
393 | layer {
394 | bottom: "conv_decode2"
395 | top: "conv_decode2"
396 | name: "conv_decode2_bn"
397 | type: "BN"
398 | bn_param {
399 | bn_mode: INFERENCE
400 | scale_filler {
401 | type: "constant"
402 | value: 1
403 | }
404 | shift_filler {
405 | type: "constant"
406 | value: 0.001
407 | }
408 | }
409 | }
410 | layer {
411 | name: "upsample1"
412 | type: "Upsample"
413 | bottom: "conv_decode2"
414 | bottom: "pool1_mask"
415 | top: "upsample1"
416 | upsample_param {
417 | scale: 2
418 | }
419 | }
420 | layer {
421 | name: "conv_decode1"
422 | type: "Convolution"
423 | bottom: "upsample1"
424 | top: "conv_decode1"
425 | param {
426 | lr_mult: 1
427 | decay_mult: 1
428 | }
429 | param {
430 | lr_mult: 2
431 | decay_mult: 0
432 | }
433 | convolution_param {
434 | num_output: 64
435 | kernel_size: 7
436 | pad: 3
437 | weight_filler {
438 | type: "msra"
439 | }
440 | bias_filler {
441 | type: "constant"
442 | }
443 | }
444 | }
445 | layer {
446 | bottom: "conv_decode1"
447 | top: "conv_decode1"
448 | name: "conv_decode1_bn"
449 | type: "BN"
450 | bn_param {
451 | bn_mode: INFERENCE
452 | scale_filler {
453 | type: "constant"
454 | value: 1
455 | }
456 | shift_filler {
457 | type: "constant"
458 | value: 0.001
459 | }
460 | }
461 | }
462 | layer {
463 | name: "conv_classifier"
464 | type: "Convolution"
465 | bottom: "conv_decode1"
466 | top: "conv_classifier"
467 | param {
468 | lr_mult: 1
469 | decay_mult: 1
470 | }
471 | param {
472 | lr_mult: 2
473 | decay_mult: 0
474 | }
475 | convolution_param {
476 | num_output: CLASS_COUNT
477 | kernel_size: 1
478 | weight_filler {
479 | type: "msra"
480 | }
481 | bias_filler {
482 | type: "constant"
483 | }
484 | }
485 | }
486 | layer {
487 | name: "prob"
488 | type: "Softmax"
489 | bottom: "conv_classifier"
490 | top: "prob"
491 | softmax_param {engine: CAFFE}
492 | }
493 |
--------------------------------------------------------------------------------
/segnet/templates/segnet_basic_train.prototxt:
--------------------------------------------------------------------------------
1 | name: "segnet"
2 | INPUT
3 | layer {
4 | name: "norm"
5 | type: "LRN"
6 | bottom: "data"
7 | top: "norm"
8 | lrn_param {
9 | local_size: 5
10 | alpha: 0.0001
11 | beta: 0.75
12 | }
13 | }
14 | layer {
15 | name: "conv1"
16 | type: "Convolution"
17 | bottom: "norm"
18 | top: "conv1"
19 | param {
20 | lr_mult: 1
21 | decay_mult: 1
22 | }
23 | param {
24 | lr_mult: 2
25 | decay_mult: 0
26 | }
27 | convolution_param {
28 | num_output: 64
29 | kernel_size: 7
30 | pad: 3
31 | weight_filler {
32 | type: "msra"
33 | }
34 | bias_filler {
35 | type: "constant"
36 | }
37 | }
38 | }
39 | layer {
40 | bottom: "conv1"
41 | top: "conv1"
42 | name: "conv1_bn"
43 | type: "BN"
44 | bn_param {
45 | scale_filler {
46 | type: "constant"
47 | value: 1
48 | }
49 | shift_filler {
50 | type: "constant"
51 | value: 0.001
52 | }
53 | }
54 | }
55 | layer {
56 | name: "relu1"
57 | type: "ReLU"
58 | bottom: "conv1"
59 | top: "conv1"
60 | }
61 | layer {
62 | name: "pool1"
63 | type: "Pooling"
64 | bottom: "conv1"
65 | top: "pool1"
66 | top: "pool1_mask"
67 | pooling_param {
68 | pool: MAX
69 | kernel_size: 2
70 | stride: 2
71 | }
72 | }
73 | layer {
74 | name: "conv2"
75 | type: "Convolution"
76 | bottom: "pool1"
77 | top: "conv2"
78 | param {
79 | lr_mult: 1
80 | decay_mult: 1
81 | }
82 | param {
83 | lr_mult: 2
84 | decay_mult: 0
85 | }
86 | convolution_param {
87 | num_output: 64
88 | kernel_size: 7
89 | pad: 3
90 | weight_filler {
91 | type: "msra"
92 | }
93 | bias_filler {
94 | type: "constant"
95 | }
96 | }
97 | }
98 | layer {
99 | bottom: "conv2"
100 | top: "conv2"
101 | name: "conv2_bn"
102 | type: "BN"
103 | bn_param {
104 | scale_filler {
105 | type: "constant"
106 | value: 1
107 | }
108 | shift_filler {
109 | type: "constant"
110 | value: 0.001
111 | }
112 | }
113 | }
114 | layer {
115 | name: "relu2"
116 | type: "ReLU"
117 | bottom: "conv2"
118 | top: "conv2"
119 | }
120 | layer {
121 | name: "pool2"
122 | type: "Pooling"
123 | bottom: "conv2"
124 | top: "pool2"
125 | top: "pool2_mask"
126 | pooling_param {
127 | pool: MAX
128 | kernel_size: 2
129 | stride: 2
130 | }
131 | }
132 | layer {
133 | name: "conv3"
134 | type: "Convolution"
135 | bottom: "pool2"
136 | top: "conv3"
137 | param {
138 | lr_mult: 1
139 | decay_mult: 1
140 | }
141 | param {
142 | lr_mult: 2
143 | decay_mult: 0
144 | }
145 | convolution_param {
146 | num_output: 64
147 | kernel_size: 7
148 | pad: 3
149 | weight_filler {
150 | type: "msra"
151 | }
152 | bias_filler {
153 | type: "constant"
154 | }
155 | }
156 | }
157 | layer {
158 | bottom: "conv3"
159 | top: "conv3"
160 | name: "conv3_bn"
161 | type: "BN"
162 | bn_param {
163 | scale_filler {
164 | type: "constant"
165 | value: 1
166 | }
167 | shift_filler {
168 | type: "constant"
169 | value: 0.001
170 | }
171 | }
172 | }
173 | layer {
174 | name: "relu3"
175 | type: "ReLU"
176 | bottom: "conv3"
177 | top: "conv3"
178 | }
179 | layer {
180 | name: "pool3"
181 | type: "Pooling"
182 | bottom: "conv3"
183 | top: "pool3"
184 | top: "pool3_mask"
185 | pooling_param {
186 | pool: MAX
187 | kernel_size: 2
188 | stride: 2
189 | }
190 | }
191 | layer {
192 | name: "conv4"
193 | type: "Convolution"
194 | bottom: "pool3"
195 | top: "conv4"
196 | param {
197 | lr_mult: 1
198 | decay_mult: 1
199 | }
200 | param {
201 | lr_mult: 2
202 | decay_mult: 0
203 | }
204 | convolution_param {
205 | num_output: 64
206 | kernel_size: 7
207 | pad: 3
208 | weight_filler {
209 | type: "msra"
210 | }
211 | bias_filler {
212 | type: "constant"
213 | }
214 | }
215 | }
216 | layer {
217 | bottom: "conv4"
218 | top: "conv4"
219 | name: "conv4_bn"
220 | type: "BN"
221 | bn_param {
222 | scale_filler {
223 | type: "constant"
224 | value: 1
225 | }
226 | shift_filler {
227 | type: "constant"
228 | value: 0.001
229 | }
230 | }
231 | }
232 | layer {
233 | name: "relu4"
234 | type: "ReLU"
235 | bottom: "conv4"
236 | top: "conv4"
237 | }
238 | layer {
239 | name: "pool4"
240 | type: "Pooling"
241 | bottom: "conv4"
242 | top: "pool4"
243 | top: "pool4_mask"
244 | pooling_param {
245 | pool: MAX
246 | kernel_size: 2
247 | stride: 2
248 | }
249 | }
250 | layer {
251 | name: "upsample4"
252 | type: "Upsample"
253 | bottom: "pool4"
254 | bottom: "pool4_mask"
255 | top: "upsample4"
256 | upsample_param {
257 | scale: 2
258 | }
259 | }
260 | layer {
261 | name: "conv_decode4"
262 | type: "Convolution"
263 | bottom: "upsample4"
264 | top: "conv_decode4"
265 | param {
266 | lr_mult: 1
267 | decay_mult: 1
268 | }
269 | param {
270 | lr_mult: 2
271 | decay_mult: 0
272 | }
273 | convolution_param {
274 | num_output: 64
275 | kernel_size: 7
276 | pad: 3
277 | weight_filler {
278 | type: "msra"
279 | }
280 | bias_filler {
281 | type: "constant"
282 | }
283 | }
284 | }
285 | layer {
286 | bottom: "conv_decode4"
287 | top: "conv_decode4"
288 | name: "conv_decode4_bn"
289 | type: "BN"
290 | bn_param {
291 | scale_filler {
292 | type: "constant"
293 | value: 1
294 | }
295 | shift_filler {
296 | type: "constant"
297 | value: 0.001
298 | }
299 | }
300 | }
301 | layer {
302 | name: "upsample3"
303 | type: "Upsample"
304 | bottom: "conv_decode4"
305 | bottom: "pool3_mask"
306 | top: "upsample3"
307 | upsample_param {
308 | scale: 2
309 | }
310 | }
311 | layer {
312 | name: "conv_decode3"
313 | type: "Convolution"
314 | bottom: "upsample3"
315 | top: "conv_decode3"
316 | param {
317 | lr_mult: 1
318 | decay_mult: 1
319 | }
320 | param {
321 | lr_mult: 2
322 | decay_mult: 0
323 | }
324 | convolution_param {
325 | num_output: 64
326 | kernel_size: 7
327 | pad: 3
328 | weight_filler {
329 | type: "msra"
330 | }
331 | bias_filler {
332 | type: "constant"
333 | }
334 | }
335 | }
336 | layer {
337 | bottom: "conv_decode3"
338 | top: "conv_decode3"
339 | name: "conv_decode3_bn"
340 | type: "BN"
341 | bn_param {
342 | scale_filler {
343 | type: "constant"
344 | value: 1
345 | }
346 | shift_filler {
347 | type: "constant"
348 | value: 0.001
349 | }
350 | }
351 | }
352 | layer {
353 | name: "upsample2"
354 | type: "Upsample"
355 | bottom: "conv_decode3"
356 | bottom: "pool2_mask"
357 | top: "upsample2"
358 | upsample_param {
359 | scale: 2
360 | }
361 | }
362 | layer {
363 | name: "conv_decode2"
364 | type: "Convolution"
365 | bottom: "upsample2"
366 | top: "conv_decode2"
367 | param {
368 | lr_mult: 1
369 | decay_mult: 1
370 | }
371 | param {
372 | lr_mult: 2
373 | decay_mult: 0
374 | }
375 | convolution_param {
376 | num_output: 64
377 | kernel_size: 7
378 | pad: 3
379 | weight_filler {
380 | type: "msra"
381 | }
382 | bias_filler {
383 | type: "constant"
384 | }
385 | }
386 | }
387 | layer {
388 | bottom: "conv_decode2"
389 | top: "conv_decode2"
390 | name: "conv_decode2_bn"
391 | type: "BN"
392 | bn_param {
393 | scale_filler {
394 | type: "constant"
395 | value: 1
396 | }
397 | shift_filler {
398 | type: "constant"
399 | value: 0.001
400 | }
401 | }
402 | }
403 | layer {
404 | name: "upsample1"
405 | type: "Upsample"
406 | bottom: "conv_decode2"
407 | bottom: "pool1_mask"
408 | top: "upsample1"
409 | upsample_param {
410 | scale: 2
411 | }
412 | }
413 | layer {
414 | name: "conv_decode1"
415 | type: "Convolution"
416 | bottom: "upsample1"
417 | top: "conv_decode1"
418 | param {
419 | lr_mult: 1
420 | decay_mult: 1
421 | }
422 | param {
423 | lr_mult: 2
424 | decay_mult: 0
425 | }
426 | convolution_param {
427 | num_output: 64
428 | kernel_size: 7
429 | pad: 3
430 | weight_filler {
431 | type: "msra"
432 | }
433 | bias_filler {
434 | type: "constant"
435 | }
436 | }
437 | }
438 | layer {
439 | bottom: "conv_decode1"
440 | top: "conv_decode1"
441 | name: "conv_decode1_bn"
442 | type: "BN"
443 | bn_param {
444 | scale_filler {
445 | type: "constant"
446 | value: 1
447 | }
448 | shift_filler {
449 | type: "constant"
450 | value: 0.001
451 | }
452 | }
453 | }
454 | layer {
455 | name: "conv_classifier"
456 | type: "Convolution"
457 | bottom: "conv_decode1"
458 | top: "conv_classifier"
459 | param {
460 | lr_mult: 1
461 | decay_mult: 1
462 | }
463 | param {
464 | lr_mult: 2
465 | decay_mult: 0
466 | }
467 | convolution_param {
468 | num_output: CLASS_COUNT
469 | kernel_size: 1
470 | weight_filler {
471 | type: "msra"
472 | }
473 | bias_filler {
474 | type: "constant"
475 | }
476 | }
477 | }
478 | layer {
479 | name: "loss"
480 | type: "SoftmaxWithLoss"
481 | bottom: "conv_classifier"
482 | bottom: "label"
483 | top: "loss"
484 | softmax_param {engine: CAFFE}
485 | loss_param: {
486 | weight_by_label_freqs: true
487 | CLASS_WEIGHTING
488 | }
489 | }
490 | layer {
491 | name: "accuracy"
492 | type: "Accuracy"
493 | bottom: "conv_classifier"
494 | bottom: "label"
495 | accuracy_param {
496 | }
497 | top: "accuracy"
498 | top: "per_class_accuracy"
499 | }
500 |
--------------------------------------------------------------------------------
/segnet/templates/solver.prototxt:
--------------------------------------------------------------------------------
1 | net: "TRAIN_NET"
2 | test_initialization: false
3 | test_iter: 1
4 | test_interval: 1000000
5 | base_lr: BASE_LR
6 | lr_policy: "step"
7 | gamma: GAMMA
8 | stepsize: STEPSIZE
9 | weight_decay: 0.0005
10 | momentum: 0.9
11 | display: 50
12 | max_iter: MAX_ITER
13 | snapshot: SNAPSHOT_FREQUENCY
14 | snapshot_prefix: "SNAPSHOT_PREFIX"
15 | solver_mode: GPU
16 |
--------------------------------------------------------------------------------
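This solver file is a template, not valid prototxt as-is: the UPPERCASE tokens (TRAIN_NET, BASE_LR, GAMMA, STEPSIZE, MAX_ITER, SNAPSHOT_FREQUENCY, SNAPSHOT_PREFIX) are placeholders. Note that the train script further down builds its solvers programmatically via caffe_pb2.SolverParameter rather than through this file; if the template were filled by string substitution instead, it could look like the sketch below (all values illustrative):

    def render_solver(template_path, **values):
        """Fill the UPPERCASE placeholders in solver.prototxt (illustrative)."""
        text = open(template_path).read()
        for key, value in values.items():
            text = text.replace(key, str(value))
        return text

    solver = render_solver('templates/solver.prototxt',
                           TRAIN_NET='/output/segnet_train.prototxt',
                           BASE_LR=0.01, GAMMA=0.25, STEPSIZE=3000,
                           MAX_ITER=100000, SNAPSHOT_FREQUENCY=10000,
                           SNAPSHOT_PREFIX='/output/snapshots/segnet')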
/segnet/test_segmentation.py:
--------------------------------------------------------------------------------
1 | # https://github.com/BVLC/caffe/issues/861
2 | import matplotlib
3 | matplotlib.use('Agg')
4 |
5 | import numpy as np
6 | import os.path
7 | import re
8 | import json
9 | import scipy
10 | import argparse
11 | caffe_root = os.getenv('CAFFE_ROOT', '/home/ubuntu/caffe-segnet/')
12 | import sys
13 | sys.path.insert(0, caffe_root + 'python')
14 |
15 | import caffe
16 |
17 | from metrics import complete_and_correct
18 | from inference import predict
19 | from inference import labels_to_image
20 |
21 |
22 | # Import arguments
23 | parser = argparse.ArgumentParser()
24 | parser.add_argument('--model', type=str, required=True)
25 | parser.add_argument('--weights', type=str, required=True)
26 | parser.add_argument('--output', type=str, required=True)
27 | parser.add_argument('--classes', type=str, required=True)
28 | parser.add_argument('--metrics-only', default=False, action='store_true')
29 | parser.add_argument('--gpu', type=int, default=None)
30 | args = parser.parse_args()
31 |
32 | with open(args.classes) as classes:
33 | colors = map(lambda x: x['color'][1:], json.load(classes))
34 | colors.append('000000')
35 | colors = map(lambda rgbstr: tuple(map(ord, rgbstr.decode('hex'))), colors)
36 |
37 | num_classes = len(colors)
38 |
39 | model = open(args.model, 'r').read()
40 | m = re.search('source:\s*"(.*)"', model)
41 | test_data = m.group(1)
42 | test_data = open(test_data, 'r').readlines()
43 |
44 | caffe.set_mode_gpu()
45 | if args.gpu is not None:
46 | caffe.set_device(args.gpu)
47 |
48 | net = caffe.Net(args.model,
49 | args.weights,
50 | caffe.TEST)
51 |
52 | pixels_correct = 0
53 | pixels_complete = 0
54 | pixels_predicted_fg = 0
55 | pixels_actual_fg = 0
56 | outputs = []
57 |
58 | for i in range(0, len(test_data)):
59 | prediction_image = predict(net, colors)
60 |
61 | image = net.blobs['data'].data
62 | image = np.squeeze(image[0, :, :, :])
63 | label = net.blobs['label'].data
64 | label = np.squeeze(label).astype(np.uint8)
65 | predicted = net.blobs['prob'].data
66 | predicted = np.squeeze(predicted[0, :, :, :])
67 |
68 | metrics = complete_and_correct(predicted, label, 3, 0.5)
69 |
70 | result = {'index': i, 'metrics': metrics}
71 | print(result)
72 | outputs.append(result)
73 | pixels_correct += sum(metrics['pixels_correct'])
74 | pixels_complete += sum(metrics['pixels_complete'])
75 | pixels_predicted_fg += sum(metrics['pixels_predicted'][:-1])
76 | pixels_actual_fg += sum(metrics['pixels_actual'][:-1])
77 | if args.metrics_only:
78 | continue
79 |
80 | image = np.transpose(image, (1, 2, 0))
81 | image = image[:, :, (2, 1, 0)]
82 |
83 | prediction = str(i) + '_prediction.png'
84 | input_image = str(i) + '_input.png'
85 | groundtruth = str(i) + '_groundtruth.png'
86 |
87 | prediction_image.save(os.path.join(args.output, prediction))
88 | labels_to_image(label, colors).save(os.path.join(args.output, groundtruth))
89 | scipy.misc.toimage(image, cmin=0.0, cmax=255).save(
90 | os.path.join(args.output, input_image))
91 |
92 | result['input'] = input_image
93 | result['prediction'] = prediction
94 | result['groundtruth'] = groundtruth
95 | result['test_data'] = test_data[i]
96 |
97 | with open(os.path.join(args.output, 'index.json'), 'w') as outfile:
98 | json.dump({
99 |         'correctness': float(pixels_correct) / pixels_predicted_fg,
100 |         'completeness': float(pixels_complete) / pixels_actual_fg,
101 | 'images': outputs
102 | }, outfile)
103 |
104 | print('Success!')
105 |
--------------------------------------------------------------------------------
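The aggregate metrics this script writes to index.json are precision/recall analogues over foreground pixels: correctness is the fraction of predicted foreground that matches the ground truth, and completeness is the fraction of ground-truth foreground that the prediction recovers. A worked example with hypothetical per-run totals:

    # hypothetical totals, summed over all test tiles
    pixels_correct = 7200        # predicted fg pixels matching ground truth
    pixels_complete = 6900       # ground-truth fg pixels recovered
    pixels_predicted_fg = 9000   # all fg pixels the net predicted
    pixels_actual_fg = 8000      # all fg pixels in the ground truth

    correctness = float(pixels_correct) / pixels_predicted_fg   # 0.80, precision-like
    completeness = float(pixels_complete) / pixels_actual_fg    # 0.8625, recall-like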
/segnet/tile_server.py:
--------------------------------------------------------------------------------
1 | # https://github.com/BVLC/caffe/issues/861
2 | import matplotlib
3 | matplotlib.use('Agg')
4 |
5 | import re
6 | import os.path
7 | import sys
8 | from flask import Flask
9 | from flask import request
10 | from flask import send_file
11 | from flask import jsonify
12 | from flask import abort
13 | import requests
14 | import numpy as np
15 | from PIL import Image
16 | import argparse
17 | import json
18 | import StringIO
19 | from boto3.session import Session
20 | caffe_root = os.getenv('CAFFE_ROOT', '/home/ubuntu/caffe-segnet/')
21 | sys.path.insert(0, caffe_root + 'python')
22 | import caffe
23 |
24 | from inference import predict
25 |
26 | app = Flask(__name__)
27 | app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 300
28 | aws_session = Session()
29 | s3 = aws_session.client('s3')
30 |
31 | # Import arguments
32 | cpu_only_env = bool(os.getenv('SKYNET_CPU_ONLY', False))
33 | parser = argparse.ArgumentParser()
34 | parser.add_argument('image_tiles', type=str)
35 | parser.add_argument('--model', type=str, default='/model/segnet_deploy.prototxt')
36 | parser.add_argument('--weights', type=str, default='/model/weights.caffemodel')
37 | parser.add_argument('--classes', type=str, default='/model/classes.json')
38 | parser.add_argument('--cpu-mode', action='store_true', default=cpu_only_env)
39 | parser.add_argument('--min-zoom', type=int, default=0)
40 | parser.add_argument('--max-zoom', type=int, default=18)
41 | args = parser.parse_args()
42 |
43 |
44 | def resolve_s3(s3uri):
45 | match = re.search('s3://([^/]*)/(.*)$', s3uri)
46 | if not match:
47 | return s3uri
48 | bucket = match.group(1)
49 | key = match.group(2)
50 | target = '/model/' + os.path.basename(key)
51 | if not os.path.isfile(target):
52 | print('downloading ' + s3uri + ' to ' + target)
53 | s3.download_file(bucket, key, target)
54 | else:
55 | print(s3uri + ' appears to have already been downloaded to ' + target +
56 | '; using local copy.')
57 | return target
58 |
59 |
60 | model_file = resolve_s3(args.model)
61 | weights_file = resolve_s3(args.weights)
62 | classes_file = resolve_s3(args.classes)
63 |
64 |
65 | # read classes metadata
66 | with open(classes_file) as classes:
67 | colors = map(lambda x: x['color'][1:], json.load(classes))
68 | colors.append('000000')
69 | colors = map(lambda rgbstr: tuple(map(ord, rgbstr.decode('hex'))), colors)
70 | num_classes = len(colors)
71 |
72 | # read model definition
73 | model = open(model_file, 'r').read()
74 |
75 | # create net
76 | if args.cpu_mode:
77 | caffe.set_mode_cpu()
78 | else:
79 | caffe.set_mode_gpu()
80 |
81 | net = caffe.Net(model_file,
82 | weights_file,
83 | caffe.TEST)
84 |
85 |
86 | @app.route('/index.json')
87 | def index():
88 | return jsonify(tilejson='2.0.0',
89 | tiles=[request.url.replace('index.json', '{z}/{x}/{y}/tile.png')],
90 | minzoom=args.min_zoom,
91 | maxzoom=args.max_zoom)
92 |
93 |
94 | def send_prediction(im):
95 | bands = len(im.getbands())
96 |     imdata = np.array(im.getdata()).reshape(im.size[1], im.size[0], bands)
97 | prediction = predict(net, colors, imdata)
98 | strio = StringIO.StringIO()
99 | prediction.save(strio, 'PNG')
100 | strio.seek(0)
101 | return send_file(strio, mimetype='image/png')
102 |
103 |
104 | @app.route('/<int:z>/<int:x>/<int:y>/tile.png')
105 | def tile(x, y, z):
106 | if z > args.max_zoom or z < args.min_zoom:
107 | return abort(404)
108 | image_url = args.image_tiles.replace('{x}', str(x)).replace('{y}', str(y)).replace('{z}', str(z))
109 | resp = requests.get(image_url)
110 | return send_prediction(Image.open(StringIO.StringIO(resp.content)))
111 |
112 |
113 | @app.route('/predict', methods=['POST'])
114 | def pred():
115 | return send_prediction(Image.open(request.files['image']))
116 |
117 |
118 | app.run(host='0.0.0.0')
119 |
--------------------------------------------------------------------------------
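The tile server exposes three routes: a TileJSON index, a z/x/y tile endpoint that fetches the matching imagery tile and runs it through the net, and a direct POST endpoint for one-off predictions. A minimal client sketch against a locally running instance (Flask's default port 5000 is assumed, since app.run() sets no port; the tile coordinates and filenames are hypothetical):

    import requests

    base = 'http://localhost:5000'

    # TileJSON index describing the prediction tile endpoint
    print(requests.get(base + '/index.json').json())

    # fetch one prediction tile, z/x/y as in the TileJSON template
    tile = requests.get(base + '/17/38597/49717/tile.png')
    open('prediction.png', 'wb').write(tile.content)

    # or POST an image directly for prediction
    with open('input.png', 'rb') as f:
        resp = requests.post(base + '/predict', files={'image': f})
    open('direct_prediction.png', 'wb').write(resp.content)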
/segnet/train:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import os
4 | import re
5 | import math
6 | import glob
7 | import argparse
8 | import subprocess
9 | from google.protobuf import text_format
10 |
11 | caffe_root = os.getenv('CAFFE_ROOT', '/home/ubuntu/caffe-segnet/')
12 | import sys # nopep8
13 | sys.path.insert(0, caffe_root + 'python')
14 | from caffe.proto import caffe_pb2 # nopep8
15 |
16 | caffe_binary = os.path.join(caffe_root, 'build/tools/caffe')
17 | scripts = os.path.dirname(os.path.realpath(__file__))
18 | setup_model = os.path.join(scripts, 'setup-model')
19 | run_test = os.path.join(scripts, 'run-test')
20 | plot_log = os.path.normpath(os.path.join(scripts, '../util/plot_training_log.py'))
21 |
22 | parser = argparse.ArgumentParser()
23 | parser.add_argument('--model', type=str, default='segnet')
24 | parser.add_argument('--output', type=str, default='/output')
25 | parser.add_argument('--data', type=str, default='/data')
26 | parser.add_argument('--fetch-data', type=str)
27 | parser.add_argument('--snapshot', type=int, default=10000)
28 | parser.add_argument('--iterations', type=int, default=100000)
29 | parser.add_argument('--cpu', action='store_true')
30 | parser.add_argument('--gpu', type=int, nargs='*', default=[0])
31 | parser.add_argument('--batch-size', type=int)
32 | parser.add_argument('--crop-size', type=int)
33 | parser.add_argument('--lr-step-size', type=int, default=30)
34 | parser.add_argument('--display-frequency', type=int, default=100)
35 | parser.add_argument('--sync', type=str)
36 | args = parser.parse_args()
37 |
38 | snapshot_dir = os.path.join(args.output, 'snapshots')
39 | snapshot_prefix = os.path.join(snapshot_dir, args.model)
40 |
41 | # hacky and brittle!
42 | solver_types = caffe_pb2.SolverParameter.SolverType.DESCRIPTOR.values_by_name
43 |
44 |
45 | def setup_solver(train_net_path, epoch, previous_solver=None, **kwargs):
46 | defaults = {
47 | 'solver_type': 'SGD',
48 | 'iter_size': 1,
49 | 'max_iter': 100000,
50 | 'base_lr': 0.01,
51 | 'lr_policy': 'step',
52 | 'gamma': 0.25,
53 | 'stepsize': 30 * epoch,
54 | 'momentum': 0.9,
55 | 'weight_decay': 0.0005,
56 | 'display': args.display_frequency,
57 | 'snapshot': 10000,
58 | 'snapshot_prefix': snapshot_prefix,
59 | 'solver_mode': caffe_pb2.SolverParameter.GPU
60 | }
61 | s = caffe_pb2.SolverParameter()
62 | if previous_solver:
63 | previous_solver = open(previous_solver, 'r').read()
64 | text_format.Merge(previous_solver, s)
65 | for key, value in kwargs.iteritems():
66 | if key == 'solver_type':
67 | value = solver_types[value].number
68 | if key == 'stepsize':
69 | value = value * epoch
70 |
71 | setattr(s, key, value)
72 | else:
73 | for key, default_value in defaults.iteritems():
74 | value = kwargs.get(key, default_value)
75 | if key == 'solver_type':
76 | value = solver_types[value].number
77 | if key == 'stepsize':
78 | value = value * epoch
79 | setattr(s, key, value)
80 | s.train_net = train_net_path
81 | return str(s)
82 |
83 |
84 | def snapshot_run(starting_snapshot_iteration, snapshot_iterations):
85 | train_args = [caffe_binary, 'train']
86 | if starting_snapshot_iteration > 0:
87 | train_args.append('-snapshot')
88 | train_args.append('%s_iter_%s.solverstate' % (snapshot_prefix, starting_snapshot_iteration))
89 |
90 | parallel = 1
91 | if not args.cpu:
92 | train_args.append('-gpu')
93 | train_args.append(','.join(map(str, args.gpu)))
94 | parallel = len(args.gpu)
95 |
96 | # Set up solver_START-END.prototxt file
97 | previous_solver = None
98 | if starting_snapshot_iteration > 0:
99 | solvers = glob.glob(os.path.join(args.output, 'solver*-%s.prototxt' % starting_snapshot_iteration))
100 | assert len(solvers) > 0, "Could not find previous solver prototxt for iteration %s" % starting_snapshot_iteration
101 | previous_solver = solvers[0]
102 |
103 | max_iterations = snapshot_iterations * (1 + starting_snapshot_iteration / snapshot_iterations)
104 | assert max_iterations % snapshot_iterations == 0
105 | epoch = int(math.ceil(float(train_size) / batch_size / parallel))
106 | solver = setup_solver(train_net_path, epoch, previous_solver,
107 | max_iter=max_iterations,
108 | snapshot=snapshot_iterations,
109 | stepsize=args.lr_step_size)
110 |
111 | solver_file = 'solver_%s-%s.prototxt' % (starting_snapshot_iteration, max_iterations)
112 | solver_path = os.path.join(args.output, solver_file)
113 | with open(solver_path, 'w') as f:
114 | f.write(str(solver))
115 |
116 | train_args.append('-solver')
117 | train_args.append(solver_path)
118 |
119 | logfile = 'train_%s-%s.log' % (starting_snapshot_iteration, max_iterations)
120 | logfile = os.path.join(args.output, logfile)
121 | print('Training: %s' % ' '.join(train_args))
122 | with open(logfile, 'w') as f:
123 | exitcode = subprocess.call(train_args, stdout=f, stderr=subprocess.STDOUT)
124 |
125 | if exitcode != 0:
126 | print(open(logfile, 'r').read())
127 | sys.exit(exitcode)
128 |
129 | print('Finished training to snapshot iteration %s' % max_iterations)
130 | return snapshot_prefix + '_iter_' + str(max_iterations) + '.caffemodel'
131 |
132 |
133 | def test_run(snapshot, iteration):
134 | logfile = 'test_%s.log' % iteration
135 | logfile = os.path.join(args.output, logfile)
136 | test_args = [run_test,
137 | '--output', snapshot.replace('caffemodel', 'results'),
138 | '--train', train_net_path,
139 | '--weights', snapshot,
140 | '--classes', os.path.join(args.data, 'classes.json')]
141 |
142 | # If we're running on a single GPU, then pass its id to the test-runner
143 | # scripts as well. Otherwise, if multiple training runs are happening on
144 | # a single machine, they would all run their tests using GPU 0, and fail
145 | if not args.cpu and len(args.gpu) == 1:
146 | test_args.append('--gpu')
147 | test_args.append(','.join(map(str, args.gpu)))
148 |
149 | print('Testing: %s' % ' '.join(test_args))
150 | with open(logfile, 'w') as f:
151 | exitcode = subprocess.call(test_args, stdout=f, stderr=subprocess.STDOUT)
152 |
153 | if exitcode != 0:
154 | print(open(logfile, 'r').read())
155 | sys.exit(exitcode)
156 |
157 |
158 | # Fetch training data if it doesn't exist
159 | if not os.path.exists(args.data):
160 | if not args.fetch_data:
161 |         raise RuntimeError('No data at %s and --fetch-data is not set.' % args.data)
162 | subprocess.call(['aws', 's3', 'sync', args.fetch_data, args.data])
163 |
164 |
165 | # Create the network prototxt files
166 | if not os.path.exists(os.path.join(args.output, args.model + '_train.prototxt')):
167 | setup_model_cmd = [setup_model,
168 | '--model', args.model,
169 | '--data', args.data,
170 | '--output', args.output,
171 | ]
172 | if args.batch_size:
173 | setup_model_cmd.append('--batch-size')
174 | setup_model_cmd.append(str(args.batch_size))
175 | if args.crop_size:
176 | setup_model_cmd.append('--crop')
177 | setup_model_cmd.append(str(args.crop_size))
178 | print('Set up model: %s' % ' '.join(setup_model_cmd))
179 | subprocess.call(setup_model_cmd)
180 |
181 | # determine the size of the training set and the batch size
182 | train_net_path = os.path.join(args.output, args.model + '_train.prototxt')
183 | test_net_path = os.path.join(args.output, args.model + '_inference.prototxt')
184 | with open(train_net_path, 'r') as f:
185 | batch_size = re.search('batch_size:\s*(\d+)', f.read())
186 | batch_size = int(batch_size.group(1))
187 | train_size = sum(1 for line in open(os.path.join(args.data, 'train.txt')))
188 |
189 | # Look for a snapshot
190 | if args.sync:
191 | subprocess.call(['aws', 's3', 'sync', args.sync, args.output])
192 |
193 | # Determine which, if any, snapshot we're starting from
194 | current_iteration = 0
195 | initial_snapshot = None
196 | if os.path.isdir(snapshot_dir):
197 | # If there are snapshots, grab the latest one.
198 | solver_states = glob.glob(os.path.join(snapshot_dir, args.model + '*.solverstate'))
199 | # parse iteration numbers from the snapshot filenames
200 | snaps = map(lambda s: re.search('(\d+)\.solverstate', s), solver_states)
201 | snaps = map(lambda m: int(m.group(1)), filter(lambda m: m, snaps))
202 | snaps.sort()
203 | if len(snaps) > 0:
204 | current_iteration = snaps[-1]
205 |         initial_snapshot = '%s_iter_%s.caffemodel' % (snapshot_prefix, current_iteration)
206 | initial_snapshot = snaps[-1][1].replace('solverstate', 'caffemodel')
207 | else:
208 | os.mkdir(snapshot_dir)
208 |
209 | # If there was a previous snapshot and it doesn't have an associated test
210 | # results dir, then run that test before we resume training.
211 | if initial_snapshot is not None:
212 | previous_results_dir = initial_snapshot.replace('caffemodel', 'results')
213 | if not os.path.exists(previous_results_dir):
214 | test_run(initial_snapshot, current_iteration)
215 |
216 | # Run the training/testing loop
217 | assert args.snapshot > 0
218 | while current_iteration < args.iterations:
219 | # Train for args.snapshot iterations
220 | snapshot = snapshot_run(current_iteration, args.snapshot)
221 | current_iteration += args.snapshot
222 | # Plot the loss curve
223 | logs_to_plot = glob.glob(os.path.join(args.output, 'train*.log'))
224 | logs_to_plot = map(lambda p: os.path.relpath(p, args.output), logs_to_plot)
225 | subprocess.call([plot_log] + logs_to_plot, cwd=args.output)
226 | # Test the net
227 | test_run(snapshot, current_iteration)
228 | # Sync to S3
229 | if args.sync:
230 | print('Syncing back to %s' % args.sync)
231 | subprocess.call(['aws', 's3', 'sync', args.output, args.sync])
232 |
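233 | # Example invocation (illustrative paths and flags, drawn from the argparse
234 | # options referenced above; adjust to your setup):
235 | #   ./train --model segnet --data /data/training-set-9 --output /output \
236 | #       --iterations 20000 --snapshot 1000 --gpu 0 --sync s3://my-bucket/run-1
237 | # Each pass of the loop trains for --snapshot iterations, re-plots the loss
238 | # curve, tests the newest snapshot, and, if --sync is set, pushes results to S3.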
--------------------------------------------------------------------------------
/segnet/vectorize.py:
--------------------------------------------------------------------------------
1 | """
2 | This file contains code copied and modified from 'beachfront-py', which is
3 | under the following license:
4 |
5 | ---
6 |
7 | beachfront-py
8 | https://github.com/venicegeo/beachfront-py
9 | Copyright 2016, RadiantBlue Technologies, Inc.
10 | Licensed under the Apache License, Version 2.0 (the "License");
11 | you may not use this file except in compliance with the License.
12 | You may obtain a copy of the License at
13 | http://www.apache.org/licenses/LICENSE-2.0
14 | Unless required by applicable law or agreed to in writing, software
15 | distributed under the License is distributed on an "AS IS" BASIS,
16 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 | See the License for the specific language governing permissions and
18 | limitations under the License.
19 | """
20 |
21 | import numpy
22 | import potrace as _potrace
23 | from PIL import Image
24 | import click
25 | import json
26 |
27 |
28 | def lines_to_features(lines, source='imagery'):
29 | """ Create features from lines """
30 | gid = 0
31 | features = []
32 | for line in lines:
33 | feature = {
34 | 'type': 'Feature',
35 | 'geometry': {
36 | 'type': 'Polygon',
37 | 'coordinates': [line]
38 | },
39 | 'properties': {
40 | 'id': gid,
41 | 'source': source
42 | }
43 | }
44 | features.append(feature)
45 | gid += 1
46 | return features
47 |
48 |
49 | def to_geojson(lines, source='imagery'):
50 | geojson = {
51 | 'type': 'FeatureCollection',
52 | 'features': lines_to_features(lines, source=source),
53 | }
54 | return geojson
55 |
56 |
57 | def potrace_array(arr, turdsize=10.0, tolerance=0.2):
58 | """ Trace numpy array using potrace """
59 | bmp = _potrace.Bitmap(arr)
60 | polines = bmp.trace(turdsize=turdsize, turnpolicy=_potrace.TURNPOLICY_WHITE,
61 | alphamax=0.0, opticurve=1.0, opttolerance=tolerance)  # alphamax=0 keeps corners sharp (polygonal output)
62 | lines = []
63 | for line in polines:
64 | lines.append(line.tesselate().tolist())
65 |
66 | return lines
67 |
68 |
69 | def vectorize(img_file, turdsize=10.0, tolerance=0.2):
70 | img = Image.open(img_file)
71 | arr = numpy.asarray(img)
72 | arr = numpy.any(arr[:, :, :-1], axis=2)  # boolean mask: True where any RGB channel is non-zero (alpha dropped)
73 | lines = potrace_array(arr, turdsize, tolerance)
74 | return to_geojson(lines)
75 |
76 |
77 | @click.command()
78 | @click.argument('img_file', default='-')
79 | def vectorize_cmd(img_file):
80 | click.echo(json.dumps(vectorize(img_file)))
81 |
82 |
83 | if __name__ == '__main__':
84 | vectorize_cmd()
85 |
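86 | # Usage sketch (assumes a prediction PNG in which foreground pixels have a
87 | # non-zero RGB channel, as produced by the segmentation pipeline):
88 | #   python vectorize.py prediction.png > prediction.geojson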
--------------------------------------------------------------------------------
/start_instance:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env node
2 |
3 | var spawn = require('child_process').spawn
4 | var argv = require('minimist')(process.argv.slice(2), {
5 | alias: {
6 | r: 'region',
7 | i: 'ssh-keypath',
8 | t: 'instance-type',
9 | iam: 'iam-role'
10 | },
11 | default: {
12 | region: 'us-east-1',
13 | 'instance-type': 'p2.xlarge',
14 | 'iam-role': null
15 | }
16 | })
17 |
18 | if (!argv['ssh-keypath'] || argv._.length !== 1) {
19 | console.log('Usage: ' + process.argv[1] + ' --ssh-keypath /path/to/private/key [--instance-type p2.xlarge] [--region us-east-1] [--iam-role IAM_role] machine_name')
20 | process.exit(1)
21 | }
22 |
23 | var name = argv._[0]
24 |
25 | var args = [
26 | 'create',
27 | '--driver', 'amazonec2',
28 | '--amazonec2-region', argv.region,
29 | '--amazonec2-tags', 'Project,skynet',
30 | '--amazonec2-instance-type', argv['instance-type'],
31 | '--amazonec2-ssh-keypath', argv['ssh-keypath'],
32 | ]
33 |
34 | if (argv['iam-role']) {
35 | args.push('--amazonec2-iam-instance-profile', argv['iam-role'])
36 | }
37 |
38 | args.push(name)
39 |
40 | console.log('Creating ' + argv['instance-type'] + ' instance.')
41 | spawn('docker-machine', args, {stdio: 'inherit'})
42 | .on('exit', function (code) {
43 | if (code && code !== 0) {
44 | process.exit(code)
45 | }
46 |
47 | console.log('Installing NVIDIA drivers and nvidia-docker on instance.')
48 |
49 | var command = [
50 | // from https://github.com/NVIDIA/nvidia-docker/wiki/Deploy-on-Amazon-EC2
51 | // Install NVIDIA drivers 361.42
52 | 'sudo apt-get install --no-install-recommends -y gcc make libc-dev',
53 | 'wget -P /tmp http://us.download.nvidia.com/XFree86/Linux-x86_64/361.42/NVIDIA-Linux-x86_64-361.42.run',
54 | 'sudo sh /tmp/NVIDIA-Linux-x86_64-361.42.run --silent',
55 | // Install nvidia-docker and nvidia-docker-plugin
56 | 'wget -P /tmp https://github.com/NVIDIA/nvidia-docker/releases/download/v1.0.1/nvidia-docker_1.0.1-1_amd64.deb',
57 | 'sudo dpkg -i /tmp/nvidia-docker*.deb && rm /tmp/nvidia-docker*.deb',
58 |
59 | // Install awscli
60 | 'sudo apt-get install -y awscli'
61 | ].join(' && ')
62 | spawn('docker-machine', ['ssh', name, command], {stdio: 'inherit'})
63 | .on('exit', function (code) {
64 | if (!code) { console.log('Success!') }
65 | process.exit(code)
66 | })
67 | })
68 |
--------------------------------------------------------------------------------
/start_spot_instance:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env node
2 |
3 | var spawn = require('child_process').spawn
4 | var argv = require('minimist')(process.argv.slice(2), {
5 | alias: {
6 | r: 'region',
7 | i: 'ssh-keypath',
8 | t: 'instance-type',
9 | iam: 'iam-role',
10 | p: 'price'
11 | },
12 | default: {
13 | region: 'us-east-1',
14 | 'instance-type': 'p2.xlarge',
15 | 'iam-role': null,
16 | 'price': '0.90'
17 | }
18 | })
19 |
20 | if (!argv['ssh-keypath'] || argv._.length !== 1) {
21 | console.log('Usage: ' + process.argv[1] + ' --ssh-keypath /path/to/private/key [--instance-type p2.xlarge] [--region us-east-1] [--iam-role IAM_role] [--price 0.90] machine_name')
22 | process.exit(1)
23 | }
24 |
25 | var name = argv._[0]
26 |
27 | var args = [
28 | 'create',
29 | '--driver', 'amazonec2',
30 | '--amazonec2-region', argv.region,
31 | '--amazonec2-tags', 'Project,skynet',
32 | '--amazonec2-instance-type', argv['instance-type'],
33 | '--amazonec2-ssh-keypath', argv['ssh-keypath'],
34 | '--amazonec2-request-spot-instance',
35 | '--amazonec2-spot-price', argv['price']
36 | ]
37 |
38 | if (argv['iam-role']) {
39 | args.push('--amazonec2-iam-instance-profile', argv['iam-role'])
40 | }
41 |
42 | args.push(name)
43 |
44 | console.log('Creating ' + argv['instance-type'] + ' instance.')
45 | spawn('docker-machine', args, {stdio: 'inherit'})
46 | .on('exit', function (code) {
47 | if (code && code !== 0) {
48 | process.exit(code)
49 | }
50 |
51 | console.log('Installing NVIDIA drivers and nvidia-docker on instance.')
52 |
53 | var command = [
54 | // from https://github.com/NVIDIA/nvidia-docker/wiki/Deploy-on-Amazon-EC2
55 | // Install NVIDIA drivers 361.42
56 | 'sudo apt-get install --no-install-recommends -y gcc make libc-dev',
57 | 'wget -P /tmp http://us.download.nvidia.com/XFree86/Linux-x86_64/361.42/NVIDIA-Linux-x86_64-361.42.run',
58 | 'sudo sh /tmp/NVIDIA-Linux-x86_64-361.42.run --silent',
59 | // Install nvidia-docker and nvidia-docker-plugin
60 | 'wget -P /tmp https://github.com/NVIDIA/nvidia-docker/releases/download/v1.0.1/nvidia-docker_1.0.1-1_amd64.deb',
61 | 'sudo dpkg -i /tmp/nvidia-docker*.deb && rm /tmp/nvidia-docker*.deb',
62 |
63 | // Install awscli
64 | 'sudo apt-get install -y awscli'
65 | ].join(' && ')
66 | spawn('docker-machine', ['ssh', name, command], {stdio: 'inherit'})
67 | .on('exit', function (code) {
68 | if (!code) { console.log('Success!') }
69 | process.exit(code)
70 | })
71 | })
72 |
--------------------------------------------------------------------------------
/user_data.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | ## Install docker (ubuntu Xenial 16.04 (LTS))
4 | ## https://docs.docker.com/engine/installation/linux/ubuntulinux/
5 | apt-get update
6 | apt-get install apt-transport-https ca-certificates -y
7 | apt-key adv \
8 | --keyserver hkp://ha.pool.sks-keyservers.net:80 \
9 | --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
10 |
11 | echo "deb https://apt.dockerproject.org/repo ubuntu-xenial main" | tee /etc/apt/sources.list.d/docker.list
12 | apt-get update
13 | apt-get install linux-image-extra-$(uname -r) linux-image-extra-virtual -y
14 | apt-get update
15 | apt-get install docker-engine -y
16 | service docker start
17 |
18 | # Install NVIDIA drivers 361.42
19 | # Install nvidia-docker and nvidia-docker-plugin
20 | # https://github.com/NVIDIA/nvidia-docker/wiki/Deploy-on-Amazon-EC2
21 | apt-get install --no-install-recommends -y gcc make libc-dev
22 | wget -P /tmp http://us.download.nvidia.com/XFree86/Linux-x86_64/361.42/NVIDIA-Linux-x86_64-361.42.run
23 | wget -P /tmp https://github.com/NVIDIA/nvidia-docker/releases/download/v1.0.1/nvidia-docker_1.0.1-1_amd64.deb
24 | sh /tmp/NVIDIA-Linux-x86_64-361.42.run --silent
25 | dpkg -i /tmp/nvidia-docker*.deb && rm /tmp/nvidia-docker*.deb
26 |
27 | # Install Node
28 | # https://nodejs.org/en/download/package-manager/#debian-and-ubuntu-based-linux-distributions
29 | curl -sL https://deb.nodesource.com/setup_6.x | bash -
30 | apt-get install -y nodejs
31 |
32 | # Install awscli
33 | apt-get install awscli -y
34 |
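35 | # Note: EC2 runs user data as root, which is why the commands above omit sudo.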
--------------------------------------------------------------------------------
/util/README.md:
--------------------------------------------------------------------------------
1 |
2 | Helper scripts and utilities for training; some are adapted from https://github.com/BVLC/caffe.
3 |
4 |
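5 | Typical usage, assuming a caffe training log (paths are illustrative):
6 | 
7 | ```bash
8 | # split the log into .train / .test tables of iterations, seconds, and metrics
9 | ./parse_log.sh /output/train_0.log
10 | 
11 | # plot smoothed training loss vs. iterations (chart type 7, the default)
12 | ./plot_training_log.py --type 7 --smooth 50 --output loss.png /output/train_0.log
13 | ```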
--------------------------------------------------------------------------------
/util/draw_net.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """
3 | Draw a graph of the net architecture.
4 | """
5 | 
6 | # Force the non-interactive Agg backend so this runs without a display
7 | # https://github.com/BVLC/caffe/issues/861
8 | import matplotlib
9 | matplotlib.use('Agg')
10 | from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
11 | from google.protobuf import text_format
12 |
13 | import caffe
14 | import caffe.draw
15 | from caffe.proto import caffe_pb2
16 |
17 |
18 | def parse_args():
19 | """Parse input arguments
20 | """
21 |
22 | parser = ArgumentParser(description=__doc__,
23 | formatter_class=ArgumentDefaultsHelpFormatter)
24 |
25 | parser.add_argument('input_net_proto_file',
26 | help='Input network prototxt file')
27 | parser.add_argument('output_image_file',
28 | help='Output image file')
29 | parser.add_argument('--rankdir',
30 | help=('One of TB (top-bottom, i.e., vertical), '
31 | 'RL (right-left, i.e., horizontal), or another '
32 | 'valid dot option; see '
33 | 'http://www.graphviz.org/doc/info/'
34 | 'attrs.html#k:rankdir'),
35 | default='TB')
36 |
37 | args = parser.parse_args()
38 | return args
39 |
40 |
41 | def main():
42 | args = parse_args()
43 | net = caffe_pb2.NetParameter()
44 | text_format.Merge(open(args.input_net_proto_file).read(), net)
45 | print('Drawing net to %s' % args.output_image_file)
46 | caffe.draw.draw_net_to_file(net, args.output_image_file, args.rankdir)
47 |
48 |
49 | if __name__ == '__main__':
50 | main()
51 |
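52 | # Example (illustrative paths; the prototxt can be one generated by
53 | # segnet/setup-model):
54 | #   python draw_net.py /output/segnet_train.prototxt /output/net.png --rankdir LR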
--------------------------------------------------------------------------------
/util/extract_seconds.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import datetime
3 | import os
4 | import sys
5 |
6 | def extract_datetime_from_line(line, year):
7 | # Expected format: I0210 13:39:22.381027 25210 solver.cpp:204] Iteration 100, lr = 0.00992565
8 | line = line.strip().split()
9 | month = int(line[0][1:3])
10 | day = int(line[0][3:])
11 | timestamp = line[1]
12 | pos = timestamp.rfind('.')
13 | ts = [int(x) for x in timestamp[:pos].split(':')]
14 | hour = ts[0]
15 | minute = ts[1]
16 | second = ts[2]
17 | microsecond = int(timestamp[pos + 1:])
18 | dt = datetime.datetime(year, month, day, hour, minute, second, microsecond)
19 | return dt
20 |
21 |
22 | def get_log_created_year(input_file):
23 | """Get year from log file system timestamp
24 | """
25 |
26 | log_created_time = os.path.getctime(input_file)
27 | log_created_year = datetime.datetime.fromtimestamp(log_created_time).year
28 | return log_created_year
29 |
30 |
31 | def get_start_time(line_iterable, year):
32 | """Find start time from group of lines
33 | """
34 |
35 | start_datetime = None
36 | for line in line_iterable:
37 | line = line.strip()
38 | if line.find('Solving') != -1:
39 | start_datetime = extract_datetime_from_line(line, year)
40 | break
41 | return start_datetime
42 |
43 |
44 | def extract_seconds(input_file, output_file):
45 | with open(input_file, 'r') as f:
46 | lines = f.readlines()
47 | log_created_year = get_log_created_year(input_file)
48 | start_datetime = get_start_time(lines, log_created_year)
49 | assert start_datetime, 'Start time not found'
50 |
51 | out = open(output_file, 'w')
52 | for line in lines:
53 | line = line.strip()
54 | if line.find('Iteration') != -1:
55 | dt = extract_datetime_from_line(line, log_created_year)
56 | elapsed_seconds = (dt - start_datetime).total_seconds()
57 | out.write('%f\n' % elapsed_seconds)
58 | out.close()
59 |
60 | if __name__ == '__main__':
61 | if len(sys.argv) < 3:
62 | print('Usage: ./extract_seconds input_file output_file')
63 | exit(1)
64 | extract_seconds(sys.argv[1], sys.argv[2])
65 |
--------------------------------------------------------------------------------
/util/extract_seconds.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/developmentseed/skynet-train/c43c71ecf4bd20cba21820bdbad0b5f1be4d70c3/util/extract_seconds.pyc
--------------------------------------------------------------------------------
/util/parse_log.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Usage parse_log.sh caffe.log
3 | # It creates the following two text files, each containing a table:
4 | # caffe.log.test (columns: '#Iters Seconds TestAccuracy TestLoss')
5 | # caffe.log.train (columns: '#Iters Seconds TrainingLoss LearningRate')
6 |
7 |
8 | # get the dirname of the script
9 | DIR="$( cd "$(dirname "$0")" ; pwd -P )"
10 |
11 | if [ "$#" -lt 1 ]
12 | then
13 | echo "Usage parse_log.sh /path/to/your.log"
14 | exit 1
15 | fi
16 | LOG=`basename $1`
17 | grep -B 1 'Test ' $1 > aux.txt
18 | grep 'Iteration ' aux.txt | sed 's/.*Iteration \([[:digit:]]*\).*/\1/g' > aux0.txt
19 | grep 'Test net output #0' aux.txt | awk '{print $11}' > aux1.txt
20 | grep 'Test net output #1' aux.txt | awk '{print $11}' > aux2.txt
21 |
22 | # Extracting elapsed seconds
23 | # The '] Solving' line is included because it contains the training start time
24 | grep '] Solving ' $1 > aux3.txt
25 | grep 'Testing net' $1 >> aux3.txt
26 | $DIR/extract_seconds.py aux3.txt aux4.txt
27 |
28 | # Generate the test table
29 | echo '#Iters Seconds TestAccuracy TestLoss' > $LOG.test
30 | paste aux0.txt aux4.txt aux1.txt aux2.txt | column -t >> $LOG.test
31 | rm aux.txt aux0.txt aux1.txt aux2.txt aux3.txt aux4.txt
32 |
33 | # The '] Solving' line is included because it contains the training start time
34 | grep '] Solving ' $1 > aux.txt
35 | grep ', loss = ' $1 >> aux.txt
36 | grep 'Iteration ' aux.txt | sed 's/.*Iteration \([[:digit:]]*\).*/\1/g' > aux0.txt
37 | grep ', loss = ' $1 | awk '{print $9}' > aux1.txt
38 | grep ', lr = ' $1 | awk '{print $9}' > aux2.txt
39 |
40 | # Extracting elapsed seconds
41 | $DIR/extract_seconds.py aux.txt aux3.txt
42 |
43 | # Generate the train table
44 | echo '#Iters Seconds TrainingLoss LearningRate' > $LOG.train
45 | paste aux0.txt aux3.txt aux1.txt aux2.txt | column -t >> $LOG.train
46 | rm aux.txt aux0.txt aux1.txt aux2.txt aux3.txt
47 |
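48 | # The generated tables look like (values depend on the log being parsed):
49 | #   caffe.log.train: '#Iters Seconds TrainingLoss LearningRate'
50 | #   caffe.log.test:  '#Iters Seconds TestAccuracy TestLoss'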
--------------------------------------------------------------------------------
/util/plot_training_log.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | # https://github.com/BVLC/caffe/issues/861
4 | import matplotlib
5 | matplotlib.use('Agg')
6 |
7 | import argparse
8 | import inspect
9 | import os
10 | import random
11 | import time
12 | import numpy as np
13 | import matplotlib.pyplot as plt
14 | import matplotlib.markers as mks
15 |
16 |
17 | def get_log_parsing_script():
18 | currentpath = os.path.abspath(inspect.getfile(inspect.currentframe()))
19 | dirname = os.path.dirname(currentpath)
20 | return dirname + '/parse_log.sh'
21 |
22 |
23 | def get_log_file_suffix():
24 | return '.log'
25 |
26 |
27 | def get_chart_type_description_separator():
28 | return ' vs. '
29 |
30 |
31 | def is_x_axis_field(field):
32 | x_axis_fields = ['Iters', 'Seconds']
33 | return field in x_axis_fields
34 |
35 |
36 | def is_y_axis_field(field):
37 | x_axis_only_fields = ['Seconds']
38 | return not (field in x_axis_only_fields)
39 |
40 |
41 | def create_field_index():
42 | train_key = 'Train'
43 | test_key = 'Test'
44 | field_index = {train_key: {'Iters': 0, 'Seconds': 1,
45 | train_key + ' loss': 2,
46 | train_key + ' learning rate': 3},
47 | test_key: {'Iters': 0, 'Seconds': 1,
48 | test_key + ' accuracy': 2,
49 | test_key + ' loss': 3}}
50 | fields = set()
51 | for data_file_type in field_index.keys():
52 | fields = fields.union(set(field_index[data_file_type].keys()))
53 | fields = list(fields)
54 | fields.sort()
55 | return field_index, fields
56 |
57 |
58 | def get_supported_chart_types():
59 | field_index, fields = create_field_index()
60 | num_fields = len(fields)
61 | supported_chart_types = []
62 | for i in xrange(num_fields):
63 | if is_y_axis_field(fields[i]):
64 | for j in xrange(num_fields):
65 | if i != j and is_x_axis_field(fields[j]):
66 | supported_chart_types.append('%s%s%s' % (
67 | fields[i], get_chart_type_description_separator(),
68 | fields[j]))
69 | return supported_chart_types
70 |
71 |
72 | def get_chart_type_description(chart_type):
73 | supported_chart_types = get_supported_chart_types()
74 | chart_type_description = supported_chart_types[chart_type]
75 | return chart_type_description
76 |
77 |
78 | def get_data_file_type(chart_type):
79 | if chart_type == 0:
80 | return 'Train'
81 | description = get_chart_type_description(chart_type)
82 | data_file_type = description.split()[0]
83 | return data_file_type
84 |
85 |
86 | def get_data_file(chart_type, path_to_log):
87 | return os.path.basename(path_to_log) + '.' + \
88 | get_data_file_type(chart_type).lower()
89 |
90 |
91 | def get_field_descriptions(chart_type):
92 | description = get_chart_type_description(chart_type).split(
93 | get_chart_type_description_separator())
94 | y_axis_field = description[0]
95 | x_axis_field = description[1]
96 | return x_axis_field, y_axis_field
97 |
98 |
99 | def get_field_indices(chart_type, x_axis_field, y_axis_field):
100 | data_file_type = get_data_file_type(chart_type)
101 | fields = create_field_index()[0][data_file_type]
102 | return fields[x_axis_field], fields[y_axis_field]
103 |
104 |
105 | def load_data(data_file, field_idx0, field_idx1):
106 | data = [[], []]
107 | with open(data_file, 'r') as f:
108 | for line in f:
109 | line = line.strip()
110 | if line[0] != '#':
111 | fields = line.split()
112 | data[0].append(float(fields[field_idx0].strip()))
113 | data[1].append(float(fields[field_idx1].strip()))
114 | return data
115 |
116 |
117 | def random_marker():
118 | markers = mks.MarkerStyle.markers
119 | num = len(markers.values())
120 | idx = random.randint(0, num - 1)
121 | return markers.values()[idx]
122 |
123 |
124 | def get_data_label(path_to_log):
125 | label = path_to_log[path_to_log.rfind('/') + 1: path_to_log.rfind(
126 | get_log_file_suffix())]
127 | return label
128 |
129 |
130 | def get_legend_loc(chart_type):
131 | x_axis, y_axis = get_field_descriptions(chart_type)
132 | loc = 'lower right'
133 | if y_axis.find('accuracy') != -1:
134 | pass
135 | if y_axis.find('loss') != -1 or y_axis.find('learning rate') != -1:
136 | loc = 'upper right'
137 | return loc
138 |
139 |
140 | def runningMeanFast(x, N):
141 | return np.convolve(np.array(x), np.ones((N,)) / N, 'valid')
142 |
143 |
144 | def plot_chart(chart_type, path_to_png, logpath, smooth):
145 | logpath = logpath.split(':')
146 | path_to_log = logpath[0]
147 | if len(logpath) == 1:
148 | shift = 0
149 | else:
150 | shift = int(logpath[1])
151 |
152 | os.system('%s %s' % (get_log_parsing_script(), path_to_log))
153 | data_file = get_data_file(chart_type, path_to_log)
154 | x_axis_field, y_axis_field = get_field_descriptions(chart_type)
155 | x, y = get_field_indices(chart_type, x_axis_field, y_axis_field)
156 | data = load_data(data_file, x, y)
157 |
158 | # TODO: more systematic color cycle for lines
159 | color = [random.random(), random.random(), random.random()]
160 | label = get_data_label(path_to_log)
161 | linewidth = 0.75
162 | # If there are too many datapoints, do not use a marker.
163 | # use_marker = False
164 | if smooth and smooth > 0 and len(data[0]) > smooth:
165 | data[1] = runningMeanFast(data[1], smooth)
166 | data[0] = data[0][:len(data[1])]
167 | data[0] = [d + shift for d in data[0]]
168 |
169 | plt.plot(data[0], data[1], label=label, color=color,
170 | linewidth=linewidth)
171 | os.remove(get_data_file(0, path_to_log))  # remove xxx.log.train (chart type 0 is Train-based)
172 | os.remove(get_data_file(4, path_to_log))  # remove xxx.log.test (chart type 4 is Test-based)
173 |
174 |
175 | def plot_charts(chart_type, path_to_png, path_to_log_list, smooth):
176 | plt.clf()
177 | x_axis_field, y_axis_field = get_field_descriptions(chart_type)
178 | legend_loc = get_legend_loc(chart_type)
179 | for path_to_log in path_to_log_list:
180 | plot_chart(chart_type, path_to_png, path_to_log, smooth)
181 | plt.legend(loc=legend_loc, ncol=1) # adjust ncol to fit the space
182 | plt.title(get_chart_type_description(chart_type))
183 | plt.xlabel(x_axis_field)
184 | plt.ylabel(y_axis_field)
185 | plt.savefig(path_to_png)
186 | plt.show()  # no-op under the Agg backend forced above
187 |
188 |
189 | def print_help():
190 | print """
191 | Usage:
192 | ./plot_training_log.py [--type CHART_TYPE] [--output /path/to/output.png] [--smooth N]
193 | [--watch] /path/to/train.log [/path/to/train2.log /path/to/train3.log ...]
194 | Notes:
195 | CHART_TYPE is an integer in [0-%s]; log file names must end with the lower-cased "%s".
196 | Supported chart types:""" % (len(get_supported_chart_types()) - 1,
197 | get_log_file_suffix())
198 | supported_chart_types = get_supported_chart_types()
199 | num = len(supported_chart_types)
200 | for i in xrange(num):
201 | print ' %d: %s' % (i, supported_chart_types[i])
202 | exit()
203 |
204 |
205 | def is_valid_chart_type(chart_type):
206 | return chart_type >= 0 and chart_type < len(get_supported_chart_types())
207 |
208 | if __name__ == '__main__':
209 | parser = argparse.ArgumentParser()
210 | parser.add_argument('inputs', metavar='FILE.log', type=str, nargs='+')
211 | parser.add_argument('--output', type=str)
212 | parser.add_argument('--type', type=int, default=7)
213 | parser.add_argument('--smooth', type=int, default=50)
214 | parser.add_argument('--watch', action='store_true', default=False)
215 | args = parser.parse_args()
216 | chart_type = args.type
217 | if not is_valid_chart_type(chart_type):
218 | print_help()
219 |
220 | if args.output:
221 | path_to_png = args.output
222 | else:
223 | path_to_png = get_chart_type_description(chart_type)
224 | path_to_png = path_to_png.replace(' ', '-').replace('.', '').lower()
225 | path_to_png = path_to_png + '.png'
226 |
227 | path_to_log_list = args.inputs
228 | for logpath in path_to_log_list:
229 | path_to_log = logpath.split(':')[0]
230 | if not os.path.exists(path_to_log):
231 | print 'Path does not exist: %s' % path_to_log
232 | exit()
233 | if not path_to_log.endswith(get_log_file_suffix()):
234 | print_help()
235 | delay = 60
236 | while True:
237 | print('%s updating chart' %
238 | time.strftime('%Y-%m-%d %H:%M', time.gmtime()))
239 | plot_charts(chart_type, path_to_png, path_to_log_list, args.smooth)
240 | if not args.watch:
241 | break
242 | time.sleep(delay)
243 |
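244 | # Example: keep re-plotting smoothed training loss while a run is in progress
245 | # (illustrative path; flags map to the argparse options above):
246 | #   ./plot_training_log.py --watch --smooth 50 --output loss.png /output/train_0.log
247 | # A log may also be given as path:shift (e.g. train_1000.log:1000) to offset
248 | # its iteration axis, lining up resumed runs on one chart.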
--------------------------------------------------------------------------------
/util/stop-this-instance.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | die() { status=$1; shift; echo "FATAL: $*"; exit $status; }
4 | EC2_INSTANCE_ID="`wget -q -O - http://instance-data/latest/meta-data/instance-id || die \"wget instance-id has failed: $?\"`"
5 | test -n "$EC2_INSTANCE_ID" || die 'cannot obtain instance-id'
6 | EC2_AVAIL_ZONE="`wget -q -O - http://instance-data/latest/meta-data/placement/availability-zone || die \"wget availability-zone has failed: $?\"`"
7 | test -n "$EC2_AVAIL_ZONE" || die 'cannot obtain availability-zone'
8 | EC2_REGION="`echo \"$EC2_AVAIL_ZONE\" | sed -e 's:\([0-9][0-9]*\)[a-z]*\$:\\1:'`"
9 |
10 | echo "About to stop instance $EC2_INSTANCE_ID ($EC2_REGION)"
11 |
12 | read -p "Are you sure? " -n 1 -r
13 | echo # (optional) move to a new line
14 | if [[ $REPLY =~ ^[Yy]$ ]]
15 | then
16 | aws --region $EC2_REGION ec2 stop-instances --instance-ids $EC2_INSTANCE_ID
17 | fi
18 |
19 |
--------------------------------------------------------------------------------
/util/train.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ~/caffe-segnet/build/tools/caffe train -gpu ${3-0} -solver $1 2>&1 | tee $2
4 |
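5 | # Usage: ./train.sh <solver.prototxt> <logfile> [gpu-id]
6 | # e.g.   ./train.sh /output/solver.prototxt /output/train_0.log 0
7 | # The gpu id defaults to 0 via the ${3-0} expansion above.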
--------------------------------------------------------------------------------
/vectorize.js:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env node
2 |
3 | var fs = require('fs')
4 | var path = require('path')
5 | var argv = require('minimist')(process.argv.slice(2))
6 | var polyspine = require('polyspine')
7 | var tilebelt = require('tilebelt')
8 | var flatten = require('geojson-flatten')
9 | var normalize = require('geojson-normalize')
10 | var _ = require('lodash')
11 | var distance = require('turf-line-distance')
12 | var simplify = require('turf-simplify')
13 |
14 | var input = JSON.parse(fs.readFileSync(path.resolve(__dirname, argv._[0])))
15 | input = normalize(input)
16 | input.features = _.flatten(input.features.map(f => flatten(f)))
17 |
18 | // arguments given as filename, then x y z, then distance threshold as ratio of image width
19 | var tile = argv._.slice(1, 4)
20 | var tileBbox = tilebelt.tileToBBOX(tile)
21 |
22 | // given image coordinates (0-255), returns geo coordinates
23 | function scale (coords) {
24 | return [
25 | coords[0] * (Math.abs(tileBbox[2] - tileBbox[0]) / 255) + tileBbox[0],
26 | (255 - coords[1]) * (Math.abs(tileBbox[3] - tileBbox[1]) / 255) + tileBbox[1]
27 | ]
28 | }
29 |
30 | var xDistance = distance({
31 | type: 'LineString',
32 | coordinates: [
33 | [tileBbox[0], tileBbox[1]],
34 | [tileBbox[2], tileBbox[1]]
35 | ]
36 | })
37 | var thresholdDistance = argv._[4] * xDistance
38 |
39 | var scaledInput = {
40 | type: 'FeatureCollection',
41 | features: input.features.map(f => {
42 | return {
43 | type: f.type,
44 | properties: f.properties,
45 | geometry: {
46 | type: f.geometry.type,
47 | coordinates: [f.geometry.coordinates[0].map(c => scale(c))]
48 | }
49 | }
50 | })
51 | }
52 |
53 | var features = polyspine(scaledInput).map(function (linestring) {
54 | return {
55 | type: 'Feature',
56 | properties: {},
57 | geometry: {
58 | type: 'LineString',
59 | coordinates: linestring
60 | }
61 | }
62 | })
63 |
64 | var filteredFeatures = features.filter(f => {
65 | return distance(f) > thresholdDistance && distance(f) < 3 * xDistance
66 | }).map(f => simplify(f, 0.00007))
67 |
68 | console.log(filteredFeatures.map(f => JSON.stringify(f)).join('\n'))
69 |
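70 | // Example (illustrative values; arguments per the comment above: input file,
71 | // tile x y z, then the length threshold as a ratio of the tile width):
72 | //   ./vectorize.js prediction.geojson 18807 24559 16 0.05
73 | // Output is one GeoJSON Feature per line on stdout.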
--------------------------------------------------------------------------------