├── project
│   ├── build.properties
│   ├── build.sbt
│   ├── Version.scala
│   ├── Dependencies.scala
│   └── plugins.sbt
├── viewer
│   ├── README.md
│   ├── .gitignore
│   ├── client
│   │   ├── favicon.ico
│   │   ├── color-ramp-horizontal.png
│   │   └── client.js
│   ├── .eslintrc
│   ├── components
│   │   ├── leaflet-icons.css
│   │   ├── ChartLayer.js
│   │   ├── Catalog.js
│   │   ├── charts
│   │   │   ├── TimeSeries.js
│   │   │   └── IndexComparison.js
│   │   ├── SingleLayer.js
│   │   ├── DiffLayer.js
│   │   ├── Panels.js
│   │   └── Leaflet.js
│   ├── .babelrc
│   ├── utils
│   │   └── utils.js
│   ├── redux
│   │   ├── store.js
│   │   ├── reducer.js
│   │   └── actions.js
│   ├── charts
│   │   └── timeseries.js
│   ├── server
│   │   └── server.js
│   ├── webpack.config.js
│   ├── package.json
│   └── containers
│       └── App.js
├── server
│   ├── src
│   │   └── main
│   │       ├── scala
│   │       │   └── demo
│   │       │       ├── LayerMetadata.scala
│   │       │       ├── NDVI.scala
│   │       │       ├── NDWI.scala
│   │       │       ├── TileReader.scala
│   │       │       ├── FileReaderSet.scala
│   │       │       ├── HBaseReaderSet.scala
│   │       │       ├── S3ReaderSet.scala
│   │       │       ├── AccumuloReaderSet.scala
│   │       │       ├── CassandraReaderSet.scala
│   │       │       ├── HadoopReaderSet.scala
│   │       │       ├── MetadataReader.scala
│   │       │       ├── ReaderSet.scala
│   │       │       ├── Main.scala
│   │       │       ├── Render.scala
│   │       │       └── Router.scala
│   │       └── resources
│   │           └── application.conf
│   ├── build.sbt
│   └── sbt
├── ingest
│   ├── src
│   │   ├── main
│   │   │   ├── scala
│   │   │   │   └── demo
│   │   │   │       ├── etl
│   │   │   │       │   └── landsat
│   │   │   │       │       ├── LandsatModule.scala
│   │   │   │       │       ├── TemporalMultibandLandsatInput.scala
│   │   │   │       │       ├── EtlConfLandsatMethods.scala
│   │   │   │       │       ├── LandsatInput.scala
│   │   │   │       │       └── package.scala
│   │   │   │       └── LandsatIngest.scala
│   │   │   └── resources
│   │   │       └── application.conf
│   │   └── test
│   │       └── scala
│   │           └── demo
│   │               ├── SparkTest.scala
│   │               └── TestEnvironment.scala
│   └── build.sbt
├── config-emr.mk
├── config-aws.mk
├── conf
│   ├── input-local.json
│   ├── input.json
│   ├── output-local.json
│   ├── output.json
│   └── backend-profiles.json
├── config-ingest.mk
├── .gitignore
├── scripts
│   ├── emr
│   │   ├── bootstrap-cassandra.sh
│   │   ├── bootstrap-demo.sh
│   │   ├── bootstrap-geowave.sh
│   │   └── geowave-install-lib.sh
│   └── configurations.json
├── Jenkinsfile
├── bluegreen-deploy.sh
├── Makefile
├── LICENSE
├── README.md
└── sbt
/project/build.properties:
--------------------------------------------------------------------------------
1 | sbt.version=0.13.11
2 |
--------------------------------------------------------------------------------
/project/build.sbt:
--------------------------------------------------------------------------------
1 | scalaVersion := "2.10.6"
2 |
--------------------------------------------------------------------------------
/viewer/README.md:
--------------------------------------------------------------------------------
1 | # geotrellis-viewer
2 |
3 |
--------------------------------------------------------------------------------
/viewer/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules
2 | npm-debug.log
3 | dist
4 | \#*#
5 | *~
6 | .#*
7 |
--------------------------------------------------------------------------------
/project/Version.scala:
--------------------------------------------------------------------------------
1 | object Version {
2 | val geotrellis = "1.2.0-RC2"
3 | val scala = "2.11.11"
4 | }
5 |
--------------------------------------------------------------------------------
/viewer/client/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/geotrellis/geotrellis-landsat-emr-demo/HEAD/viewer/client/favicon.ico
--------------------------------------------------------------------------------
/viewer/.eslintrc:
--------------------------------------------------------------------------------
1 | {
2 | "parser": "babel-eslint",
3 | "env": {
4 | "browser": true,
5 | "node": true
6 | }
7 | }
8 |
--------------------------------------------------------------------------------
/viewer/client/color-ramp-horizontal.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/geotrellis/geotrellis-landsat-emr-demo/HEAD/viewer/client/color-ramp-horizontal.png
--------------------------------------------------------------------------------
/project/Dependencies.scala:
--------------------------------------------------------------------------------
1 | import scala.util.Properties
2 |
3 | import sbt._
4 |
5 | object Dependencies {
6 | val akkaHttp = "com.typesafe.akka" %% "akka-http" % "10.0.3"
7 | }
8 |
--------------------------------------------------------------------------------
/server/src/main/scala/demo/LayerMetadata.scala:
--------------------------------------------------------------------------------
1 | package demo
2 |
3 | import geotrellis.spark.TileLayerMetadata
4 |
5 | case class LayerMetadata[K](rasterMetaData: TileLayerMetadata[K], times: Array[Long])
6 |
--------------------------------------------------------------------------------
/ingest/src/main/scala/demo/etl/landsat/LandsatModule.scala:
--------------------------------------------------------------------------------
1 | package demo.etl.landsat
2 |
3 | import geotrellis.spark.etl.TypedModule
4 |
5 | object LandsatModule extends TypedModule {
6 | register(new TemporalMultibandLandsatInput)
7 | }
8 |
--------------------------------------------------------------------------------
/config-emr.mk:
--------------------------------------------------------------------------------
1 | export NAME := Landsat Demo ${USER}
2 | export MASTER_INSTANCE:=m3.xlarge
3 | export MASTER_PRICE := 0.5
4 | export WORKER_INSTANCE:=m3.2xlarge
5 | export WORKER_PRICE := 0.5
6 | export WORKER_COUNT := 4
7 | export USE_SPOT := true
8 |
--------------------------------------------------------------------------------
/config-aws.mk:
--------------------------------------------------------------------------------
1 | export EC2_KEY:=geotrellis-emr
2 | export AWS_DEFAULT_REGION:=us-east-1
3 | export S3_URI:=s3://geotrellis-demo/emr/${USER}
4 | export SUBNET_ID:=subnet-c5fefdb1
5 | export HOSTED_ZONE:=ZIM2DOAEE0E8U
6 | export ROUTE53_RECORD:=geotrellis-ndvi.geotrellis.io
7 |
--------------------------------------------------------------------------------
/server/src/main/scala/demo/NDVI.scala:
--------------------------------------------------------------------------------
1 | package demo
2 |
3 | import geotrellis.raster._
4 |
5 | object NDVI {
6 | def apply(tile: MultibandTile): Tile =
7 | tile.convert(DoubleCellType).combineDouble(0, 3) { (r, nir) =>
8 | (nir - r) / (nir + r)
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/server/src/main/scala/demo/NDWI.scala:
--------------------------------------------------------------------------------
1 | package demo
2 |
3 | import geotrellis.raster._
4 |
5 | object NDWI extends (MultibandTile => Tile) {
6 | def apply(tile: MultibandTile): Tile =
7 | tile.convert(DoubleCellType).combineDouble(1, 3) { (g, nir) =>
8 | (g - nir) / (g + nir)
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
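
NDVI and NDWI above follow the same pattern: convert the multiband tile to DoubleCellType, then combine two bands, with band indices matching the ingest's --bandsWanted order (4,3,2,5,QA, i.e. red, green, blue, NIR, QA at indices 0-4). A minimal sketch, not part of the repository, of how the two objects behave on a toy 1x1 tile:

    package demo

    import geotrellis.raster._

    object IndexSanityCheck extends App {
      // Hypothetical 1x1 tile with red = 10, green = 20, blue = 30, nir = 50,
      // in the band order produced by the demo ingest (indices 0-3).
      val mb = ArrayMultibandTile(
        IntArrayTile(Array(10), 1, 1), // red
        IntArrayTile(Array(20), 1, 1), // green
        IntArrayTile(Array(30), 1, 1), // blue
        IntArrayTile(Array(50), 1, 1)  // nir
      )

      println(NDVI(mb).getDouble(0, 0)) // (50 - 10) / (50 + 10) ~ 0.667
      println(NDWI(mb).getDouble(0, 0)) // (20 - 50) / (20 + 50) ~ -0.429
    }
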
/project/plugins.sbt:
--------------------------------------------------------------------------------
1 | addSbtPlugin("org.foundweekends" % "sbt-bintray" % "0.5.1")
2 | addSbtPlugin("io.spray" % "sbt-revolver" % "0.9.1")
3 | addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.5")
4 | addSbtPlugin("net.virtual-void" % "sbt-dependency-graph" % "0.9.0")
5 | addSbtPlugin("com.timushev.sbt" % "sbt-updates" % "0.3.3")
6 |
--------------------------------------------------------------------------------
/conf/input-local.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "name": "landsat",
4 | "format": "geotiff",
5 | "backend": {
6 | "type": "landsat",
7 | "path": "--cache catalog-cache --bandsWanted 4,3,2,5,QA --bbox -43.9883,-22.3221,-42.7695,-21.0435 --startDate 2015-06-01 --endDate 2015-06-10 --limit 1"
8 | },
9 | "cache": "NONE"
10 | }
11 | ]
--------------------------------------------------------------------------------
/conf/input.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "name": "japan-typhoon",
4 | "format": "geotiff",
5 | "backend": {
6 | "type": "landsat",
7 | "path": "--cache /tmp/catalog-cache --bandsWanted 4,3,2,5,QA --bbox 135.35,33.23,143.01,41.1 --startDate 2015-07-01 --endDate 2015-11-30 --maxCloudCoverage 30 --limit 10"
8 | },
9 | "cache": "NONE"
10 | }
11 | ]
--------------------------------------------------------------------------------
/config-ingest.mk:
--------------------------------------------------------------------------------
1 | # Query parameters
2 | export LAYER_NAME := japan-typhoon
3 | export START_DATE := 2015-07-01
4 | export END_DATE := 2015-11-30
5 | export BBOX := 135.35,33.23,143.01,41.1
6 | export MAX_CLOUD_COVERAGE := 30.0
7 |
8 | export DRIVER_MEMORY := 4200M
9 | export DRIVER_CORES := 2
10 | export EXECUTOR_MEMORY := 4200M
11 | export EXECUTOR_CORES := 2
12 | export YARN_OVERHEAD := 700
13 |
--------------------------------------------------------------------------------
/viewer/components/leaflet-icons.css:
--------------------------------------------------------------------------------
1 | .unselected-marker {
2 | background-color: rgba(255, 255, 255, 0.5);
3 | border: 2px solid rgb(100, 197, 157);
4 | border-radius: 50%;
5 | width: 15px;
6 | height: 15px;
7 | }
8 |
9 | .selected-marker {
10 | background-color: rgba(255, 255, 255, 0.5);
11 | border: 3px solid rgb(255, 255, 100);
12 | border-radius: 50%;
13 | width: 18px;
14 | height: 18px;
15 | }
16 |
--------------------------------------------------------------------------------
/viewer/.babelrc:
--------------------------------------------------------------------------------
1 | {
2 | "stage": 2,
3 | "env": {
4 | "development": {
5 | "plugins": [
6 | "react-display-name",
7 | "react-transform"
8 | ],
9 | "extra": {
10 | "react-transform": {
11 | "transforms": [{
12 | "transform": "react-transform-hmr",
13 | "imports": ["react"],
14 | "locals": ["module"]
15 | }]
16 | }
17 | }
18 | }
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
/conf/output-local.json:
--------------------------------------------------------------------------------
1 | {
2 | "backend": {
3 | "type": "file",
4 | "path": "catalog"
5 | },
6 | "reprojectMethod": "buffered",
7 | "cellSize": {
8 | "width": 256.0,
9 | "height": 256.0
10 | },
11 | "tileSize": 256,
12 | "pyramid": true,
13 | "resampleMethod": "bilinear",
14 | "keyIndexMethod": {
15 | "type": "zorder",
16 | "temporalResolution": 86400000
17 | },
18 | "layoutScheme": "zoomed",
19 | "crs": "EPSG:3857",
20 | "maxZoom": 13
21 | }
22 |
--------------------------------------------------------------------------------
/viewer/utils/utils.js:
--------------------------------------------------------------------------------
1 | import _ from 'lodash';
2 | 
3 | /**
4 |  * If all the arguments given to this function are defined, the returned function
5 |  * applies its callback `f` to those arguments; otherwise it always evaluates to undefined.
6 |  */
7 | export default function ifAllDefined() {
8 |   if (! _.reduce(_.map(arguments, _.isUndefined), (a, b) => { return a || b })) {
9 |     return f => {
10 |       return f.apply(this, arguments)
11 |     }
12 |   } else {
13 |     return f => {
14 |       return undefined;
15 |     }
16 |   }
17 | }
--------------------------------------------------------------------------------
/viewer/redux/store.js:
--------------------------------------------------------------------------------
1 | import { applyMiddleware, compose, createStore } from 'redux';
2 | import reducer from './reducer';
3 | import logger from 'redux-logger';
4 | import thunk from 'redux-thunk';
5 |
6 | var finalCreateStore = compose(
7 | applyMiddleware(thunk, logger())
8 | )(createStore);
9 |
10 | var configureStore = function(initialState) {
11 | initialState = initialState || {};
12 | return finalCreateStore(reducer, initialState);
13 | };
14 |
15 | module.exports = configureStore;
16 |
--------------------------------------------------------------------------------
/conf/output.json:
--------------------------------------------------------------------------------
1 | {
2 | "backend": {
3 | "type": "accumulo",
4 | "path": "tiles",
5 | "profile": "accumulo-emr"
6 | },
7 | "reprojectMethod": "buffered",
8 | "cellSize": {
9 | "width": 256.0,
10 | "height": 256.0
11 | },
12 | "tileSize": 256,
13 | "pyramid": true,
14 | "resampleMethod": "bilinear",
15 | "keyIndexMethod": {
16 | "type": "zorder",
17 | "temporalResolution": 86400000
18 | },
19 | "layoutScheme": "zoomed",
20 | "crs": "EPSG:3857",
21 | "maxZoom": 13
22 | }
23 |
--------------------------------------------------------------------------------
/viewer/components/ChartLayer.js:
--------------------------------------------------------------------------------
1 | "use strict";
2 | import React from 'react';
3 | import _ from 'lodash';
4 | import { PanelGroup, Panel, Input, Button, ButtonGroup, Form } from 'react-bootstrap';
5 | import SingleLayer from "./SingleLayer";
6 | import DiffLayer from "./DiffLayer";
7 | import DiffLayers from "./DiffLayers";
8 | import AverageByState from "./AverageByState";
9 | import AverageDiffByState from "./AverageDiffByState";
10 |
11 | var Panels = React.createClass({
12 | getInitialState: function () {
13 | return {
14 |
--------------------------------------------------------------------------------
/ingest/src/main/resources/application.conf:
--------------------------------------------------------------------------------
1 | geotrellis.accumulo.threads {
2 | collection.read = default
3 | rdd.write = default
4 | }
5 | geotrellis.file.threads {
6 | collection.read = default
7 | rdd.read = default
8 | }
9 | geotrellis.hadoop.threads {
10 | collection.read = default
11 | }
12 | geotrellis.cassandra.threads {
13 | collection.read = default
14 | rdd {
15 | write = default
16 | read = default
17 | }
18 | }
19 | geotrellis.s3.threads {
20 | collection.read = default
21 | rdd {
22 | write = default
23 | read = default
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/viewer/charts/timeseries.js:
--------------------------------------------------------------------------------
1 | import MG from "metrics-graphics";
2 | import "metrics-graphics/dist/metricsgraphics.css";
3 |
4 | var timeSeries = function(targetElemId, data, title, width, height, rightOffset, xAccessor, yAccessor) {
5 | MG.data_graphic({
6 | target: document.getElementById(targetElemId),
7 | data: data,
8 | title: title || "",
9 | width: width || 400,
10 | height: height || 200,
11 | right: rightOffset || 40,
12 | x_accessor: xAccessor || 'date',
13 | y_accessor: yAccessor || 'value'
14 | });
15 | };
16 |
17 | module.exports = timeSeries;
18 |
19 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | project/boot
3 | project/plugins/project
4 | project/plugins/target
5 | project/target
6 | viewer/dist
7 | viewer/site.tgz
8 | cluster-id.txt
9 | last-step-id.txt
10 | target
11 | .ensime
12 | .ensime_cache/
13 | \#*#
14 | *~
15 | .#*
16 | .lib
17 | *.aux.xml
18 | *.jar
19 | *.crc
20 | _SUCCESS
21 |
22 | *.pyc
23 | .project
24 | .classpath
25 | .cache
26 | .settings
27 | .history
28 | .idea
29 | .DS_Store
30 | *.iml
31 | *.swp
32 | *.swo
33 | *.sublime-*
34 | .vagrant
35 | .ensime
36 | .ensime_cache
37 | tags
38 | catalog
39 | catalog-cache/
40 | scripts/upsert.json
41 | config-aws.mk
42 | logs
43 |
--------------------------------------------------------------------------------
/server/src/main/scala/demo/TileReader.scala:
--------------------------------------------------------------------------------
1 | package demo
2 |
3 | import geotrellis.spark._
4 | import geotrellis.spark.io._
5 | import geotrellis.spark.io.avro._
6 |
7 | import spray.json._
8 |
9 | import scala.collection.concurrent.TrieMap
10 | import scala.reflect._
11 |
12 | class TileReader[K: AvroRecordCodec: JsonFormat: ClassTag, V: AvroRecordCodec](
13 | valueReader: ValueReader[LayerId]
14 | ) {
15 | private val cache = new TrieMap[LayerId, Reader[K, V]]
16 |
17 | def read(layerId: LayerId, key: K): V = {
18 | val reader = cache.getOrElseUpdate(layerId, valueReader.reader[K,V](layerId))
19 | reader.read(key)
20 | }
21 | }
22 |
--------------------------------------------------------------------------------
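
TileReader is a small caching wrapper around a ValueReader: the first read for a LayerId materializes a typed Reader[K, V] via valueReader.reader, and later reads for the same layer reuse it from the TrieMap. A usage sketch against the file backend, assuming a hypothetical catalog path, layer name, zoom and key (the real wiring lives in the ReaderSet implementations that follow):

    package demo

    import geotrellis.raster._
    import geotrellis.spark._
    import geotrellis.spark.io._
    import geotrellis.spark.io.file._

    import java.time.{ZoneOffset, ZonedDateTime}

    object TileReaderExample extends App {
      // Hypothetical catalog path, layer, zoom and spatio-temporal key.
      val tiles = new TileReader[SpaceTimeKey, MultibandTile](FileValueReader("/tmp/catalog"))
      val time  = ZonedDateTime.of(2015, 7, 1, 0, 0, 0, 0, ZoneOffset.UTC)
      val tile  = tiles.read(LayerId("japan-typhoon", 8), SpaceTimeKey(227, 101, time))
      println(tile.dimensions)
    }
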
/server/src/main/scala/demo/FileReaderSet.scala:
--------------------------------------------------------------------------------
1 | package demo
2 |
3 | import geotrellis.raster._
4 | import geotrellis.spark._
5 | import geotrellis.spark.io._
6 | import geotrellis.spark.io.file._
7 |
8 | import org.apache.spark.SparkContext
9 |
10 | class FileReaderSet(path: String)(implicit sc: SparkContext) extends ReaderSet {
11 | val attributeStore = FileAttributeStore(path)
12 | val metadataReader = new MetadataReader(attributeStore)
13 | val layerReader = FileLayerReader(attributeStore)
14 | val layerCReader = FileCollectionLayerReader(attributeStore)
15 | val singleBandTileReader = new TileReader[SpaceTimeKey, Tile](FileValueReader(path))
16 | val multiBandTileReader = new TileReader[SpaceTimeKey, MultibandTile](FileValueReader(path))
17 | }
18 |
--------------------------------------------------------------------------------
/server/src/main/scala/demo/HBaseReaderSet.scala:
--------------------------------------------------------------------------------
1 | package demo
2 |
3 | import geotrellis.raster._
4 | import geotrellis.spark._
5 | import geotrellis.spark.io._
6 | import geotrellis.spark.io.hbase._
7 |
8 | import org.apache.spark.SparkContext
9 |
10 | class HBaseReaderSet(instance: HBaseInstance)(implicit sc: SparkContext) extends ReaderSet {
11 | val attributeStore = HBaseAttributeStore(instance)
12 | val metadataReader = new MetadataReader(attributeStore)
13 | val layerReader = HBaseLayerReader(instance)
14 | val layerCReader = HBaseCollectionLayerReader(instance)
15 | val singleBandTileReader = new TileReader[SpaceTimeKey, Tile](HBaseValueReader(instance))
16 | val multiBandTileReader = new TileReader[SpaceTimeKey, MultibandTile](HBaseValueReader(instance))
17 | }
18 |
--------------------------------------------------------------------------------
/viewer/client/client.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { render } from 'react-dom';
3 | import { Provider } from 'react-redux';
4 | import App from '../containers/App';
5 | import configureStore from '../redux/store.js'
6 |
7 | var initialState = {
8 | rootUrl: "http://" + window.location.hostname + ":8899",
9 | layerName: undefined,
10 | analysisLayer: undefined,
11 | layerType: 'singleLayer',
12 | times: {}, // A map from layer to times selected for that layer
13 | ndi: 'ndvi',
14 | map: {
15 | url: [],
16 | bounds: undefined
17 | },
18 | catalog: {
19 | layers : []
20 | }
21 | };
22 |
23 | var store = configureStore(initialState);
24 |
25 | render(
26 |   <Provider store={store}>
27 |     <App />
28 |   </Provider>,
29 |   document.body
30 | );
31 |
--------------------------------------------------------------------------------
/server/src/main/scala/demo/S3ReaderSet.scala:
--------------------------------------------------------------------------------
1 | package demo
2 |
3 | import geotrellis.raster._
4 | import geotrellis.spark._
5 | import geotrellis.spark.io._
6 | import geotrellis.spark.io.s3._
7 |
8 | import org.apache.spark.SparkContext
9 |
10 | class S3ReaderSet(bucket: String, prefix: String)(implicit sc: SparkContext) extends ReaderSet {
11 | val attributeStore = S3AttributeStore(bucket, prefix)
12 | val metadataReader = new MetadataReader(attributeStore)
13 | val layerReader = S3LayerReader(attributeStore)
14 | val layerCReader = S3CollectionLayerReader(attributeStore)
15 | val singleBandTileReader = new TileReader[SpaceTimeKey, Tile](S3ValueReader(bucket, prefix))
16 | val multiBandTileReader = new TileReader[SpaceTimeKey, MultibandTile](S3ValueReader(bucket, prefix))
17 | }
18 |
--------------------------------------------------------------------------------
/ingest/src/test/scala/demo/SparkTest.scala:
--------------------------------------------------------------------------------
1 | package demo
2 |
3 | import org.scalatest._
4 | import geotrellis.spark.testkit._
5 |
6 | class SparkSampleSpec extends FunSpec with TestEnvironment with Matchers {
7 | describe("Sample spark test") {
8 | it("can trigger a spark job") {
9 | sc.parallelize(Array(1,2,3,4)).reduce(_ + _) should be (10)
10 | }
11 |
12 |     it("should serialize a case class without errors") {
13 | case class Box(x: Int)
14 | val b = Box(10)
15 | b.serializeAndDeserialize()
16 | }
17 |
18 | it("should fail to serialize some class") {
19 | class Box(val x: Int)
20 | val b = new Box(10)
21 | intercept[java.io.NotSerializableException] {
22 | b.serializeAndDeserialize()
23 | }
24 | }
25 | }
26 |
27 |
28 | }
29 |
--------------------------------------------------------------------------------
/server/src/main/scala/demo/AccumuloReaderSet.scala:
--------------------------------------------------------------------------------
1 | package demo
2 |
3 | import geotrellis.raster._
4 | import geotrellis.spark._
5 | import geotrellis.spark.io._
6 | import geotrellis.spark.io.accumulo._
7 |
8 | import org.apache.spark.SparkContext
9 |
10 | class AccumuloReaderSet(instance: AccumuloInstance)(implicit sc: SparkContext) extends ReaderSet {
11 | val attributeStore = AccumuloAttributeStore(instance.connector)
12 | val metadataReader = new MetadataReader(attributeStore)
13 | val layerReader = AccumuloLayerReader(instance)
14 | val layerCReader = AccumuloCollectionLayerReader(instance)
15 | val singleBandTileReader = new TileReader[SpaceTimeKey, Tile](AccumuloValueReader(instance))
16 | val multiBandTileReader = new TileReader[SpaceTimeKey, MultibandTile](AccumuloValueReader(instance))
17 | }
18 |
--------------------------------------------------------------------------------
/server/src/main/scala/demo/CassandraReaderSet.scala:
--------------------------------------------------------------------------------
1 | package demo
2 |
3 | import geotrellis.raster._
4 | import geotrellis.spark._
5 | import geotrellis.spark.io._
6 | import geotrellis.spark.io.cassandra._
7 |
8 | import org.apache.spark.SparkContext
9 |
10 | class CassandraReaderSet(instance: CassandraInstance)(implicit sc: SparkContext) extends ReaderSet {
11 | val attributeStore = CassandraAttributeStore(instance)
12 | val metadataReader = new MetadataReader(attributeStore)
13 | val layerReader = CassandraLayerReader(instance)
14 | val layerCReader = CassandraCollectionLayerReader(instance)
15 | val singleBandTileReader = new TileReader[SpaceTimeKey, Tile](CassandraValueReader(instance))
16 | val multiBandTileReader = new TileReader[SpaceTimeKey, MultibandTile](CassandraValueReader(instance))
17 | }
18 |
--------------------------------------------------------------------------------
/server/src/main/scala/demo/HadoopReaderSet.scala:
--------------------------------------------------------------------------------
1 | package demo
2 |
3 | import geotrellis.raster._
4 | import geotrellis.spark._
5 | import geotrellis.spark.io._
6 | import geotrellis.spark.io.hadoop._
7 |
8 | import org.apache.hadoop.fs.Path
9 | import org.apache.spark.SparkContext
10 |
11 | class HadoopReaderSet(path: Path)(implicit sc: SparkContext) extends ReaderSet {
12 | val attributeStore = HadoopAttributeStore(path)
13 | val metadataReader = new MetadataReader(attributeStore)
14 | val layerReader = HadoopLayerReader(attributeStore)
15 | val layerCReader = HadoopCollectionLayerReader(attributeStore)
16 | val singleBandTileReader = new TileReader[SpaceTimeKey, Tile](HadoopValueReader(path))
17 | val multiBandTileReader = new TileReader[SpaceTimeKey, MultibandTile](HadoopValueReader(path))
18 | }
19 |
--------------------------------------------------------------------------------
/server/src/main/resources/application.conf:
--------------------------------------------------------------------------------
1 | tutorial.colorbreaks="0:ffffe5ff;0.1:f7fcb9ff;0.2:d9f0a3ff;0.3:addd8eff;0.4:78c679ff;0.5:41ab5dff;0.6:238443ff;0.7:006837ff;1:004529ff"
2 | spray.can.server {
3 | request-timeout = 4 minutes
4 | idle-timeout = 10 minutes
5 | }
6 | geotrellis.accumulo.threads {
7 | collection.read = default
8 | rdd.write = default
9 | }
10 | geotrellis.file.threads {
11 | collection.read = default
12 | rdd.read = default
13 | }
14 | geotrellis.hadoop.threads {
15 | collection.read = default
16 | }
17 | geotrellis.cassandra.threads {
18 | collection.read = default
19 | rdd {
20 | write = default
21 | read = default
22 | }
23 | }
24 | geotrellis.s3.threads {
25 | collection.read = default
26 | rdd {
27 | write = default
28 | read = default
29 | }
30 | }
31 |
--------------------------------------------------------------------------------
/viewer/server/server.js:
--------------------------------------------------------------------------------
1 | var config = require('../webpack.config')
2 | var express = require('express');
3 | var path = require('path');
4 | var webpack = require('webpack')
5 | var webpackDevMiddleware = require('webpack-dev-middleware')
6 | var webpackHotMiddleware = require('webpack-hot-middleware')
7 |
8 | var app = express();
9 | var port = 3000;
10 |
11 | var compiler = webpack(config);
12 | app.use(webpackDevMiddleware(compiler, { noInfo: true, publicPath: config.output.publicPath }));
13 | app.use(webpackHotMiddleware(compiler));
14 |
15 | app.use(express.static('./dist'));
16 |
17 | app.get("/", function(req, res) {
18 | res.sendFile(path.resolve('client/index.html'));
19 | });
20 |
21 | app.get("/favicon.ico", function(req, res) {
22 | res.sendFile(path.resolve('client/favicon.ico'));
23 | });
24 |
25 | app.listen(port, function(error) {
26 | if (error) {
27 | console.error(error);
28 | } else {
29 | console.log("Express server listening on port", port);
30 | }
31 | });
32 |
--------------------------------------------------------------------------------
/viewer/components/Catalog.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { Input, Button } from 'react-bootstrap';
3 |
4 | var Catalog = React.createClass({
5 | getInitialState: function () {
6 | return {
7 | url: this.props.defaultUrl
8 | };
9 | },
10 | handleSubmit: function() {
11 | this.props.onSubmit(this.state.url);
12 | },
13 | handleOnChange: function (ev) {
14 | this.setState({url: ev.target.value});
15 | },
16 | handleKeyDown: function(ev) {
17 | if (ev.keyCode == 13) {
18 | this.handleSubmit();
19 | }
20 | },
21 | render: function() {
22 | const goButton = ;
23 | return (
24 |
25 |
34 |
35 | );
36 | }
37 | });
38 |
39 | module.exports = Catalog;
40 |
--------------------------------------------------------------------------------
/scripts/emr/bootstrap-cassandra.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # Bootstrap docker Cassandra on EMR cluster
4 | for i in "$@"
5 | do
6 | case $i in
7 | --continue)
8 | CONTINUE=true
9 | shift ;;
10 | *)
11 | ;;
12 | esac
13 | done
14 |
15 | is_master() {
16 | if [ $(jq '.isMaster' /mnt/var/lib/info/instance.json) = 'true' ]; then
17 | return 0
18 | else
19 | return 1
20 | fi
21 | }
22 |
23 | if [ ! $CONTINUE ]; then
24 | sudo yum -y install docker
25 | sudo usermod -aG docker hadoop
26 | sudo service docker start
27 |
28 | THIS_SCRIPT="$(realpath "${BASH_SOURCE[0]}")"
29 | TIMEOUT= is_master && TIMEOUT=3 || TIMEOUT=4
30 | echo "bash -x $THIS_SCRIPT --continue > /tmp/cassandra-bootstrap.log" | at now + $TIMEOUT min
31 | exit 0 # Bail and let EMR finish initializing
32 | fi
33 |
34 | MASTER_IP=$(xmllint --xpath "//property[name='yarn.resourcemanager.hostname']/value/text()" /etc/hadoop/conf/yarn-site.xml)
35 |
36 | sudo mkdir -p /mnt2/cassandra
37 | sudo docker run --name=cassandra -d --net=host \
38 | -v /mnt2/cassandra:/var/lib/cassandra \
39 | -e CASSANDRA_SEEDS=${MASTER_IP} \
40 | cassandra:latest
41 |
--------------------------------------------------------------------------------
/server/src/main/scala/demo/MetadataReader.scala:
--------------------------------------------------------------------------------
1 | package demo
2 |
3 |
4 | import geotrellis.spark._
5 | import geotrellis.spark.io._
6 |
7 | import spray.json._
8 | import spray.json.DefaultJsonProtocol._
9 |
10 | /** In addition to reading the layer metadata, this also looks up the time stamps available for the layer */
11 | class MetadataReader(attributeStore: AttributeStore) {
12 | def read[K: SpatialComponent: JsonFormat](layer: LayerId) = {
13 | val md = attributeStore.readMetadata[TileLayerMetadata[K]](layer)
14 | val times = attributeStore.read[Array[Long]](LayerId(layer.name, 0), "times")
15 | LayerMetadata(md, times)
16 | }
17 |
18 | lazy val layerNamesToZooms =
19 | attributeStore.layerIds
20 | .groupBy(_.name)
21 | .map { case (name, layerIds) => (name, layerIds.map(_.zoom).sorted.toArray) }
22 | .toMap
23 |
24 | lazy val layerNamesToMaxZooms: Map[String, Int] =
25 | layerNamesToZooms.mapValues(_.max)
26 |
27 |   /** Read an attribute that pertains to all zoom levels of the layer;
28 |     * by convention it is stored under zoom level 0. */
29 | def readLayerAttribute[T: JsonFormat](layerName: String, attributeName: String): T =
30 | attributeStore.read[T](LayerId(layerName, 0), attributeName)
31 | }
32 |
--------------------------------------------------------------------------------
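
The zoom 0 convention above means that per-layer (rather than per-zoom) attributes such as the "times" array are written once under LayerId(name, 0) and read back the same way for every zoom level. A minimal sketch of that pairing against a file catalog (the catalog path is hypothetical; the layer name comes from conf/input.json):

    package demo

    import geotrellis.spark._
    import geotrellis.spark.io._
    import geotrellis.spark.io.file._

    import spray.json.DefaultJsonProtocol._

    object TimesAttributeExample extends App {
      val attributeStore = FileAttributeStore("/tmp/catalog")

      // Written once for the whole layer, keyed at zoom 0, which is where MetadataReader looks...
      attributeStore.write(LayerId("japan-typhoon", 0), "times", Array(1435708800000L))

      // ...and read back regardless of which zoom level is being served.
      val times = new MetadataReader(attributeStore)
        .readLayerAttribute[Array[Long]]("japan-typhoon", "times")
      println(times.toList)
    }
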
/ingest/build.sbt:
--------------------------------------------------------------------------------
1 | name := "ingest"
2 | scalaVersion := Version.scala
3 | javaOptions += "-Xmx8G"
4 |
5 | fork in run := true
6 |
7 | connectInput in run := true
8 |
9 | libraryDependencies ++= Seq(
10 | "com.azavea" %% "scala-landsat-util" % "1.0.1-SNAPSHOT",
11 | "org.locationtech.geotrellis" %% "geotrellis-spark-etl" % Version.geotrellis,
12 | "org.apache.spark" %% "spark-core" % "2.1.0" % "provided",
13 | "org.locationtech.geotrellis" %% "geotrellis-spark-testkit" % Version.geotrellis % "test",
14 | "org.scalatest" %% "scalatest" % "3.0.1" % "test"
15 | )
16 |
17 | assemblyShadeRules in assembly := {
18 | val shadePackage = "com.azavea.shaded.demo"
19 | Seq(
20 | ShadeRule.rename("com.google.common.**" -> s"$shadePackage.google.common.@1")
21 | .inLibrary(
22 | "com.azavea.geotrellis" %% "geotrellis-cassandra" % Version.geotrellis,
23 | "com.github.fge" % "json-schema-validator" % "2.2.6"
24 | ).inAll
25 | )
26 | }
27 |
28 | test in assembly := {}
29 |
30 | assemblyMergeStrategy in assembly := {
31 | case m if m.toLowerCase.endsWith("manifest.mf") => MergeStrategy.discard
32 | case m if m.toLowerCase.matches("meta-inf.*\\.sf$") => MergeStrategy.discard
33 | case "reference.conf" | "application.conf" => MergeStrategy.concat
34 | case _ => MergeStrategy.first
35 | }
36 |
37 |
--------------------------------------------------------------------------------
/conf/backend-profiles.json:
--------------------------------------------------------------------------------
1 | {
2 | "backend-profiles": [
3 | {
4 | "name": "accumulo-emr",
5 | "type": "accumulo",
6 | "zookeepers": "",
7 | "instance": "accumulo",
8 | "user": "root",
9 | "password": "secret"
10 | },
11 | {
12 | "name": "accumulo-local",
13 | "type": "accumulo",
14 | "zookeepers": "localhost",
15 | "instance": "gis",
16 | "user": "user",
17 | "password": "secret"
18 | },
19 | {
20 | "name": "cassandra-local",
21 | "type": "cassandra",
22 | "allowRemoteDCsForLocalConsistencyLevel": false,
23 | "localDc": "datacenter1",
24 | "usedHostsPerRemoteDc": 0,
25 | "hosts": "localhost",
26 | "replicationStrategy": "SimpleStrategy",
27 | "replicationFactor": 1,
28 | "user": "",
29 | "password": ""
30 | },
31 | {
32 | "name": "cassandra-emr",
33 | "type": "cassandra",
34 | "allowRemoteDCsForLocalConsistencyLevel": false,
35 | "localDc":"datacenter1",
36 | "usedHostsPerRemoteDc": 0,
37 | "hosts": "",
38 | "replicationStrategy": "SimpleStrategy",
39 | "replicationFactor": 1,
40 | "user": "",
41 | "password": ""
42 | },
43 | {
44 | "name": "hbase-emr",
45 | "type": "hbase",
46 | "master": "",
47 | "zookeepers": ""
48 | }
49 | ]
50 | }
--------------------------------------------------------------------------------
/ingest/src/test/scala/demo/TestEnvironment.scala:
--------------------------------------------------------------------------------
1 | package demo
2 |
3 | import geotrellis.spark.testkit._
4 | import org.apache.spark._
5 | import org.scalatest._
6 |
7 | trait TestEnvironment extends BeforeAndAfterAll
8 | with TileLayerRDDBuilders
9 | with TileLayerRDDMatchers
10 | { self: Suite with BeforeAndAfterAll =>
11 |
12 |
13 | def setKryoRegistrator(conf: SparkConf): Unit =
14 | conf
15 | lazy val _sc: SparkContext = {
16 | System.setProperty("spark.driver.port", "0")
17 | System.setProperty("spark.hostPort", "0")
18 | System.setProperty("spark.ui.enabled", "false")
19 |
20 | val conf = new SparkConf()
21 | .setMaster("local")
22 | .setAppName("Test Context")
23 | .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
24 | .set("spark.kryo.registrator", "geotrellis.spark.io.kryo.KryoRegistrator")
25 | .set("spark.kryoserializer.buffer.max", "500m")
26 | .set("spark.kryo.registrationRequired","false")
27 |
28 | val sparkContext = new SparkContext(conf)
29 |
30 | System.clearProperty("spark.driver.port")
31 | System.clearProperty("spark.hostPort")
32 | System.clearProperty("spark.ui.enabled")
33 |
34 | sparkContext
35 | }
36 |
37 | implicit def sc: SparkContext = _sc
38 |
39 | // get the name of the class which mixes in this trait
40 | val name = this.getClass.getName
41 |
42 | // a hadoop configuration
43 | val conf = sc.hadoopConfiguration
44 |
45 | override def afterAll() = sc.stop()
46 | }
47 |
--------------------------------------------------------------------------------
/Jenkinsfile:
--------------------------------------------------------------------------------
1 | /* The pipeline job parameters are given as Groovy variables,
2 |  * which may either be used in string interpolation or
3 |  * assigned to the env dictionary to be exposed as environment
4 | * variables in the shell command.
5 | *
6 |  * This is in contrast to a freestyle job, where the job parameters
7 |  * would come in as shell environment variables directly.
8 | */
9 | env.EC2_KEY = EC2_KEY
10 | env.S3_URI = S3_URI
11 | env.BBOX = BBOX
12 | env.START_DATE = START_DATE
13 | env.END_DATE = END_DATE
14 | env.WORKER_COUNT = WORKER_COUNT
15 |
16 | node {
17 | withCredentials(
18 | [[$class: 'UsernamePasswordMultiBinding',
19 | credentialsId: CREDENTIALS,
20 | usernameVariable: 'AWS_ACCESS_KEY_ID',
21 | passwordVariable: 'AWS_SECRET_ACCESS_KEY'
22 | ]])
23 | {
24 | stage "Launch"
25 | sh "make -e create-cluster || exit 1"
26 | sh "make -e start-ingest || (make -e terminate-cluster && exit 1)"
27 |
28 | stage "Wait"
29 | sh "make -e wait-for-step || (make -e terminate-cluster && exit 1)"
30 |
31 | stage "Cleanup"
32 | def terminate = input (id: 'Cluster Cleanup',
33 | message: 'Cluster Cleanup',
34 | ok: 'Okay',
35 | parameters: [
36 | [
37 | $class: 'BooleanParameterDefinition',
38 | defaultValue: true,
39 | name: 'TERMINATE_CLUSTER',
40 | description: 'Finish jenkins job and terminate cluster'
41 | ]
42 | ])
43 | if (terminate) { sh "make -e terminate-cluster" }
44 | }
45 | }
46 |
--------------------------------------------------------------------------------
/server/build.sbt:
--------------------------------------------------------------------------------
1 | name := "server"
2 | scalaVersion := Version.scala
3 | javaOptions += "-Xmx4G"
4 |
5 | fork in run := true
6 |
7 | connectInput in run := true
8 |
9 | libraryDependencies ++= Seq(
10 | "org.locationtech.geotrellis" %% "geotrellis-spark" % Version.geotrellis,
11 | "org.locationtech.geotrellis" %% "geotrellis-s3" % Version.geotrellis,
12 | "org.locationtech.geotrellis" %% "geotrellis-accumulo" % Version.geotrellis,
13 | "org.locationtech.geotrellis" %% "geotrellis-hbase" % Version.geotrellis,
14 | "org.locationtech.geotrellis" %% "geotrellis-cassandra" % Version.geotrellis,
15 | "org.apache.spark" %% "spark-core" % "2.1.0" % "provided",
16 | Dependencies.akkaHttp,
17 | "com.typesafe.akka" %% "akka-http-spray-json" % "10.0.3",
18 | "ch.megard" %% "akka-http-cors" % "0.1.11",
19 | "org.scalatest" %% "scalatest" % "3.0.1" % "test"
20 | )
21 |
22 | Revolver.settings
23 |
24 | assemblyShadeRules in assembly := {
25 | val shadePackage = "com.azavea.shaded.demo"
26 | Seq(
27 | ShadeRule.rename("com.google.common.**" -> s"$shadePackage.google.common.@1")
28 | .inLibrary(
29 | "com.azavea.geotrellis" %% "geotrellis-cassandra" % Version.geotrellis,
30 | "com.github.fge" % "json-schema-validator" % "2.2.6"
31 | ).inAll
32 | )
33 | }
34 |
35 | test in assembly := {}
36 |
37 | assemblyMergeStrategy in assembly := {
38 | case m if m.toLowerCase.endsWith("manifest.mf") => MergeStrategy.discard
39 | case m if m.toLowerCase.matches("meta-inf.*\\.sf$") => MergeStrategy.discard
40 | case "reference.conf" | "application.conf" => MergeStrategy.concat
41 | case _ => MergeStrategy.first
42 | }
43 |
--------------------------------------------------------------------------------
/bluegreen-deploy.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | function cluster_color() {
4 | COLOR=$( aws emr describe-cluster --cluster-id $1 --output text \
5 | | egrep "TAGS\s+Color" \
6 | | cut -f3 \
7 | | tr "[:upper:]" "[:lower:]" )
8 |
9 |   if [ "$COLOR" != "" ] ; then
10 | echo "$1:$COLOR"
11 | fi
12 | }
13 |
14 | # Find active clusters with my name
15 | IDS=$(aws emr list-clusters --active --output text \
16 | | egrep CLUSTERS \
17 | | egrep "$NAME" \
18 | | cut -f2)
19 |
20 | # List only clusters that have a color
21 | COLORS=$( for ID in $IDS; do cluster_color $ID; done; )
22 |
23 | # Verify there is only one color up
24 | COUNT=$(wc -w <<< $COLORS)
25 | if [[ $COUNT -gt 1 ]]; then
26 | echo "Multiple active clusters named '$NAME':"
27 | echo $COLORS
28 | exit 1
29 | fi
30 |
31 | # Pick a new color and note old cluster id
32 | if [[ $COUNT -gt 0 ]]; then
33 | OLD_CLUSTER=$(cut -f1 -d: <<< $COLORS)
34 | OLD_COLOR=$(cut -f2 -d: <<< $COLORS)
35 | if [ $OLD_COLOR = "blue" ]; then
36 | NEW_COLOR="green"
37 | elif [ $OLD_COLOR = "green" ]; then
38 | NEW_COLOR="blue"
39 | else
40 | echo "Active cluster named '$NAME' is neither green nor blue, but: '$OLD_COLOR'"
41 | fi
42 | else
43 | NEW_COLOR="green"
44 | fi
45 |
46 | set -x
47 | # Deploy the next color and wait for ingest to finish before taking down old cluster
48 | make -e COLOR=$NEW_COLOR create-cluster
49 | make -e start-ingest || (make -e terminate-cluster && exit 1)
50 | make -e wait || (make -e terminate-cluster && exit 1)
51 | make -e update-route53 || (exit 1)
52 | if [ -n "$OLD_CLUSTER" ]; then
53 | make -e CLUSTER_ID=$OLD_CLUSTER terminate-cluster
54 | fi
55 |
--------------------------------------------------------------------------------
/viewer/webpack.config.js:
--------------------------------------------------------------------------------
1 | var path = require('path')
2 | var webpack = require('webpack')
3 | var HtmlWebpackPlugin = require('html-webpack-plugin');
4 |
5 | module.exports = {
6 | devtool: 'inline-source-map',
7 | entry: [
8 | 'webpack-hot-middleware/client',
9 | './client/client.js'
10 | ],
11 | output: {
12 | path: path.join(__dirname, 'dist'),
13 | filename: 'bundle.js'
14 | },
15 | plugins: [
16 | new webpack.optimize.OccurenceOrderPlugin(),
17 | new webpack.HotModuleReplacementPlugin(),
18 | new webpack.NoErrorsPlugin(),
19 | new HtmlWebpackPlugin({
20 | title: 'GeoTrellis Landsat Demo',
21 | favicon: './client/favicon.ico'
22 | })
23 | ],
24 | module: {
25 | loaders: [{
26 | test: /\.js$/,
27 | loaders: ['babel-loader'],
28 | exclude: /node_modules/,
29 | },
30 | // **IMPORTANT** This is needed so that each bootstrap js file required by
31 | // bootstrap-webpack has access to the jQuery object
32 | { test: /bootstrap\/js\//, loader: 'imports?jQuery=jquery' },
33 |
34 | // Needed for the css-loader when [bootstrap-webpack](https://github.com/bline/bootstrap-webpack)
35 | // loads bootstrap's css.
36 | { test: /\.woff(\?v=\d+\.\d+\.\d+)?$/, loader: "url?limit=10000&mimetype=application/font-woff" },
37 | { test: /\.woff2(\?v=\d+\.\d+\.\d+)?$/, loader: "url?limit=10000&mimetype=application/font-woff" },
38 | { test: /\.ttf(\?v=\d+\.\d+\.\d+)?$/, loader: "url?limit=10000&mimetype=application/octet-stream" },
39 | { test: /\.eot(\?v=\d+\.\d+\.\d+)?$/, loader: "file" },
40 | { test: /\.svg(\?v=\d+\.\d+\.\d+)?$/, loader: "url?limit=10000&mimetype=image/svg+xml" },
41 | { test: /\.css$/, loader: 'style-loader!css-loader' },
42 | { test: /\.(png|jpg)$/, loader: 'url-loader?limit=8192' }
43 | ]
44 | }
45 | }
46 |
--------------------------------------------------------------------------------
/ingest/src/main/scala/demo/etl/landsat/TemporalMultibandLandsatInput.scala:
--------------------------------------------------------------------------------
1 | package demo.etl.landsat
2 |
3 | import geotrellis.raster.MultibandTile
4 | import geotrellis.spark.TemporalProjectedExtent
5 | import geotrellis.spark.etl.config.EtlConf
6 | import geotrellis.vector.Extent
7 |
8 | import com.azavea.landsatutil.Landsat8Query
9 | import org.apache.spark.SparkContext
10 | import org.apache.spark.rdd.RDD
11 | import com.typesafe.scalalogging.LazyLogging
12 |
13 | import java.time.{LocalDate, ZoneOffset}
14 | import scala.util.{Success, Failure}
15 |
16 | class TemporalMultibandLandsatInput extends LandsatInput[TemporalProjectedExtent, MultibandTile] with LazyLogging {
17 | val format = "temporal-landsat"
18 |
19 | def apply(conf: EtlConf)(implicit sc: SparkContext): RDD[(TemporalProjectedExtent, MultibandTile)] = {
20 | val input = conf.landsatInput
21 |
22 | val images =
23 | Landsat8Query()
24 | .withStartDate(
25 | input
26 | .get('startDate)
27 | .map(LocalDate.parse)
28 | .getOrElse(LocalDate.of(2014,1,1))
29 | .atStartOfDay(ZoneOffset.UTC)
30 | ).withEndDate(
31 | input
32 | .get('endDate)
33 | .map(LocalDate.parse)
34 | .getOrElse(LocalDate.of(2015,1,1))
35 | .atStartOfDay(ZoneOffset.UTC)
36 | )
37 | .withMaxCloudCoverage(
38 | input
39 | .get('maxCloudCoverage)
40 | .map(_.toDouble)
41 | .getOrElse(100d)
42 | )
43 | .intersects(Extent.fromString(input('bbox)))
44 | .collect() match {
45 | case Success(r) => r
46 | case Failure(e) => throw e
47 | }
48 |
49 | logger.info(s"Found ${images.length} landsat images")
50 |
51 | this.images = input.get('limit).fold(images)(limit => images.take(limit.toInt))
52 |
53 | fetch(conf, this.images, fetchMethod)
54 | }
55 | }
56 |
--------------------------------------------------------------------------------
/viewer/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "geotrellis-viewer",
3 | "version": "0.0.2",
4 |   "description": "Basic UI to display GeoTrellis layers and operations",
5 | "author": "Eugene Cheipesh",
6 | "scripts": {
7 | "start": "node_modules/nodemon/bin/nodemon.js server/server.js --ignore components --ignore containers",
8 | "build": "node_modules/webpack/bin/webpack.js -d"
9 | },
10 | "dependencies": {
11 | "babel-core": "^5.8.3",
12 | "babel-loader": "^5.3.2",
13 | "babel-plugin-react-display-name": "^2.0.0",
14 | "babel-plugin-react-transform": "^1.1.0",
15 | "babel-runtime": "^5.8.20",
16 | "bootstrap": "^3.3.6",
17 | "bootstrap-webpack": "0.0.5",
18 | "css-loader": "^0.23.1",
19 | "es6-promise": "^3.2.1",
20 | "exports-loader": "^0.6.2",
21 | "express": "^4.13.3",
22 | "extract-text-webpack-plugin": "^1.0.1",
23 | "file-loader": "^0.8.5",
24 | "imports-loader": "^0.6.5",
25 | "isomorphic-fetch": "^2.2.0",
26 | "jquery": "^2.2.0",
27 | "d3": "^3.5.17",
28 | "metrics-graphics": "^2.8.0",
29 | "shortid": "^2.2.6",
30 | "leaflet": "^0.7.7",
31 | "leaflet-draw": "^0.3.0",
32 | "less": "^2.5.3",
33 | "less-loader": "^2.2.2",
34 | "lodash": "^4.0.0",
35 | "react": "^15.0.2",
36 | "react-bootstrap": "^0.28.2",
37 | "react-dom": "^15.1.0",
38 | "react-leaflet": "0.11.6",
39 | "react-leaflet-draw": "0.7.0",
40 | "react-redux": "^4.0.0",
41 | "react-transform-hmr": "^1.0.1",
42 | "redux": "^3.0.4",
43 | "redux-logger": "^2.0.4",
44 | "redux-thunk": "^1.0.0",
45 | "style-loader": "^0.13.0",
46 | "url-loader": "^0.5.7",
47 | "react-loader": "^2.4.0",
48 | "webpack": "^1.12.9",
49 | "webpack-dev-middleware": "^1.4.0",
50 | "webpack-hot-middleware": "^2.5.0"
51 | },
52 | "devDependencies": {
53 | "html-webpack-plugin": "^2.19.0",
54 | "nodemon": "1.9.2"
55 |
56 | }
57 | }
58 |
--------------------------------------------------------------------------------
/scripts/emr/bootstrap-demo.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | for i in "$@"
4 | do
5 | case $i in
6 | --tsj=*)
7 | TILE_SERVER_JAR="${i#*=}"
8 | shift;;
9 | --site=*)
10 | SITE_TGZ="${i#*=}"
11 | shift;;
12 | --s3u=*)
13 | S3U="${i#*=}"
14 | shift;;
15 | --backend=*)
16 | BACKEND="${i#*=}"
17 | shift;;
18 | esac
19 | done
20 |
21 | set -x
22 |
23 | BACKEND=${BACKEND:-accumulo}
24 | SERVER_RUN_CMD="accumulo accumulo `hostname` root secret"
25 |
26 | case $BACKEND in
27 | "accumulo")
28 | shift;;
29 | "cassandra")
30 | SERVER_RUN_CMD="cassandra `hostname` `hostname`"
31 | shift;;
32 | "file")
33 | SERVER_RUN_CMD="local /tmp/catalog"
34 | shift;;
35 | "hadoop")
36 | SERVER_RUN_CMD="hdfs /catalog"
37 | shift;;
38 | "s3")
39 | SERVER_RUN_CMD="s3 key prefix"
40 | shift;;
41 | "hbase")
42 | SERVER_RUN_CMD="hbase `hostname` `hostname`"
43 | shift;;
44 | esac
45 | # Download Tile Server
46 | aws s3 cp $TILE_SERVER_JAR /tmp/tile-server.jar
47 | aws s3 cp $S3U/backend-profiles.json /tmp/backend-profiles.json
48 | aws s3 cp $S3U/input.json /tmp/input.json
49 | aws s3 cp $S3U/output.json /tmp/output.json
50 |
51 | echo "\
52 | description \"Landsat Demo Tile Server\"
53 | start on started hadoop-hdfs-namenode
54 | stop on stopping hadoop-hdfs-namenode
55 | respawn
56 | respawn limit unlimited
57 | exec spark-submit --master yarn-client \
58 | --driver-memory 5G --driver-cores 4 \
59 | --executor-cores 2 --executor-memory 5G \
60 | --conf spark.dynamicAllocation.enabled=true \
61 | /tmp/tile-server.jar $SERVER_RUN_CMD
62 | post-stop exec sleep 60
63 | " | sudo tee /etc/init/tile-server.conf
64 |
65 | # Start Static Web Server
66 | aws s3 cp $SITE_TGZ /tmp/site.tgz
67 | sudo chmod 644 /var/www/html/*
68 | sudo chmod 755 /var/www/html
69 | sudo tar -xzf /tmp/site.tgz -C /var/www/html
70 | sudo mkdir -p /tmp/catalog/attributes
71 | sudo mkdir -p /tmp/catalog-cache
72 | sudo chmod -R 777 /tmp/catalog
73 | sudo chmod -R 777 /tmp/catalog-cache
74 |
--------------------------------------------------------------------------------
/scripts/configurations.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "Classification": "spark",
4 | "Properties": {
5 | "maximizeResourceAllocation": "false",
6 | "spark.dynamicAllocation.enabled": "true"
7 | }
8 | },
9 | {
10 | "Classification": "hdfs-site",
11 | "Properties": {
12 | "dfs.replication": "1",
13 | "dfs.permissions": "false",
14 | "dfs.datanode.max.xcievers": "16384",
15 | "dfs.datanode.max.transfer.threads": "16384",
16 | "dfs.datanode.balance.max.concurrent.moves": "1000",
17 | "dfs.datanode.balance.bandwidthPerSec": "100000000"
18 | }
19 | },
20 | {
21 | "Classification": "yarn-site",
22 | "Properties": {
23 | "yarn.resourcemanager.am.max-attempts": "1"
24 | }
25 | },
26 | {
27 | "Classification": "hbase-site",
28 | "Properties": {
29 | "hbase.rootdir": "/hbase",
30 | "hbase.hregion.max.filesize": "16777216",
31 | "hbase.regionserver.handler.count": "30",
32 | "hbase.hregion.memstore.flush.size": "16777216",
33 | "hbase.zookeeper.property.dataDir": "/mnt/zookeeper",
34 | "hbase.cluster.distributed": "true",
35 | "hbase.client.scanner.caching": "100000",
36 | "zookeeper.session.timeout": "60000"
37 | }
38 | },
39 | {
40 | "Classification": "hadoop-env",
41 | "Configurations": [
42 | {
43 | "Classification": "export",
44 | "Properties": {
45 | "JAVA_HOME": "/usr/lib/jvm/java-1.8.0",
46 | "GDAL_DATA": "/usr/local/share/gdal",
47 | "LD_LIBRARY_PATH": "/usr/local/lib",
48 | "PYSPARK_PYTHON": "python27",
49 | "PYSPARK_DRIVER_PYTHON": "python27"
50 | }
51 | }
52 | ]
53 | },
54 | {
55 | "Classification": "spark-env",
56 | "Configurations": [
57 | {
58 | "Classification": "export",
59 | "Properties": {
60 | "JAVA_HOME": "/usr/lib/jvm/java-1.8.0",
61 | "GDAL_DATA": "/usr/local/share/gdal",
62 | "LD_LIBRARY_PATH": "/usr/local/lib",
63 | "PYSPARK_PYTHON": "python27",
64 | "PYSPARK_DRIVER_PYTHON": "python27"
65 | }
66 | }
67 | ]
68 | },
69 | {
70 | "Classification": "yarn-env",
71 | "Configurations": [
72 | {
73 | "Classification": "export",
74 | "Properties": {
75 | "JAVA_HOME": "/usr/lib/jvm/java-1.8.0",
76 | "GDAL_DATA": "/usr/local/share/gdal",
77 | "LD_LIBRARY_PATH": "/usr/local/lib",
78 | "PYSPARK_PYTHON": "python27",
79 | "PYSPARK_DRIVER_PYTHON": "python27"
80 | }
81 | }
82 | ]
83 | }
84 | ]
85 |
--------------------------------------------------------------------------------
/ingest/src/main/scala/demo/etl/landsat/EtlConfLandsatMethods.scala:
--------------------------------------------------------------------------------
1 | package demo.etl.landsat
2 |
3 | import geotrellis.spark.etl.config.EtlConf
4 | import com.azavea.landsatutil.IOHook
5 | import java.io.File
6 |
7 | trait EtlConfLandsatMethods {
8 | val self: EtlConf
9 |
10 | val help = """
11 | |geotrellis-etl-landsat-input
12 | |
13 | |Usage: geotrellis-etl-landsat-input [options]
14 | |
15 | | --bandsWanted
16 | | bandsWanted is a non-empty String property
17 | | --startDate
18 | | startDate is a non-empty String property
19 | | --endDate
20 | | endDate is a non-empty String property
21 | | --maxCloudCoverage
22 | | maxCloudCoverage is a non-empty String property
23 | | --bbox
24 | | bbox is a non-empty String property
25 | | --cache
26 | | cache is a non-empty String property
27 | | --limit
28 | | limit is a non-empty String property
29 | | --help
30 | | prints this usage text
31 | """.stripMargin
32 |
33 | def nextOption(map: Map[Symbol, String], list: Seq[String]): Map[Symbol, String] =
34 | list.toList match {
35 | case Nil => map
36 | case "--bandsWanted" :: value :: tail =>
37 | nextOption(map ++ Map('bandsWanted -> value), tail)
38 | case "--startDate" :: value :: tail =>
39 | nextOption(map ++ Map('startDate -> value), tail)
40 | case "--endDate" :: value :: tail =>
41 | nextOption(map ++ Map('endDate -> value), tail)
42 | case "--maxCloudCoverage" :: value :: tail =>
43 | nextOption(map ++ Map('maxCloudCoverage -> value), tail)
44 | case "--bbox" :: value :: tail =>
45 | nextOption(map ++ Map('bbox -> value), tail)
46 | case "--limit" :: value :: tail =>
47 | nextOption(map ++ Map('limit -> value), tail)
48 | case "--cache" :: value :: tail =>
49 | nextOption(map ++ Map('cache -> value), tail)
50 | case "--help" :: tail => {
51 | println(help)
52 | sys.exit(1)
53 | }
54 | case option :: tail => {
55 | println(s"Unknown option ${option} in landsat input string")
56 | println(help)
57 | sys.exit(1)
58 | }
59 | }
60 |
61 | def landsatInput = nextOption(Map(), getPath(self.input.backend).path.split(" ").toList)
62 |
63 | def cacheHook: IOHook = landsatInput.get('cache).map(new File(_)) match {
64 | case Some(dir) => IOHook.localCache(dir)
65 | case None => IOHook.passthrough
66 | }
67 |
68 | def bandsWanted = landsatInput('bandsWanted).split(",")
69 | }
70 |
--------------------------------------------------------------------------------
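
Note that for the landsat backend the "path" field in conf/input.json and conf/input-local.json is not a filesystem path: landsatInput splits it on whitespace and nextOption folds the "--key value" pairs into a Symbol-keyed map, which TemporalMultibandLandsatInput above then queries via input.get('startDate) and friends. A standalone re-statement of that tokenization, for illustration only (the object name and example string below are not part of the repository):

    object LandsatPathTokenization extends App {
      // Assumes well-formed, whitespace-separated "--key value" pairs (no "--help").
      val path = "--cache catalog-cache --bandsWanted 4,3,2,5,QA --limit 1"

      val opts: Map[Symbol, String] =
        path.split(" ").grouped(2).collect {
          case Array(key, value) if key.startsWith("--") => Symbol(key.drop(2)) -> value
        }.toMap

      println(opts) // e.g. Map('cache -> catalog-cache, 'bandsWanted -> 4,3,2,5,QA, 'limit -> 1)
    }
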
/scripts/emr/bootstrap-geowave.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | #
3 | # Bootstrap a GeoWave cluster node
4 | #
5 |
6 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
7 | #
8 | # Config Settings you might want to update
9 |
10 | # Accumulo
11 | USER=accumulo
12 | # NOTE: This password, the Accumulo instance secret and the GeoServer password are left at
13 | # the default settings. The default EMR security group only leaves ssh/22 open to
14 | # external access, so internal consoles and web UIs have to be reached over SSH.
15 | # When this is revisited, remember that nodes can be added to an
16 | # EMR cluster at any point after creation, so the password set during the initial spin-up
17 | # would have to be persisted somewhere and provided to newly created nodes later.
18 | USERPW=secret # TODO: Can't change until trace.password in accumulo-site.xml is updated
19 | ACCUMULO_VERSION=1.7.1
20 | ACCUMULO_TSERVER_OPTS=3GB
21 | INSTALL_DIR=/opt
22 | #ACCUMULO_DOWNLOAD_BASE_URL=https://archive.apache.org/dist/accumulo
23 | ACCUMULO_DOWNLOAD_BASE_URL=s3://geotrellis-test/accumulo
24 |
25 | # GeoWave
26 | GEOWAVE_REPO_RPM=geowave-repo-dev-1.0-3.noarch.rpm # TODO: Should have a prod->latest rpm
27 | GEOWAVE_VERSION='0.9.1'
28 | GEOSERVER_PORT='8000'
29 | GEOSERVER_MEMORY="-Xmx512m -XX:MaxPermSize=128m"
30 |
31 | # Java JAI and ImageIO URLS
32 | JAI_URL=http://data.opengeo.org/suite/jai/jai-1_1_3-lib-linux-amd64-jdk.bin
33 | IMAGEIO_URL=http://data.opengeo.org/suite/jai/jai_imageio-1_1-lib-linux-amd64-jdk.bin
34 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
35 |
36 | # Step #1: I've externalized commands into library functions for clarity; download and source them
37 | if [ ! -f /tmp/geowave-install-lib.sh ]; then
38 | aws s3 cp s3://geotrellis-test/emr/geowave-install-lib.sh /tmp/geowave-install-lib.sh
39 | fi
40 | source /tmp/geowave-install-lib.sh
41 |
42 | # Step #2: The EMR customize hooks run _before_ everything else, so Hadoop is not yet ready
43 | THIS_SCRIPT="$(realpath "${BASH_SOURCE[0]}")"
44 | RUN_FLAG="${THIS_SCRIPT}.run"
45 | # On first boot skip past this script to allow EMR to set up the environment. Set a callback
46 | # which will poll for availability of HDFS and then install Accumulo and then GeoWave
47 | if [ ! -f "$RUN_FLAG" ]; then
48 | touch "$RUN_FLAG"
49 | TIMEOUT= is_master && TIMEOUT=3 || TIMEOUT=4
50 | echo "bash -x $(realpath "${BASH_SOURCE[0]}") > /tmp/geowave-install.log" | at now + $TIMEOUT min
51 | exit 0 # Bail and let EMR finish initializing
52 | fi
53 |
54 | # Step #3: Get Accumulo running
55 | os_tweaks && configure_zookeeper
56 | create_accumulo_user && install_accumulo && configure_accumulo
57 |
58 | # Step #4: Install imagery libs then GeoWave components on master node
59 | install_image_libs
60 | if is_master ; then
61 | install_geowave
62 | fi
63 |
64 | # Step #5: Optionally initialize all volumes
65 | if $INIT_VOLUMES ; then
66 | initialize_volumes
67 | fi
68 |
--------------------------------------------------------------------------------
/viewer/redux/reducer.js:
--------------------------------------------------------------------------------
1 | var reducer = function (state, action) {
2 | switch (action.type) {
3 | case 'SET_ANALYSIS_LAYER':
4 | return Object.assign({}, state, { analysisLayer: action.layer });
5 | case 'SET_NDI':
6 | return Object.assign({}, state, { ndi: action.ndi });
7 | case 'SET_LAYER_TYPE':
8 | return Object.assign({}, state, { layerType: action.layer });
9 | case 'REGISTER_TIME':
10 | var updatedTimes = state.times;
11 | updatedTimes[state.layerName][action.index] = action.time;
12 | return Object.assign({}, state, { times: updatedTimes });
13 | case 'SET_LAYERNAME':
14 | var delta = {
15 | layerName: action.name,
16 | times: {}
17 | };
18 | if (! state.times[action.name]) { delta.times[action.name] = []; }
19 | return Object.assign({}, state, delta);
20 | case 'SHOW_LAYER':
21 | return Object.assign({}, state, {
22 | map: {
23 | url: [action.url],
24 | activeLayerId: action.id
25 | }
26 | });
27 | case 'CENTER_MAP':
28 | return Object.assign({}, state, {
29 | map: { extent: action.extent }
30 | });
31 | case 'LOAD_CATALOG_SUCCESS': {
32 | // On new catalog, set layer to first in list; times to the corresponding times
33 | var layer = action.catalog.layers[0];
34 | var times = {};
35 | times[layer.name] = layer.times;
36 | return Object.assign({}, state, {
37 | rootUrl: action.url,
38 | catalog: action.catalog,
39 | layerName: layer.name,
40 | times: times // set this to be equal to times - values are updated later
41 | });
42 | }
43 | case 'SHOW_BOUNDS': {
44 | return _.merge({}, state, { map: { bounds: action.bounds } });
45 | }
46 | case 'SHOW_MAX_STATE': {
47 | console.log("SHOW_MAX_STATE");
48 | console.log(action.geojson);
49 | return _.merge({}, state, { map: { maxState: action.geojson } });
50 | }
51 | case 'HIDE_MAX_STATE': {
52 | console.log("HIDE_MAX_STATE");
53 | return _.merge({}, state, { map: { maxState: null } });
54 | }
55 | case 'SHOW_MAX_AVERAGE_STATE': {
56 | console.log("SHOW_MAX_AVERAGE_STATE");
57 | console.log(action.geojson);
58 | return _.merge({}, state, { map: { maxAverageState: action.geojson } });
59 | }
60 | case 'HIDE_MAX_AVERAGE_STATE': {
61 | console.log("HIDE_MAX_AVERAGE_STATE");
62 | return _.merge({}, state, { map: { maxAverageState: null } });
63 | }
64 | case 'SHOW_STATE_AVERAGE': {
65 | console.log("SHOW_STATE_AVERAGE");
66 | console.log(action.geojson);
67 | return _.merge({}, state, { map: { stateAverage: action.geojson } });
68 | }
69 | case 'SHOW_STATE_DIFF_AVERAGE': {
70 | console.log("SHOW_STATE_DIFF_AVERAGE");
71 | console.log(action.geojson);
72 | return _.merge({}, state, { map: { stateDiffAverage: action.geojson } });
73 | }
74 | default:
75 | return state;
76 | }
77 | };
78 |
79 | module.exports = reducer;
80 |
--------------------------------------------------------------------------------
/server/src/main/scala/demo/ReaderSet.scala:
--------------------------------------------------------------------------------
1 | package demo
2 |
3 | import geotrellis.proj4._
4 | import geotrellis.raster._
5 | import geotrellis.raster.resample._
6 | import geotrellis.spark._
7 | import geotrellis.spark.io._
8 | import geotrellis.spark.tiling._
9 |
10 | import java.time.ZonedDateTime
11 |
12 | trait ReaderSet {
13 | val layoutScheme = ZoomedLayoutScheme(WebMercator, 256)
14 | def attributeStore: AttributeStore
15 | def metadataReader: MetadataReader
16 | def layerReader: FilteringLayerReader[LayerId]
17 | def layerCReader: CollectionLayerReader[LayerId]
18 | def singleBandTileReader: TileReader[SpaceTimeKey, Tile]
19 | def multiBandTileReader: TileReader[SpaceTimeKey, MultibandTile]
20 |
21 | /** Do "overzooming", where we resample lower zoom level tiles to serve out higher zoom level tiles. */
22 | def readSinglebandTile(layer: String, zoom: Int, x: Int, y: Int, time: ZonedDateTime): Option[Tile] =
23 | try {
24 | val z = metadataReader.layerNamesToMaxZooms(layer)
25 |
26 | if(zoom > z) {
27 | val layerId = LayerId(layer, z)
28 |
29 | val meta = metadataReader.read(layerId)
30 | val rmd = meta.rasterMetaData
31 |
32 | val requestZoomMapTransform = layoutScheme.levelForZoom(zoom).layout.mapTransform
33 | val requestExtent = requestZoomMapTransform(x, y)
34 | val centerPoint = requestZoomMapTransform(x, y).center
35 | val SpatialKey(nx, ny) = rmd.mapTransform(centerPoint)
36 | val sourceExtent = rmd.mapTransform(nx, ny)
37 |
38 |
39 | val largerTile =
40 | singleBandTileReader.read(layerId, SpaceTimeKey(nx, ny, time))
41 |
42 | Some(largerTile.resample(sourceExtent, RasterExtent(requestExtent, 256, 256), Bilinear))
43 | } else {
44 | Some(singleBandTileReader.read(LayerId(layer, zoom), SpaceTimeKey(x, y, time)))
45 | }
46 | } catch {
47 | case e: ValueNotFoundError =>
48 | None
49 | }
50 |
51 | /** Do "overzooming", where we resample lower zoom level tiles to serve out higher zoom level tiles. */
52 | def readMultibandTile(layer: String, zoom: Int, x: Int, y: Int, time: ZonedDateTime): Option[MultibandTile] =
53 | try {
54 | val z = metadataReader.layerNamesToMaxZooms(layer)
55 |
56 | if(zoom > z) {
57 | val layerId = LayerId(layer, z)
58 |
59 | val meta = metadataReader.read(layerId)
60 | val rmd = meta.rasterMetaData
61 |
62 | val requestZoomMapTransform = layoutScheme.levelForZoom(zoom).layout.mapTransform
63 | val requestExtent = requestZoomMapTransform(x, y)
64 | val centerPoint = requestZoomMapTransform(x, y).center
65 | val SpatialKey(nx, ny) = rmd.mapTransform(centerPoint)
66 | val sourceExtent = rmd.mapTransform(nx, ny)
67 |
68 |
69 | val largerTile =
70 | multiBandTileReader.read(layerId, SpaceTimeKey(nx, ny, time))
71 |
72 | Some(largerTile.resample(sourceExtent, RasterExtent(requestExtent, 256, 256), Bilinear))
73 | } else {
74 | Some(multiBandTileReader.read(LayerId(layer, zoom), SpaceTimeKey(x, y, time)))
75 | }
76 | } catch {
77 | case e: ValueNotFoundError =>
78 | None
79 | }
80 | }
81 |
--------------------------------------------------------------------------------
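The two read*Tile methods above rely on the same "overzoom" trick: when a request arrives above the layer's maximum stored zoom, the tile key is walked back up the pyramid via the layout's map transform and the stored parent tile is resampled. Below is a minimal sketch of the equivalent index math, assuming the standard power-of-two WebMercator pyramid implied by ZoomedLayoutScheme(WebMercator, 256); the object name and sample numbers are illustrative only and are not part of the demo.

    object OverzoomSketch {
      /** Key of the stored tile at `maxZoom` that covers tile (x, y) requested at `zoom`. */
      def parentKey(zoom: Int, x: Int, y: Int, maxZoom: Int): (Int, Int) = {
        require(zoom >= maxZoom, "only needed when the request zoom exceeds the stored max zoom")
        val dz = zoom - maxZoom
        (x >> dz, y >> dz) // each zoom step halves the tile index
      }

      def main(args: Array[String]): Unit =
        println(parentKey(zoom = 15, x = 9648, y = 12320, maxZoom = 13)) // (2412,3080)
    }

The stored parent tile is then resampled onto a 256x256 grid covering just the requested extent, which is what the Bilinear resample calls above do.
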
/viewer/components/charts/TimeSeries.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { render } from 'react-dom';
3 | import fetch from 'isomorphic-fetch';
4 | import shortid from 'shortid';
5 | import _ from 'lodash';
6 | import $ from 'jquery';
7 | import Loader from 'react-loader';
8 | import MG from 'metrics-graphics';
9 | import "metrics-graphics/dist/metricsgraphics.css";
10 |
11 | function round(num) { return +(Math.round(num + "e+2") + "e-2"); }
12 |
13 | var TimeSeries = React.createClass({
14 | getInitialState: function () {
15 | return { loaded: false };
16 | },
17 | _fetchTimeSeries: function(pointLayer, ndi) {
18 | let root = pointLayer.chartProps.rootURL;
19 | let layerName = pointLayer.chartProps.layerName;
20 | let latlng = pointLayer._latlng;
21 | let url = `${root}/series/${layerName}/${ndi}?lat=${latlng.lat}&lng=${latlng.lng}`;
22 |
23 | return fetch(url).then( response => {
24 | response.json().then( summary => {
25 | var data = _.chain(summary.answer)
26 | .map(function(d) { return { "date": new Date(d[0]), "value": d[1] }; })
27 | .filter(function(d) { return _.isNull(d.value) ? false : true; })
28 | .value();
29 |
30 | pointLayer.stats[ndi] = data;
31 | this.setState({ loaded: true });
32 | this._renderChart(pointLayer, ndi);
33 | });
34 | },
35 | error => {});
36 | },
37 | _renderChart: function(point, ndi) {
38 | if (_.isEmpty(point.stats[ndi])) {
39 | MG.data_graphic({
40 | target: document.getElementById(this.domId),
41 | missing_text: "No data available for the current point",
42 | chart_type: 'missing-data',
43 | full_width: true,
44 | height: this.props.height || 200,
45 | right: this.props.rightOffset || 40
46 | });
47 | } else {
48 | MG.data_graphic({
49 | target: document.getElementById(this.domId),
50 | data: point.stats[ndi],
51 | title: (ndi === 'ndvi' ? 'NDVI' : 'NDWI') + ` values at ${round(point._latlng.lat) + ', ' + round(point._latlng.lng) }`,
52 | full_width: true,
53 | height: (this.props.height || 200),
54 | right: (this.props.rightOffset || 40),
55 | min_y: -1.0,
56 | max_y: 1.0,
57 | x_accessor: this.props.xAccessor || 'date',
58 | y_accessor: this.props.yAccessor || 'value',
59 | animate_on_load: true,
60 | color: (ndi === 'ndvi' ? '#64c59d' : '#add8e6')
61 | });
62 | }
63 | },
64 | componentDidMount: function() {
65 | if (! this.props.point.stats[this.props.ndi]) {
66 | this.setState({ loaded: false });
67 | this._fetchTimeSeries(this.props.point, this.props.ndi);
68 | } else {
69 | this.setState({ loaded: true });
70 | this._renderChart(this.props.point, this.props.ndi);
71 | }
72 | },
73 | componentWillReceiveProps: function(nextProps) {
74 | if (! nextProps.point.stats[nextProps.ndi]) {
75 | this.setState({ loaded: false });
76 | this._fetchTimeSeries(nextProps.point, nextProps.ndi);
77 | } else if (this.state.loaded) {
78 | this._renderChart(nextProps.point, nextProps.ndi);
79 | }
80 | },
81 | render: function() {
82 | let loading = this.state.loaded ? null : (Loading data...
)
83 | if (! this.domId) { this.domId = shortid.generate(); }
84 |
85 | return (
86 |
90 | );
91 | }
92 | });
93 |
94 | module.exports = TimeSeries;
95 |
--------------------------------------------------------------------------------
/server/src/main/scala/demo/Main.scala:
--------------------------------------------------------------------------------
1 | package demo
2 |
3 | import geotrellis.spark.io.accumulo._
4 | import geotrellis.spark.io.cassandra._
5 | import geotrellis.spark.io.hbase._
6 |
7 | import org.apache.spark._
8 | import org.apache.accumulo.core.client.security.tokens._
9 | import akka.actor._
10 | import akka.io.IO
11 |
12 | import java.time.format.DateTimeFormatter
13 |
14 | import akka.actor.ActorSystem
15 | import akka.event.Logging
16 | import akka.http.scaladsl.Http
17 | import akka.stream.ActorMaterializer
18 |
19 | object AkkaSystem {
20 | implicit val system = ActorSystem("iaas-system")
21 | implicit val materializer = ActorMaterializer()
22 |
23 | trait LoggerExecutor {
24 | protected implicit val log = Logging(system, "app")
25 | }
26 | }
27 |
28 | object Main {
29 | def main(args: Array[String]): Unit = {
30 | import AkkaSystem._
31 |
32 | val conf: SparkConf =
33 | new SparkConf()
34 | .setIfMissing("spark.master", "local[*]")
35 | .setAppName("Demo Server")
36 | .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
37 | .set("spark.kryo.registrator", "geotrellis.spark.io.kryo.KryoRegistrator")
38 |
39 | implicit val sc = new SparkContext(conf)
40 |
41 | val readerSet =
42 | if(args(0) == "local") {
43 | val localCatalog = args(1)
44 |
45 | new FileReaderSet(localCatalog)
46 | } else if(args(0) == "hdfs"){
47 | val path = new org.apache.hadoop.fs.Path(args(1))
48 |
49 | new HadoopReaderSet(path)
50 | } else if(args(0) == "s3"){
51 | val bucket = args(1)
52 | val prefix = args(2)
53 |
54 | new S3ReaderSet(bucket, prefix)
55 | } else if(args(0) == "accumulo") {
56 | val instanceName = args(1)
57 | val zooKeeper = args(2)
58 | val user = args(3)
59 | val password = new PasswordToken(args(4))
60 | val instance = AccumuloInstance(instanceName, zooKeeper, user, password)
61 |
62 | new AccumuloReaderSet(instance)
63 | } else if(args(0) == "cassandra") {
64 | val zooKeeper = args(1).split(",")
65 | val master = args(2)
66 | val instance = BaseCassandraInstance(zooKeeper, master)
67 |
68 | new CassandraReaderSet(instance)
69 | } else if(args(0) == "hbase") {
70 | val zooKeepers = args(1).split(",").toSeq
71 | val master = args(2)
72 | val instance = HBaseInstance(zooKeepers, master)
73 |
74 | new HBaseReaderSet(instance)
75 | } else {
76 | sys.error(s"Unknown catalog type ${args(0)}")
77 | }
78 |
79 | val router = new Router(readerSet, sc)
80 | Http().bindAndHandle(router.routes, "0.0.0.0", 8899)
81 | }
82 | }
83 |
84 |
85 | // object Main {
86 | // val dateTimeFormat = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ssZ")
87 |
88 | // /** Usage:
88 | // * First argument is the catalog type. Others are dependent on the first argument.
90 | // *
91 | // * local CATALOG_DIR
92 | // * s3 BUCKET_NAME CATALOG_KEY
93 | // * accumulo INSTANCE ZOOKEEPER USER PASSWORD
94 | // */
95 | // def main(args: Array[String]): Unit = {
96 |
97 | // // create and start our service actor
98 | // val service =
99 | // system.actorOf(Props(classOf[DemoServiceActor], readerSet, sc), "demo")
100 |
101 | // // start a new HTTP server on port 8899 with our service actor as the handler
102 | // IO(Http) ! Http.Bind(service, "0.0.0.0", 8899)
103 | // }
104 | // }
105 |
--------------------------------------------------------------------------------
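For reference, the backend dispatch in Main above expects positional arguments. Read directly off the match arms, the accepted forms are (the argument names here are descriptive placeholders mirroring the code's variable names, not flags):

    local     CATALOG_DIR
    hdfs      CATALOG_PATH
    s3        BUCKET PREFIX
    accumulo  INSTANCE ZOOKEEPER USER PASSWORD
    cassandra ZOOKEEPER MASTER    (ZOOKEEPER comma-separated)
    hbase     ZOOKEEPERS MASTER   (ZOOKEEPERS comma-separated)

Whatever the backend, the tile server binds the Akka HTTP routes on 0.0.0.0:8899, the same address and port the old spray-based Main (left commented out above) used.
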
/server/src/main/scala/demo/Render.scala:
--------------------------------------------------------------------------------
1 | package demo
2 |
3 | import geotrellis.raster._
4 | import geotrellis.raster.render._
5 |
6 | object Render {
7 | val ndviColorBreaks =
8 | ColorMap.fromStringDouble("0.05:ffffe5aa;0.1:f7fcb9ff;0.2:d9f0a3ff;0.3:addd8eff;0.4:78c679ff;0.5:41ab5dff;0.6:238443ff;0.7:006837ff;1:004529ff").get
9 |
10 | val ndwiColorBreaks =
11 | ColorMap.fromStringDouble("0:aacdff44;0.1:70abffff;0.2:3086ffff;0.3:1269e2ff;0.4:094aa5ff;1:012c69ff").get
12 |
13 | val ndviDiffColorBreaks =
14 | ColorMap.fromStringDouble("-0.6:FF4040FF;-0.5:FF5353FF;-0.4:FF6666FF;-0.3:FF7979FF;-0.2:FF8C8CFF;-0.1:FF9F9FFF;0:709AB244;0.1:81D3BBFF;0.2:67CAAEFF;0.3:4EC2A0FF;0.4:35B993FF;0.5:1CB085FF;0.6:03A878FF").get
15 |
16 | val waterDiffColorBreaks =
17 | ColorMap.fromStringDouble("0.2:aacdff44;0.3:1269e2ff;0.4:094aa5ff;1:012c69ff").get
18 |
19 | def image(tile: MultibandTile): Png = {
20 | val (red, green, blue) =
21 | if(tile.cellType == UShortCellType) {
22 | // Landsat
23 |
24 | // magic numbers. Fiddled with until visually it looked ok. ¯\_(ツ)_/¯
25 | val (min, max) = (4000, 15176)
26 |
27 | def clamp(z: Int) = {
28 | if(isData(z)) { if(z > max) { max } else if(z < min) { min } else { z } }
29 | else { z }
30 | }
31 | val red = tile.band(0).convert(IntCellType).map(clamp _).normalize(min, max, 0, 255)
32 | val green = tile.band(1).convert(IntCellType).map(clamp _).normalize(min, max, 0, 255)
33 | val blue = tile.band(2).convert(IntCellType).map(clamp _).normalize(min, max, 0, 255)
34 |
35 | (red, green, blue)
36 | } else {
37 | // Planet Labs
38 | (tile.band(0).combine(tile.band(3)) { (z, m) => if(m == 0) 0 else z },
39 | tile.band(1).combine(tile.band(3)) { (z, m) => if(m == 0) 0 else z },
40 | tile.band(2).combine(tile.band(3)) { (z, m) => if(m == 0) 0 else z })
41 | }
42 |
43 |
44 | def clampColor(c: Int): Int =
45 | if(isNoData(c)) { c }
46 | else {
47 | if(c < 0) { 0 }
48 | else if(c > 255) { 255 }
49 | else c
50 | }
51 |
52 | // -255 to 255
53 | val brightness = 15
54 | def brightnessCorrect(v: Int): Int =
55 | if(v > 0) { v + brightness }
56 | else { v }
57 |
58 | // 0.01 to 7.99
59 | val gamma = 0.8
60 | val gammaCorrection = 1 / gamma
61 | def gammaCorrect(v: Int): Int =
62 | (255 * math.pow(v / 255.0, gammaCorrection)).toInt
63 |
64 | // -255 to 255
65 | val contrast: Double = 30.0
66 | val contrastFactor = (259 * (contrast + 255)) / (255 * (259 - contrast))
67 | def contrastCorrect(v: Int): Int =
68 | ((contrastFactor * (v - 128)) + 128).toInt
69 |
70 | def adjust(c: Int): Int = {
71 | if(isData(c)) {
72 | var cc = c
73 | cc = clampColor(brightnessCorrect(cc))
74 | cc = clampColor(gammaCorrect(cc))
75 | cc = clampColor(contrastCorrect(cc))
76 | cc
77 | } else {
78 | c
79 | }
80 | }
81 |
82 | val adjRed = red.map(adjust _)
83 | val adjGreen = green.map(adjust _)
84 | val adjBlue = blue.map(adjust _)
85 |
86 | ArrayMultibandTile(adjRed, adjGreen, adjBlue).renderPng
87 | }
88 |
89 | def ndvi(tile: MultibandTile): Png =
90 | NDVI(tile).renderPng(ndviColorBreaks)
91 |
92 | def ndvi(tile1: MultibandTile, tile2: MultibandTile): Png =
93 | (NDVI(tile1) - NDVI(tile2)).renderPng(ndviDiffColorBreaks)
94 |
95 | def ndwi(tile: MultibandTile): Png =
96 | NDWI(tile).renderPng(ndwiColorBreaks)
97 |
98 | def ndwi(tile1: MultibandTile, tile2: MultibandTile): Png =
99 | (NDWI(tile1) - NDWI(tile2)).renderPng(waterDiffColorBreaks)
100 | }
101 |
--------------------------------------------------------------------------------
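The RGB path in Render.image is effectively a three-step per-pixel pipeline: a brightness shift, gamma correction, then a linear contrast stretch, clamping to 0-255 between steps. Below is a standalone sketch of that arithmetic using the same constants as above but no GeoTrellis types, so the values can be checked in a REPL; NoData handling is omitted, and the object name and sample input are illustrative.

    object ColorAdjustSketch {
      val brightness = 15    // -255 to 255
      val gamma = 0.8        // 0.01 to 7.99
      val contrast = 30.0    // -255 to 255

      private def clamp(v: Int) = math.max(0, math.min(255, v))
      private def brighten(v: Int) = if (v > 0) v + brightness else v
      private def gammaCorrect(v: Int) = (255 * math.pow(v / 255.0, 1 / gamma)).toInt
      private def contrastCorrect(v: Int) = {
        val factor = (259 * (contrast + 255)) / (255 * (259 - contrast))
        ((factor * (v - 128)) + 128).toInt
      }

      /** Same ordering as `adjust` above: brightness, then gamma, then contrast. */
      def adjust(v: Int): Int =
        clamp(contrastCorrect(clamp(gammaCorrect(clamp(brighten(v))))))

      def main(args: Array[String]): Unit =
        println(adjust(100)) // a mid-range band value after the full correction chain
    }
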
/viewer/containers/App.js:
--------------------------------------------------------------------------------
1 | import React, { Component, PropTypes } from 'react';
2 | import { bindActionCreators } from 'redux';
3 | import { connect } from 'react-redux';
4 | import * as actions from '../redux/actions';
5 | import Leaflet from '../components/Leaflet';
6 | import Catalog from '../components/Catalog';
7 | import Panels from '../components/Panels';
8 | import _ from 'lodash';
9 |
10 | import "bootstrap-webpack";
11 |
12 | var App = React.createClass({
13 |
14 | render: function() {
15 | return (
16 |
17 |
18 |
37 |
38 |
39 |
40 |
41 |
this.props.actions.fetchCatalog(url)} />
45 |
69 |
70 |
71 |
72 | );
73 | }
74 | });
75 |
76 | var mapStateToProps = function (state) {
77 | return state;
78 | };
79 |
80 | var mapDispatchToProps = function (dispatch) {
81 | return { // binding actions triggers dispatch on call
82 | actions: bindActionCreators(actions, dispatch)
83 | };
84 | };
85 |
86 | module.exports = connect(mapStateToProps, mapDispatchToProps)(App);
87 |
--------------------------------------------------------------------------------
/ingest/src/main/scala/demo/LandsatIngest.scala:
--------------------------------------------------------------------------------
1 | package demo
2 |
3 | import demo.etl.landsat.{LandsatModule, TemporalMultibandLandsatInput}
4 | import geotrellis.vector.io._
5 | import geotrellis.raster._
6 | import geotrellis.spark._
7 | import geotrellis.spark.io._
8 | import geotrellis.spark.etl.{Etl, OutputPlugin}
9 | import geotrellis.spark.etl.config.EtlConf
10 | import geotrellis.spark.util._
11 | import geotrellis.spark.pyramid._
12 |
13 | import com.typesafe.scalalogging.LazyLogging
14 | import spray.json.DefaultJsonProtocol._
15 | import org.apache.spark._
16 | import org.apache.spark.rdd._
17 |
18 | object LandsatIngest extends LazyLogging {
19 |
20 | /** Accept a list of Landsat image descriptors that we will ingest into a GeoTrellis layer.
21 | * It is expected that the user will generate this list using `Landsat8Query` and prefilter it.
22 | */
23 | def run(
24 | conf: EtlConf,
25 | reprojected: RDD[(TemporalProjectedExtent, MultibandTile)],
26 | inputPlugin: TemporalMultibandLandsatInput,
27 | writer: Writer[LayerId, RDD[(SpaceTimeKey, MultibandTile)] with Metadata[TileLayerMetadata[SpaceTimeKey]]],
28 | attributeStore: AttributeStore
29 | )(implicit sc: SparkContext): Unit = {
30 | // Our dataset can span UTM zones, so we must reproject the tiles individually to a common projection
31 | val output = conf.output
32 | val maxZoom = output.maxZoom.getOrElse(13) // We know this ahead of time based on Landsat resolution
33 | val layerName = conf.input.name
34 | val destCRS = output.getCrs.get
35 | val resampleMethod = output.resampleMethod
36 | val layoutScheme = output.getLayoutScheme
37 | val tileLayerMetadata = inputPlugin.calculateTileLayerMetadata(maxZoom, destCRS)
38 | logger.info(s"TileLayerMetadata calculated: $tileLayerMetadata")
39 | val tiledRdd = reprojected.tileToLayout(tileLayerMetadata, resampleMethod)
40 | val rdd = new ContextRDD(tiledRdd, tileLayerMetadata)
41 |
42 | Pyramid.upLevels(rdd, layoutScheme, maxZoom, 1, resampleMethod){ (rdd, zoom) =>
43 | writer.write(LayerId(layerName, zoom), rdd)
44 |
45 | if (zoom == 1) {
46 | // Store attributes common across zooms for catalog to see
47 | val id = LayerId(layerName, 0)
48 | attributeStore.write(id, "times",
49 | rdd
50 | .map(_._1.instant)
51 | .countByValue
52 | .keys.toArray
53 | .sorted)
54 | attributeStore.write(id, "extent",
55 | (rdd.metadata.extent, rdd.metadata.crs))
56 | }
57 | }
58 | }
59 | }
60 |
61 | object LandsatIngestMain extends LazyLogging {
62 | def main(args: Array[String]): Unit = {
63 | logger.info(s"Arguments: ${args.toSeq}")
64 | implicit val sc = SparkUtils.createSparkContext("GeoTrellis Landsat Ingest", new SparkConf(true))
65 | EtlConf(args) foreach { conf =>
66 | val uconf = demo.etl.landsat.confWithDefaults(conf)
67 | val etl = Etl(uconf, Etl.defaultModules :+ LandsatModule)
68 | val inputPlugin = new TemporalMultibandLandsatInput()
69 | val sourceTiles = inputPlugin(uconf)
70 |
71 | val outputPlugin = etl.combinedModule
72 | .findSubclassOf[OutputPlugin[SpaceTimeKey, MultibandTile, TileLayerMetadata[SpaceTimeKey]]]
73 | .find { _.suitableFor(uconf.output.backend.`type`.name) }
74 | .getOrElse(sys.error(s"Unable to find output module of type '${uconf.output.backend.`type`}'"))
75 |
76 | /* TODO: if the layer already exists the ingest will fail; we need to use a layer updater */
77 | LandsatIngest.run(
78 | conf = uconf,
79 | reprojected = sourceTiles,
80 | inputPlugin = inputPlugin,
81 | writer = outputPlugin.writer(uconf),
82 | attributeStore = outputPlugin.attributes(uconf))
83 | }
84 |
85 | sc.stop()
86 | }
87 | }
88 |
--------------------------------------------------------------------------------
/ingest/src/main/scala/demo/etl/landsat/LandsatInput.scala:
--------------------------------------------------------------------------------
1 | package demo.etl.landsat
2 |
3 | import geotrellis.proj4.{CRS, LatLng, WebMercator}
4 | import geotrellis.raster.split.Split
5 | import geotrellis.raster._
6 | import geotrellis.spark._
7 | import geotrellis.spark.etl.InputPlugin
8 | import geotrellis.spark.etl.config.EtlConf
9 | import geotrellis.spark.tiling._
10 |
11 | import com.azavea.landsatutil.LandsatImage
12 | import org.apache.spark.SparkContext
13 | import org.apache.spark.rdd.RDD
14 | import jp.ne.opt.chronoscala.Imports._
15 |
16 | import scala.util.Try
17 |
18 | abstract class LandsatInput[I, V] extends InputPlugin[I, V] {
19 | val name = "landsat"
20 |
21 | var images: Seq[LandsatImage] = Seq()
22 |
23 | def fetchMethod: (LandsatImage, EtlConf) => Option[ProjectedRaster[MultibandTile]] = { (img, conf) =>
24 | Try { img.getRasterFromS3(bandsWanted = conf.bandsWanted, hook = conf.cacheHook) }
25 | .recover{ case err => img.getFromGoogle(bandsWanted = conf.bandsWanted, hook = conf.cacheHook).raster }
26 | .toOption
27 | }
28 |
29 | /** Calculate the layer metadata for the incoming landsat images
30 | *
31 | * Normally we would have no information about the incoming rasters and would be forced
32 | * to use [[TileLayerMetadata.fromRdd]] to collect it before we could tile the imagery.
33 | * But in this case the pre-query from scala-landsatutil is providing enough
34 | * information that the metadata can be calculated.
35 | *
36 | * Collecting metadata before the tiling step requires either reading the data twice
37 | * or caching records in Spark memory. In either case, avoiding collection is a performance boost.
38 | */
39 |
40 | def calculateTileLayerMetadata(maxZoom: Int = 13, destCRS: CRS = WebMercator) = {
41 | val layoutDefinition = ZoomedLayoutScheme.layoutForZoom(maxZoom, destCRS.worldExtent, 256)
42 | val imageExtent = images.map(_.footprint.envelope).reduce(_ combine _).reproject(LatLng, destCRS)
43 | val dateMin = images.map(_.aquisitionDate).min
44 | val dateMax = images.map(_.aquisitionDate).max
45 | val GridBounds(colMin, rowMin, colMax, rowMax) = layoutDefinition.mapTransform(imageExtent)
46 | TileLayerMetadata(
47 | cellType = UShortCellType,
48 | layout = layoutDefinition,
49 | extent = imageExtent,
50 | crs = destCRS,
51 | bounds = KeyBounds(
52 | SpaceTimeKey(colMin, rowMin, dateMin),
53 | SpaceTimeKey(colMax, rowMax, dateMax))
54 | )
55 | }
56 |
57 | /** Transforms a collection of Landsat image descriptions into RDD of MultibandTiles.
58 | * Each landsat scene is downloaded, reprojected and then split into 256x256 chunks.
59 | * Chunking the scene allows for greater parallelism and reduces the memory pressure
60 | * produced by processing each partition.
61 | */
62 | def fetch(
63 | conf: EtlConf,
64 | images: Seq[LandsatImage],
65 | source: (LandsatImage, EtlConf) => Option[ProjectedRaster[MultibandTile]]
66 | )(implicit sc: SparkContext): RDD[(TemporalProjectedExtent, MultibandTile)] = {
67 | sc.parallelize(images, images.length) // each image gets its own partition
68 | .mapPartitions({ iter =>
69 | for {
70 | img <- iter
71 | ProjectedRaster(raster, crs) <- source(img, conf).toList
72 | reprojected = raster.reproject(crs, WebMercator) // reprojection before chunking avoids NoData artifacts
73 | layoutCols = math.ceil(reprojected.cols.toDouble / 256).toInt
74 | layoutRows = math.ceil(reprojected.rows.toDouble / 256).toInt
75 | chunk <- reprojected.split(TileLayout(layoutCols, layoutRows, 256, 256), Split.Options(cropped = false, extend = false))
76 | } yield {
77 | TemporalProjectedExtent(chunk.extent, WebMercator, img.aquisitionDate) -> chunk.tile
78 | }
79 | }, preservesPartitioning = true)
80 | .repartition(images.length * 16) // Break up each scene into 16 partitions
81 | }
82 | }
--------------------------------------------------------------------------------
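calculateTileLayerMetadata above works because, given the target zoom and CRS, a footprint extent maps to grid bounds with nothing more than a division of the world extent into 2^z x 2^z tiles. Here is a minimal sketch of that mapping with no GeoTrellis imports; the constants are the usual WebMercator world bounds and the sample coordinate (roughly New York City) is only illustrative.

    object GridBoundsSketch {
      val worldMin = -20037508.342789244 // WebMercator world extent, meters
      val worldMax = 20037508.342789244

      /** Column/row of the tile containing (x, y) at the given zoom. */
      def colRow(x: Double, y: Double, zoom: Int): (Int, Int) = {
        val tiles = 1 << zoom                     // 2^zoom tiles per axis
        val span = (worldMax - worldMin) / tiles  // width/height of one tile in meters
        val col = ((x - worldMin) / span).toInt
        val row = ((worldMax - y) / span).toInt   // rows count down from the top
        (col, row)
      }

      def main(args: Array[String]): Unit =
        println(colRow(-8238310.0, 4970071.0, zoom = 13)) // (2412,3080)
    }

Applying this to the corners of the combined image footprint (after reprojection to the destination CRS) gives the GridBounds that, together with the minimum and maximum acquisition dates, define the KeyBounds above.
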
/ingest/src/main/scala/demo/etl/landsat/package.scala:
--------------------------------------------------------------------------------
1 | package demo.etl
2 |
3 | import geotrellis.spark.etl._
4 | import geotrellis.spark.etl.config._
5 | import geotrellis.spark.io.AttributeStore
6 | import geotrellis.spark.io.hadoop._
7 | import geotrellis.spark.io.accumulo.AccumuloAttributeStore
8 | import geotrellis.spark.io.hbase.HBaseAttributeStore
9 | import geotrellis.spark.io.cassandra.CassandraAttributeStore
10 | import geotrellis.spark.io.hadoop.HadoopAttributeStore
11 | import geotrellis.spark.io.s3.S3AttributeStore
12 |
13 | import org.apache.hadoop.conf.Configuration
14 | import org.apache.spark.SparkConf
15 | import org.apache.spark.deploy.SparkHadoopUtil
16 |
17 | import java.net.URI
18 |
19 | package object landsat {
20 | implicit class withEtlConfLandsatMethods(val self: EtlConf) extends EtlConfLandsatMethods
21 |
22 | private[landsat] def getAttributeStore(conf: EtlConf): AttributeStore = {
23 | conf.output.backend.`type` match {
24 | case AccumuloType => {
25 | AccumuloAttributeStore(conf.outputProfile.collect { case ap: AccumuloProfile =>
26 | ap.getInstance
27 | }.get.connector)
28 | }
29 | case HBaseType => {
30 | HBaseAttributeStore(conf.outputProfile.collect { case ap: HBaseProfile =>
31 | ap.getInstance
32 | }.get)
33 | }
34 | case CassandraType => {
35 | CassandraAttributeStore(conf.outputProfile.collect { case cp: CassandraProfile =>
36 | cp.getInstance
37 | }.get)
38 | }
39 | case HadoopType | FileType =>
40 | HadoopAttributeStore(hadoop.getPath(conf.output.backend).path, SparkHadoopUtil.get.newConfiguration(new SparkConf()))
41 | case S3Type => {
42 | val path = s3.getPath(conf.output.backend)
43 | S3AttributeStore(path.bucket, path.prefix)
44 | }
45 | case UserDefinedBackendType(s) => throw new Exception(s"No Attribute store for user defined backend type $s")
46 | case UserDefinedBackendInputType(s) => throw new Exception(s"No Attribute store for user defined backend input type $s")
47 | }
48 | }
49 |
50 | def confWithDefaults(conf: EtlConf) = {
51 | def getDefaultFS = {
52 | val conf = new Configuration // if not specified assume zookeeper is same as DFS master
53 | new URI(conf.get("fs.defaultFS")).getHost
54 | }
55 |
56 | conf.output.backend.`type` match {
57 | case AccumuloType =>
58 | new EtlConf(
59 | input = conf.input,
60 | output = conf.output.copy(
61 | backend = conf.output.backend.copy(
62 | profile = conf.output.backend.profile.map {
63 | case ap: AccumuloProfile => if(ap.zookeepers.isEmpty) ap.copy(zookeepers = getDefaultFS) else ap
64 | case p => p
65 | }
66 | )
67 | )
68 | )
69 | case CassandraType =>
70 | new EtlConf(
71 | input = conf.input,
72 | output = conf.output.copy(
73 | backend = conf.output.backend.copy(
74 | profile = conf.output.backend.profile.map {
75 | case ap: CassandraProfile => if(ap.hosts.isEmpty) ap.copy(hosts = getDefaultFS) else ap
76 | case p => p
77 | }
78 | )
79 | )
80 | )
81 | case HBaseType =>
82 | new EtlConf(
83 | input = conf.input,
84 | output = conf.output.copy(
85 | backend = conf.output.backend.copy(
86 | profile = conf.output.backend.profile.map {
87 | case ap: HBaseProfile => {
88 | val nap = if (ap.zookeepers.isEmpty) ap.copy(zookeepers = getDefaultFS) else ap
89 | if(ap.master.isEmpty) nap.copy(master = getDefaultFS) else nap
90 | }
91 | case p => p
92 | }
93 | )
94 | )
95 | )
96 | case _ => conf
97 | }
98 | }
99 |
100 | def getPath(b: Backend): UserDefinedPath = {
101 | b.path match {
102 | case p: UserDefinedPath => p
103 | case _ => throw new Exception("Path string does not correspond to the backend type")
104 | }
105 | }
106 | }
107 |
--------------------------------------------------------------------------------
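confWithDefaults above assumes that when a backend profile leaves its ZooKeeper quorum or host list empty, the host behind fs.defaultFS is a reasonable stand-in; the in-code comment notes that ZooKeeper is assumed to live on the same host as the DFS master. A tiny sketch of that host extraction using only java.net.URI, with a hypothetical hostname for illustration:

    import java.net.URI

    object DefaultFsHostSketch {
      def hostOf(defaultFs: String): String = new URI(defaultFs).getHost

      def main(args: Array[String]): Unit =
        println(hostOf("hdfs://ip-10-0-0-12.ec2.internal:8020")) // "ip-10-0-0-12.ec2.internal"
    }
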
/viewer/components/charts/IndexComparison.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { render } from 'react-dom';
3 | import shortid from 'shortid';
4 | import _ from 'lodash';
5 | import Loader from 'react-loader';
6 |
7 | var IndexComparison = React.createClass({
8 | getInitialState: function () {
9 | return { loaded: false };
10 | },
11 | _fetchPolygonalSummary: function(polyLayer, ndi, times, layerType) {
12 | let root = polyLayer.chartProps.rootURL;
13 | let layerName = polyLayer.chartProps.layerName;
14 | let latlng = polyLayer._latlng;
15 | let timeQString = `?time=${this.props.times[0]}`;
16 | let otherTimeQString = (layerType == 'intraLayerDiff' ? `&otherTime=${this.props.times[1]}` : '');
17 | let url = `${root}/mean/${layerName}/${ndi}` + timeQString + otherTimeQString;
18 |
19 | return fetch(url, {
20 | method: 'POST',
21 | body: JSON.stringify(polyLayer.toGeoJSON().geometry)
22 | }).then( response => {
23 | response.json().then( summary => {
24 | var data = summary.answer;
25 |
26 | if (layerType == 'intraLayerDiff') {
27 | polyLayer.comparisonStats[ndi] = data;
28 | } else {
29 | polyLayer.stats[ndi] = data;
30 | }
31 | this.setState({ loaded: true });
32 | this._renderChart(polyLayer, ndi, layerType);
33 | });
34 | },
35 | error => {});
36 | },
37 | _fillBox: function(ctx, value, ndi) {
38 | let color = ndi === 'ndvi' ? '#64c59d' : '#add8e6';
39 | ctx.fillStyle = color;
40 | ctx.fillRect(
41 | (value > 0 ? 150 : 150 + (value * 150)),
42 | 50,
43 | Math.abs(value) * 150,
44 | 130
45 | );
46 | },
47 | _renderChart: function(polyLayer, ndi, layerType) {
48 | let ctx = document.getElementById("canvas").getContext('2d');
49 | let canvas = {
50 | width: 300,
51 | height: 200
52 | };
53 | ctx.clearRect(0, 0, canvas.width, canvas.height);
54 | if (layerType == 'intraLayerDiff') {
55 | this._fillBox(ctx, polyLayer.comparisonStats[ndi], ndi);
56 | } else {
57 | this._fillBox(ctx, polyLayer.stats[ndi], ndi);
58 | }
59 | ctx.fillStyle = '#000000';
60 | ctx.font = '15px Arial';
61 |
62 | // Index bottom
63 | ctx.textAlign = 'start';
64 | ctx.fillText('-1', 5, 20);
65 | ctx.beginPath();
66 | ctx.moveTo(0, 40);
67 | ctx.lineTo(0, canvas.height);
68 | ctx.stroke();
69 |
70 | // Index middle
71 | ctx.textAlign = 'center';
72 | ctx.fillText('0', 150, 20);
73 | ctx.beginPath();
74 | ctx.moveTo(150, 40);
75 | ctx.lineTo(150, canvas.height);
76 | ctx.stroke();
77 |
78 | // Index top
79 | ctx.textAlign = 'right';
80 | ctx.fillText('1', 295, 20);
81 | ctx.beginPath();
82 | ctx.moveTo(300, 40);
83 | ctx.lineTo(300, canvas.height);
84 | ctx.stroke();
85 | },
86 | componentDidMount: function() {
87 | if (this.props.layerType === 'intraLayerDiff') {
88 | if (! this.props.poly.comparisonStats[this.props.ndi]) {
89 | this.setState({ loaded: false });
90 | this._fetchPolygonalSummary(this.props.poly, this.props.ndi, this.props.times, this.props.layerType);
91 | } else {
92 | this.setState({ loaded: true });
93 | this._renderChart(this.props.poly, this.props.ndi, this.props.layerType);
94 | }
95 | } else {
96 | if (! this.props.poly.stats[this.props.ndi]) {
97 | this.setState({ loaded: false });
98 | this._fetchPolygonalSummary(this.props.poly, this.props.ndi, this.props.times, this.props.layerType);
99 | } else {
100 | this.setState({ loaded: true });
101 | this._renderChart(this.props.poly, this.props.ndi, this.props.layerType);
102 | }
103 | }
104 | },
105 | componentWillReceiveProps: function(nextProps) {
106 | if (nextProps.layerType === 'intraLayerDiff') {
107 | if (! nextProps.poly.comparisonStats[nextProps.ndi]) {
108 | this.setState({ loaded: false });
109 | this._fetchPolygonalSummary(nextProps.poly, nextProps.ndi, nextProps.times, nextProps.layerType);
110 | } else if (this.state.loaded) {
111 | this._renderChart(nextProps.poly, nextProps.ndi, nextProps.layerType);
112 | }
113 | } else {
114 | if (! nextProps.poly.stats[nextProps.ndi]) {
115 | this.setState({ loaded: false });
116 | this._fetchPolygonalSummary(nextProps.poly, nextProps.ndi, nextProps.times, nextProps.layerType);
117 | } else if (this.state.loaded) {
118 | this._renderChart(nextProps.poly, nextProps.ndi, nextProps.layerType);
119 | }
120 | }
121 | },
122 | render: function() {
123 | let loading = this.state.loaded ? null : (Loading data...
)
124 | return (
125 |
126 | {loading}
127 |
128 |
129 | );
130 | }
131 | });
132 |
133 | module.exports = IndexComparison;
134 |
--------------------------------------------------------------------------------
/viewer/components/SingleLayer.js:
--------------------------------------------------------------------------------
1 | "use strict";
2 | import React from 'react';
3 | import _ from 'lodash';
4 | import { PanelGroup, Panel, Input, Button, ButtonGroup } from 'react-bootstrap';
5 | import ifAllDefined from "../utils/utils";
6 |
7 | function updateSingleLayerMap (showLayerWithBreaks, showLayer, root, op, layer, t1) {
8 | // Single Band Calculation
9 | let time1 = layer.times[t1];
10 | if(layer.isLandsat) {
11 | let opc = (op != "none") ? `&operation=${op}` : "";
12 | showLayer(`${root}/tiles/${layer.name}/{z}/{x}/{y}?time=${time1}${opc}`);
13 | } else {
14 | showLayerWithBreaks(
15 | `${root}/tiles/${layer.name}/{z}/{x}/{y}?time=${time1}`,
16 | `${root}/tiles/breaks/${layer.name}?time=${time1}`
17 | );
18 | }
19 | };
20 |
21 | var SingleLayer = React.createClass({
22 | getInitialState: function () {
23 | return {
24 | operation: "none",
25 | layerId: undefined, // layer index
26 | timeId: undefined, // time index in layer
27 | times: {} // maps from layerId => {timeId1 , timeId2}
28 | };
29 | },
30 | handleTimeSelect: function(ev, currentLayer) {
31 | this.updateState("timeId", +ev.target.value);
32 | this.props.registerTime(currentLayer.times[ev.target.value], 0);
33 | },
34 | handleLayerSelect: function(ev) {
35 | let layerId = +ev.target.value;
36 | let newState = _.merge({}, this.state, {
37 | "layerId": layerId,
38 | "time": _.get(this.state.times[layerId], "time", undefined),
39 | "times": { // Saves time selection when switching layer
40 | [this.state.layerId]: {
41 | "time": this.state.time
42 | }
43 | }
44 | });
45 |
46 | this.setState(newState);
47 | this.props.setLayerName(this.props.layers[layerId])
48 | this.props.registerTime(this.state.time, 0)
49 | this.updateMap(newState);
50 | this.props.showExtent(this.props.layers[layerId].extent);
51 | },
52 | updateState: function(target, value) {
53 | let newState = _.merge({}, this.state, {[target]: value});
54 | this.setState(newState);
55 | this.updateMap(newState);
56 | },
57 | ping: function () {
58 | alert("PING");
59 | },
60 | updateMap: function (state) {
61 | if (! state) { state = this.state; }
62 | ifAllDefined(
63 | this.props.showLayerWithBreaks,
64 | this.props.showLayer,
65 | this.props.rootUrl,
66 | state.operation,
67 | this.props.layers[state.layerId],
68 | state.timeId
69 | )(updateSingleLayerMap);
70 | this.props.showExtent(this.props.layers[state.layerId].extent);
71 | },
72 | componentWillReceiveProps: function (nextProps){
73 | /** Use this as an opportunity to react to a prop transition before render() is called by updating the state using this.setState().
74 | * The old props can be accessed via this.props. Calling this.setState() within this function will not trigger an additional render. */
75 | if ( _.isUndefined(this.state.layerId) && ! _.isEmpty(nextProps.layers)) {
76 | // we are blank and now is our chance to choose a layer and some times
77 | let newState = _.merge({}, this.state, { layerId: 0, timeId: 0 });
78 | let layer = nextProps.layers[0];
79 | this.setState(newState);
80 | updateSingleLayerMap(nextProps.showLayerWithBreaks,
81 | nextProps.showLayer,
82 | nextProps.rootUrl,
83 | this.state.operation,
84 | layer, 0);
85 | nextProps.showExtent(layer.extent);
86 | }
87 | },
88 | render: function() {
89 | let layer = this.props.layers[this.state.layerId];
90 | let isLandsat = _.get(layer, "isLandsat", false);
91 |
92 | let layerOptions =
93 | _.map(this.props.layers, (layer, index) => {
94 | return ;
95 | });
96 |
97 | let layerTimes =
98 | _.map(_.get(layer, "times", []), (time, index) => {
99 | return ;
100 | });
101 |
102 |
103 | return (
104 |
105 | this.handleLayerSelect(e)}>
107 | {layerOptions}
108 |
109 |
110 | this.handleTimeSelect(e, layer)}>
112 | {layerTimes}
113 |
114 |
115 | this.updateState("operation", e.target.value)} >
121 |
122 |
123 |
124 |
125 |
126 | )
127 | }
128 | });
129 |
130 | module.exports = SingleLayer;
131 |
--------------------------------------------------------------------------------
/viewer/components/DiffLayer.js:
--------------------------------------------------------------------------------
1 | "use strict";
2 | import React from 'react';
3 | import _ from 'lodash';
4 | import { PanelGroup, Panel, Input, Button, ButtonGroup } from 'react-bootstrap';
5 | import ifAllDefined from "../utils/utils";
6 |
7 | function updateIntraLayerDiffMap (showLayerWithBreaks, showLayer, root, op, layer, t1, t2) {
8 | let time1 = layer.times[t1];
9 | let time2 = layer.times[t2];
10 | if(layer.isLandsat) {
11 | let opc = ((op != "none")) ? `&operation=${op}` : "";
12 | showLayer(`${root}/diff/${layer.name}/{z}/{x}/{y}?time1=${time1}&time2=${time2}${opc}`);
13 | } else {
14 | showLayerWithBreaks(
15 | `${root}/diff/${layer.name}/{z}/{x}/{y}?time1=${time1}&time2=${time2}`,
16 | `${root}/diff/breaks/${layer.name}?time1=${time1}&time2=${time2}`
17 | );
18 | }
19 | };
20 |
21 | var MapViews = React.createClass({
22 | getInitialState: function () {
23 | return {
24 | layerId: 0,
25 | operation: "ndvi",
26 | timeId1: 1,
27 | timeId2: 0,
28 | times: {}
29 | };
30 | },
31 | handleTimeSelect: function(ev, currentLayer, index) {
32 | let displayIndex = index + 1; // index is 0 indexed
33 | this.updateState("timeId" + displayIndex, +ev.target.value);
34 |
35 | this.props.registerTime(currentLayer.times[ev.target.value], index);
36 | },
37 | handleLayerSelect: function(ev) {
38 | let layerId = +ev.target.value;
39 | let newState = _.merge({}, this.state, {
40 | "layerId": layerId,
41 | "timeId1": _.get(this.state.times[layerId], "timeId1", undefined),
42 | "timeId2": _.get(this.state.times[layerId], "timeId2", undefined),
43 | "times": { // Saves time selectio when switching layer
44 | [this.state.layerId]: {
45 | "timeId1": this.state.timeId1,
46 | "timeId2": this.state.timeId2
47 | }
48 | }
49 | });
50 |
51 | this.setState(newState);
52 | this.updateMap(newState);
53 | this.props.showExtent(this.props.layers[layerId].extent);
54 | },
55 | updateState: function(target, value) {
56 | let newState = _.merge({}, this.state, {[target]: value});
57 | this.setState(newState);
58 | this.updateMap(newState);
59 | },
60 | updateMap: function (state) {
61 | if (! state) {state = this.state; }
62 | ifAllDefined(this.props.showLayerWithBreaks,
63 | this.props.showLayer,
64 | this.props.rootUrl,
65 | state.operation,
66 | this.props.layers[state.layerId],
67 | state.timeId1,
68 | state.timeId2)(updateIntraLayerDiffMap);
69 | this.props.showExtent(this.props.layers[state.layerId].extent);
70 | },
71 | componentWillReceiveProps: function (nextProps){
72 | /** Use this as an opportunity to react to a prop transition before render() is called by updating the state using this.setState().
73 | * The old props can be accessed via this.props. Calling this.setState() within this function will not trigger an additional render. */
74 | if ( _.isUndefined(this.state.layerId) && ! _.isEmpty(nextProps.layers)) {
75 | // we are blank and now is our chance to choose a layer and some times
76 | let newState = _.merge({}, this.state, { layerId: 0, timeId1: 1, timeId2: 0 });
77 | this.setState(newState);
78 | var layer = nextProps.layers[0];
79 | updateIntraLayerDiffMap(nextProps.showLayerWithBreaks, this.props.showLayer, nextProps.rootUrl, this.state.operation, layer, 0, 1);
80 | nextProps.showExtent(layer.extent);
81 | }
82 | },
83 | render: function() {
84 | let layer = this.props.layers[this.state.layerId];
85 | let isLandsat = _.get(layer, "isLandsat", false);
86 | let defaultOp = isLandsat ? "ndvi" : "none";
87 |
88 | let layerOptions =
89 | _.map(this.props.layers, (layer, index) => {
90 | return ;
91 | });
92 |
93 | let layerTimes =
94 | _.map(_.get(layer, "times", []), (time, index) => {
95 | return ;
96 | });
97 |
98 | return (
99 |
100 | this.handleLayerSelect(e)}>
102 | {layerOptions}
103 |
104 |
105 | this.handleTimeSelect(ev, layer, 0)}>
107 | {layerTimes}
108 |
109 |
110 | this.handleTimeSelect(ev, layer, 1)}>
112 | {layerTimes}
113 |
114 | this.updateState("operation", e.target.value)}>
117 | { isLandsat ? : null }
118 | { isLandsat ? : null }
119 | { isLandsat ? null : }
120 |
121 |
122 | )
123 | }
124 | });
125 |
126 | module.exports = MapViews;
127 |
--------------------------------------------------------------------------------
/viewer/components/Panels.js:
--------------------------------------------------------------------------------
1 | "use strict";
2 | import React from 'react';
3 | import _ from 'lodash';
4 | import { PanelGroup, Panel, Input, Button, ButtonGroup, Form } from 'react-bootstrap';
5 | import SingleLayer from "./SingleLayer";
6 | import DiffLayer from "./DiffLayer";
7 | import TimeSeries from "./charts/TimeSeries.js";
8 | import IndexComparison from "./charts/IndexComparison.js";
9 |
10 | var Panels = React.createClass({
11 | getInitialState: function () {
12 | return {
13 | activePane: 1,
14 | autoZoom: true
15 | };
16 | },
17 | handleNDI: function(ndi) {
18 | this.props.setIndexType(ndi);
19 | },
20 | handleAutoZoom: function(e) {
21 | let v = e.target.checked || false;
22 | this.setState(_.merge({}, this.state, {autoZoom: v}));
23 | if (v) this.props.showExtent(this.props.layers[this.state.layerId1].extent);
24 | },
25 | handlePaneSelect: function(id) {
26 | console.log("PANE SELECT %s", id);
27 | let newState = _.merge({}, this.state, { activePane: +id });
28 | this.setState(newState);
29 | if (id == 1) {
30 | this.props.setLayerType('singleLayer');
31 | } else if (id == 2) {
32 | this.props.setLayerType('intraLayerDiff');
33 | }
34 | },
35 | updateState: function(target, value) {
36 | let newState = _.merge({}, this.state, {[target]: value});
37 | this.setState(newState);
38 | },
39 | showExtent: function(id) {
40 | var self = this;
41 | return function() {
42 | if (id == self.state.activePane && self.state.autoZoom) { // if the message is from active pane, pass it on
43 | self.props.showExtent.apply(this, arguments);
44 | }
45 | };
46 | },
47 | showLayer: function (id) {
48 | var self = this;
49 | return function() {
50 | if (id == self.state.activePane) { // if the message is from active pane, pass it on
51 | return self.props.showLayer.apply(self, arguments);
52 | } else {
53 | return null;
54 | }
55 | };
56 | },
57 | showLayerWithBreaks: function (id) {
58 | var self = this;
59 | return function() {
60 | if (id == self.state.activePane) { // if the message is from active pane, pass it on
61 | return self.props.showLayerWithBreaks.apply(self, arguments);
62 | } else {
63 | return null;
64 | }
65 | };
66 | },
67 | componentDidUpdate: function(prevProps, prevState) {
68 | // force map refresh if either the pane selection changed or auto-zoom was clicked
69 | // this must happen after state update in order for this.showLayerWithBreaks to pass the info
70 | if (this.state != prevState) {
71 | switch (this.state.activePane) {
72 | case 1:
73 | this.refs.single.updateMap();
74 | break;
75 | case 2:
76 | this.refs.diff.updateMap();
77 | break;
78 | }
79 | }
80 | },
81 | render: function() {
82 | let nonLandsatLayers = _.filter(this.props.layers, l => {return ! l.isLandsat});
83 |
84 | var chartPanel;
85 | if (this.props.analysisLayer) {
86 | if (this.props.analysisLayer.chartProps.geomType == 'point') {
87 | chartPanel = (
88 |
89 |
90 |
91 |
92 |
93 |
95 | )
96 | } else {
97 | chartPanel = (
98 |
99 |
100 |
101 |
102 |
103 |
107 |
108 | );
109 | }
110 | }
111 |
112 | return (
113 |
114 |
115 |
116 |
117 |
128 |
129 |
130 |
131 |
140 |
141 |
142 |
143 | {chartPanel}
144 |
)
145 | }
146 | });
147 |
148 | module.exports = Panels;
149 |
--------------------------------------------------------------------------------
/viewer/components/Leaflet.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { render } from 'react-dom';
3 | import { Map, Marker, Popup, TileLayer, BaseTileLayer, GeoJson, FeatureGroup, Circle } from 'react-leaflet';
4 | import { EditControl } from 'react-leaflet-draw';
5 | import _ from 'lodash';
6 | import $ from 'jquery';
7 | import L from 'leaflet';
8 | import "leaflet/dist/leaflet.css";
9 | import 'leaflet-draw/dist/leaflet.draw.css';
10 | import './leaflet-icons.css';
11 |
12 |
13 | var Leaflet = React.createClass({
14 |
15 | _onlayeradd: function(ev) {
16 | var fgroup = ev.target;
17 | var addedLayer = ev.layer;
18 | var setAnalysisLayer = this.props.setAnalysisLayer;
19 |
20 | // Initialize object for holding chart-rendering parameters
21 | addedLayer.chartProps = {
22 | rootURL: this.props.rootUrl,
23 | times: this.props.times,
24 | layerName: this.props.layerName,
25 | comparisonType: this.props.layerType,
26 | geomType: (_.isUndefined(addedLayer._latlngs) ? 'point' : 'polygon'),
27 | selected: false
28 | };
29 |
30 | // Initialize object for different types of statistic
31 | addedLayer.stats = {};
32 | addedLayer.comparisonStats = {};
33 |
34 | let setMarkerSelection = function(marker) {
35 | if (marker.chartProps.selected) {
36 | marker.setIcon(L.divIcon({className: 'selected-marker'}));
37 | } else {
38 | marker.setIcon(L.divIcon({className: 'unselected-marker'}));
39 | }
40 | };
41 |
42 | let setPolySelection = function(poly) {
43 | if (poly.chartProps.selected) {
44 | poly.setStyle({ color: '#ffff64' });
45 | } else {
46 | poly.setStyle({ color: '#64c59d' });
47 | }
48 | };
49 |
50 | let selectLayer = function() {
51 | let allLayers = fgroup.getLayers();
52 |
53 | // deselect all other layers
54 | _.chain(allLayers)
55 | .filter(function(l) { return l !== addedLayer; })
56 | .each(function(l) { l.chartProps.selected = false; })
57 | .value();
58 |
59 | addedLayer.chartProps.selected = true;
60 | _.each(allLayers, function(l) {
61 | if (l._latlng) {
62 | setMarkerSelection(l);
63 | } else {
64 | setPolySelection(l);
65 | }
66 | });
67 |
68 | setAnalysisLayer(addedLayer);
69 | };
70 | addedLayer.on('click', function(ev) { selectLayer(); });
71 | selectLayer();
72 | },
73 |
74 | _onDeleted: function(e) {
75 | console.log("Delete", e)
76 | },
77 |
78 | render: function() {
79 | const style = {
80 | minHeight: "800px", width: "100%"
81 | };
82 | let tileLayers = _.map(this.props.url, u => {
83 | return ;
84 | });
85 |
86 | let vectorLayers = [];
87 | if(this.props.maxState) {
88 | vectorLayers.push();
89 | }
90 |
91 | if(this.props.maxAverageState) {
92 | let state = this.props.maxAverageState;
93 | vectorLayers.push(
94 |
98 |
99 |
100 |
Name: {state.properties.name}
101 |
Average Temperature: {state.properties["Mean Temperature"]} °F
102 |
103 |
104 |
105 | );
106 | }
107 |
108 | if(this.props.stateAverage) {
109 | let state = this.props.stateAverage;
110 | vectorLayers.push(
111 |
115 |
116 |
117 |
Name: {state.properties.name}
118 |
Average Temperature: {parseFloat(Math.round(state.properties.meanTemp * 100) / 100).toFixed(2)} °F
119 |
120 |
121 |
122 | );
123 | }
124 |
125 | if(this.props.stateDiffAverage) {
126 | let state = this.props.stateDiffAverage;
127 | vectorLayers.push(
128 |
132 |
133 |
134 |
Name: {state.properties.name}
135 |
Average Difference of Temperature: {state.properties.meanTemp} °F
136 |
137 |
138 |
139 | );
140 | }
141 |
142 | let polyOptions = {
143 | stroke: true,
144 | weight: 3,
145 | color: '#64c59d',
146 | fillOpacity: 0.15,
147 | fillColor: null // falls back on stroke color
148 | };
149 |
150 | let markerOptions = {
151 | };
152 |
153 | return (
154 |
175 | );
176 | }
177 | });
178 |
179 | module.exports = Leaflet;
180 |
--------------------------------------------------------------------------------
/viewer/redux/actions.js:
--------------------------------------------------------------------------------
1 | import fetch from 'isomorphic-fetch';
2 | import shortid from 'shortid';
3 | import $ from 'jquery';
4 |
5 | var actions = {
6 | setLayerType: function(layerType) {
7 | return {
8 | type: 'SET_LAYER_TYPE',
9 | layer: layerType
10 | };
11 | },
12 | registerTime: function(time, index) {
13 | return {
14 | type: 'REGISTER_TIME',
15 | time: time,
16 | index: index
17 | };
18 | },
19 | setIndexType: function(ndi) {
20 | return {
21 | type: 'SET_NDI',
22 | ndi: ndi
23 | };
24 | },
25 | setLayerName: function(layerName) {
26 | return {
27 | type: 'SET_LAYERNAME',
28 | name: layerName
29 | };
30 | },
31 | showLayer: function (url) {
32 | return {
33 | type: 'SHOW_LAYER',
34 | url: url
35 | };
36 | },
37 | centerMap: function (extent) {
38 | return {
39 | type: 'CENTER_MAP',
40 | extent: extent
41 | };
42 | },
43 | showBounds: function(bounds) {
44 | return {
45 | type: 'SHOW_BOUNDS',
46 | bounds: bounds
47 | };
48 | },
49 | showExtent: function(extent) {
50 | return actions.showBounds([ [extent[0][1], extent[0][0]], [extent[1][1], extent[1][0]] ]);
51 | },
52 | loadCatalogRequest: function(url) {
53 | return {
54 | type: 'LOAD_CATALOG_REQUEST',
55 | url: url
56 | };
57 | },
58 | loadCatalogSuccess: function(url, catalog) {
59 | return {
60 | type: 'LOAD_CATALOG_SUCCESS',
61 | url: url,
62 | catalog: catalog
63 | };
64 | },
65 | loadCatalogFailure: function(url, error) {
66 | return {
67 | type: 'LOAD_CATALOG_ERROR',
68 | url: url,
69 | error: error
70 | };
71 | },
72 | setAnalysisLayer: function(layer) {
73 | return {
74 | type: 'SET_ANALYSIS_LAYER',
75 | layer: layer
76 | };
77 | },
78 | fetchCatalog: function (url) {
79 | return dispatch => {
80 | dispatch(actions.loadCatalogRequest(url));
81 | console.log("FETCH CATALOG", url + "/catalog");
82 | return fetch(url + "/catalog").then( response => {
83 | response.json().then( json => {
84 | dispatch(actions.loadCatalogSuccess(url, json));
85 | });
86 | },
87 | error => dispatch(actions.loadCatalogFailure(url, error)));
88 | };
89 | },
90 | fetchPolygonalSummary: function(polygonLayer) {
91 | // type should be NDVI or NDWI
92 | // answer should be the computed mean value
93 | let singlePolySummaryTemplate = _.template("Average <%- type %>: <%- answer %>
");
94 |
95 | return dispatch => {
96 | console.log("Fetching polygonal summary", polygonLayer.toGeoJSON().geometry);
97 | //polygonLayer.bindPopup('Loading...
');
98 | return fetch(url, {
99 | method: 'POST',
100 | body: JSON.stringify(polygonLayer.toGeoJSON().geometry)
101 | }).then( response => {
102 | response.json().then( summary => {
103 | summary.type = indexType;
104 | if (_.isNull(summary.answer)) {
105 | polygonLayer.stats[polygonLayer.chartProps.ndi] = null;
106 | } else {
107 | polygonLayer.stats[polygonLayer.chartProps.ndi] = summary.answer.toFixed(4);
108 | }
109 | });
110 | },
111 | error => {
112 | });
113 | };
114 | },
115 | showLayerWithBreaks: function(layerUrl, breaksUrl, layerId) {
116 | return dispatch => {
117 | console.log("Fetching breaks", breaksUrl);
118 | return fetch(breaksUrl).then( response => {
119 | response.json().then( breaks => {
120 | dispatch(actions.showLayer(layerUrl + "&breaks=" + breaks.join(","), layerId));
121 | });
122 | },
123 | error => {});
124 | };
125 | },
126 | showMaxState: function(url) {
127 | return dispatch => {
128 | console.log("Fetching max state", url);
129 | return fetch(url)
130 | .then(
131 | response => {
132 | response.json().then( geojson => {
133 | dispatch({
134 | type: 'SHOW_MAX_STATE',
135 | geojson: geojson
136 | });
137 | });
138 | },
139 | error => {}
140 | );
141 | };
142 | },
143 | hideMaxState: function() {
144 | return {
145 | type: 'HIDE_MAX_STATE'
146 | };
147 | },
148 | showMaxAverageState: function(url) {
149 | return dispatch => {
150 | console.log("Fetching max average state", url);
151 | return fetch(url)
152 | .then(
153 | response => {
154 | response.json().then( geojson => {
155 | dispatch({
156 | type: 'SHOW_MAX_AVERAGE_STATE',
157 | geojson: geojson
158 | });
159 | });
160 | },
161 | error => {}
162 | );
163 | };
164 | },
165 | hideMaxAverageState: function() {
166 | return {
167 | type: 'HIDE_MAX_AVERAGE_STATE'
168 | };
169 | },
170 | showStateAverage: function(url) {
171 | return dispatch => {
172 | console.log("Fetching state average", url);
173 | return fetch(url)
174 | .then(
175 | response => {
176 | response.json().then( geojson => {
177 | dispatch({
178 | type: 'SHOW_STATE_AVERAGE',
179 | geojson: geojson
180 | });
181 | });
182 | },
183 | error => {}
184 | );
185 | };
186 | },
187 | showStateDiffAverage: function(url) {
188 | return dispatch => {
189 | console.log("Fetching state average", url);
190 | return fetch(url)
191 | .then(
192 | response => {
193 | response.json().then( geojson => {
194 | dispatch({
195 | type: 'SHOW_STATE_DIFF_AVERAGE',
196 | geojson: geojson
197 | });
198 | });
199 | },
200 | error => {}
201 | );
202 | };
203 | }
204 | };
205 |
206 | module.exports = actions;
207 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | include config-aws.mk # Vars related to AWS credentials and services used
2 | include config-emr.mk # Vars related to type and size of EMR cluster
3 | include config-ingest.mk # Vars related to ingest step and spark parameters
4 |
5 | SERVER_ASSEMBLY := server/target/scala-2.11/server-assembly-0.1.0.jar
6 | INGEST_ASSEMBLY := ingest/target/scala-2.11/ingest-assembly-0.1.0.jar
7 | SCRIPT_RUNNER := s3://elasticmapreduce/libs/script-runner/script-runner.jar
8 |
9 | ifeq ($(USE_SPOT),true)
10 | MASTER_BID_PRICE:=BidPrice=${MASTER_PRICE},
11 | WORKER_BID_PRICE:=BidPrice=${WORKER_PRICE},
12 | BACKEND=accumulo
13 | endif
14 |
15 | ifdef COLOR
16 | COLOR_TAG=--tags Color=${COLOR}
17 | endif
18 |
19 | ifndef CLUSTER_ID
20 | CLUSTER_ID=$(shell if [ -e "cluster-id.txt" ]; then cat cluster-id.txt; fi)
21 | endif
22 |
23 | rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d))
24 |
25 | ${SERVER_ASSEMBLY}: $(call rwildcard, server/src, *.scala) server/build.sbt
26 | ./sbt "project server" assembly -no-colors
27 | @touch -m ${SERVER_ASSEMBLY}
28 |
29 | ${INGEST_ASSEMBLY}: $(call rwildcard, ingest/src, *.scala) ingest/build.sbt
30 | ./sbt "project ingest" assembly -no-colors
31 | @touch -m ${INGEST_ASSEMBLY}
32 |
33 | viewer/site.tgz: $(call rwildcard, viewer/components, *.js)
34 | @cd viewer && npm install && npm run build
35 | tar -czf viewer/site.tgz -C viewer/dist .
36 |
37 | upload-code: ${SERVER_ASSEMBLY} ${INGEST_ASSEMBLY} scripts/emr/* viewer/site.tgz
38 | @aws s3 cp viewer/site.tgz ${S3_URI}/
39 | @aws s3 cp scripts/emr/bootstrap-demo.sh ${S3_URI}/
40 | @aws s3 cp scripts/emr/bootstrap-geowave.sh ${S3_URI}/
41 | @aws s3 cp scripts/emr/geowave-install-lib.sh ${S3_URI}/
42 | @aws s3 cp conf/backend-profiles.json ${S3_URI}/
43 | @aws s3 cp conf/input.json ${S3_URI}/
44 | @aws s3 cp conf/output.json ${S3_URI}/output.json
45 | @aws s3 cp ${SERVER_ASSEMBLY} ${S3_URI}/
46 | @aws s3 cp ${INGEST_ASSEMBLY} ${S3_URI}/
47 |
48 | create-cluster:
49 | aws emr create-cluster --name "${NAME}" ${COLOR_TAG} \
50 | --release-label emr-5.0.0 \
51 | --output text \
52 | --use-default-roles \
53 | --configurations "file://$(CURDIR)/scripts/configurations.json" \
54 | --log-uri ${S3_URI}/logs \
55 | --ec2-attributes KeyName=${EC2_KEY},SubnetId=${SUBNET_ID} \
56 | --applications Name=Ganglia Name=Hadoop Name=Hue Name=Spark Name=Zeppelin \
57 | --instance-groups \
58 | 'Name=Master,${MASTER_BID_PRICE}InstanceCount=1,InstanceGroupType=MASTER,InstanceType=${MASTER_INSTANCE},EbsConfiguration={EbsOptimized=true,EbsBlockDeviceConfigs=[{VolumeSpecification={VolumeType=io1,SizeInGB=500,Iops=5000},VolumesPerInstance=1}]}' \
59 | 'Name=Workers,${WORKER_BID_PRICE}InstanceCount=${WORKER_COUNT},InstanceGroupType=CORE,InstanceType=${WORKER_INSTANCE},EbsConfiguration={EbsOptimized=true,EbsBlockDeviceConfigs=[{VolumeSpecification={VolumeType=io1,SizeInGB=500,Iops=5000},VolumesPerInstance=1}]}' \
60 | --bootstrap-actions \
61 | Name=BootstrapGeoWave,Path=${S3_URI}/bootstrap-geowave.sh \
62 | Name=BootstrapDemo,Path=${S3_URI}/bootstrap-demo.sh,\
63 | Args=[--tsj=${S3_URI}/server-assembly-0.1.0.jar,--site=${S3_URI}/site.tgz,--s3u=${S3_URI},--backend=${BACKEND}] \
64 | | tee cluster-id.txt
65 |
66 | create-cluster-hbase:
67 | aws emr create-cluster --name "${NAME}" ${COLOR_TAG} \
68 | --release-label emr-5.0.0 \
69 | --output text \
70 | --use-default-roles \
71 | --configurations "file://$(CURDIR)/scripts/configurations.json" \
72 | --log-uri ${S3_URI}/logs \
73 | --ec2-attributes KeyName=${EC2_KEY},SubnetId=${SUBNET_ID} \
74 | --applications Name=Ganglia Name=Hadoop Name=Hue Name=Spark Name=Zeppelin Name=HBase \
75 | --instance-groups \
76 | Name=Master,${MASTER_BID_PRICE}InstanceCount=1,InstanceGroupType=MASTER,InstanceType=${MASTER_INSTANCE} \
77 | Name=Workers,${WORKER_BID_PRICE}InstanceCount=${WORKER_COUNT},InstanceGroupType=CORE,InstanceType=${WORKER_INSTANCE} \
78 | --bootstrap-actions \
79 | Name=BootstrapDemo,Path=${S3_URI}/bootstrap-demo.sh,\
80 | Args=[--tsj=${S3_URI}/server-assembly-0.1.0.jar,--site=${S3_URI}/site.tgz,--backend=hbase] \
81 | | tee cluster-id.txt
82 |
83 | ingest: LIMIT=9999
84 | ingest:
85 | @if [ -z $$START_DATE ]; then echo "START_DATE is not set" && exit 1; fi
86 | @if [ -z $$END_DATE ]; then echo "END_DATE is not set" && exit 1; fi
87 |
88 | aws emr add-steps --output text --cluster-id ${CLUSTER_ID} \
89 | --steps Type=CUSTOM_JAR,Name="Ingest ${LAYER_NAME}",Jar=command-runner.jar,Args=[\
90 | spark-submit,--master,yarn-cluster,\
91 | --class,demo.LandsatIngestMain,\
92 | --driver-memory,${DRIVER_MEMORY},\
93 | --driver-cores,${DRIVER_CORES},\
94 | --executor-memory,${EXECUTOR_MEMORY},\
95 | --executor-cores,${EXECUTOR_CORES},\
96 | --conf,spark.dynamicAllocation.enabled=true,\
97 | --conf,spark.yarn.executor.memoryOverhead=${YARN_OVERHEAD},\
98 | --conf,spark.yarn.driver.memoryOverhead=${YARN_OVERHEAD},\
99 | ${S3_URI}/ingest-assembly-0.1.0.jar,\
100 | --input,"file:///tmp/input.json",\
101 | --output,"file:///tmp/output.json",\
102 | --backend-profiles,"file:///tmp/backend-profiles.json"\
103 | ] | cut -f2 | tee last-step-id.txt
104 |
105 | wait: INTERVAL:=60
106 | wait: STEP_ID=$(shell cat last-step-id.txt)
107 | wait:
108 | @while (true); do \
109 | OUT=$$(aws emr describe-step --cluster-id ${CLUSTER_ID} --step-id ${STEP_ID}); \
110 | [[ $$OUT =~ (\"State\": \"([A-Z]+)\") ]]; \
111 | echo $${BASH_REMATCH[2]}; \
112 | case $${BASH_REMATCH[2]} in \
113 | PENDING | RUNNING) sleep ${INTERVAL};; \
114 | COMPLETED) exit 0;; \
115 | *) exit 1;; \
116 | esac; \
117 | done
118 |
119 | terminate-cluster:
120 | aws emr terminate-clusters --cluster-ids ${CLUSTER_ID}
121 | rm -f cluster-id.txt
122 | rm -f last-step-id.txt
123 |
124 | clean:
125 | ./sbt clean -no-colors
126 | rm -rf viewer/site.tgz
127 | rm -rf viewer/dist/*
128 |
129 | proxy:
130 | aws emr socks --cluster-id ${CLUSTER_ID} --key-pair-file "${HOME}/${EC2_KEY}.pem"
131 |
132 | ssh:
133 | aws emr ssh --cluster-id ${CLUSTER_ID} --key-pair-file "${HOME}/${EC2_KEY}.pem"
134 |
135 | local-ingest: ${INGEST_ASSEMBLY}
136 | spark-submit --name "${NAME} Ingest" --master "local[4]" --driver-memory 4G \
137 | ${INGEST_ASSEMBLY} \
138 | --backend-profiles "file:///${PWD}/conf/backend-profiles.json" \
139 | --input "file://${PWD}/conf/input-local.json" \
140 | --output "file://${PWD}/conf/output-local.json"
141 |
142 | local-tile-server: CATALOG=catalog
143 | local-tile-server:
144 | spark-submit --name "${NAME} Service" --master "local" --driver-memory 1G \
145 | ${SERVER_ASSEMBLY} local ${CATALOG}
146 |
147 | define UPSERT_BODY
148 | {
149 | "Changes": [{
150 | "Action": "UPSERT",
151 | "ResourceRecordSet": {
152 | "Name": "${1}",
153 | "Type": "CNAME",
154 | "TTL": 300,
155 | "ResourceRecords": [{
156 | "Value": "${2}"
157 | }]
158 | }
159 | }]
160 | }
161 | endef
162 |
163 | update-route53: VALUE=$(shell aws emr describe-cluster --output text --cluster-id $(CLUSTER_ID) | egrep "^CLUSTER" | cut -f5)
164 | update-route53: export UPSERT=$(call UPSERT_BODY,${ROUTE53_RECORD},${VALUE})
165 | update-route53:
166 | @tee scripts/upsert.json <<< "$$UPSERT"
167 | aws route53 change-resource-record-sets \
168 | --hosted-zone-id ${HOSTED_ZONE} \
169 | --change-batch "file://$(CURDIR)/scripts/upsert.json"
170 |
171 | get-logs:
172 | @aws emr ssh --cluster-id $(CLUSTER_ID) --key-pair-file "${HOME}/${EC2_KEY}.pem" \
173 | --command "rm -rf /tmp/spark-logs && hdfs dfs -copyToLocal /var/log/spark/apps /tmp/spark-logs"
174 | @mkdir -p logs/$(CLUSTER_ID)
175 | @aws emr get --cluster-id $(CLUSTER_ID) --key-pair-file "${HOME}/${EC2_KEY}.pem" --src "/tmp/spark-logs/" --dest logs/$(CLUSTER_ID)
176 |
177 | update-site: viewer/site.tgz
178 | @aws s3 cp viewer/site.tgz ${S3_URI}/
179 | @aws emr ssh --cluster-id $(CLUSTER_ID) --key-pair-file "${HOME}/${EC2_KEY}.pem" \
180 | --command "aws s3 cp ${S3_URI}/site.tgz /tmp/site.tgz && sudo tar -xzf /tmp/site.tgz -C /var/www/html"
181 |
182 | update-tile-server: ${SERVER_ASSEMBLY}
183 | @aws s3 cp ${SERVER_ASSEMBLY} ${S3_URI}/
184 | @aws emr ssh --cluster-id $(CLUSTER_ID) --key-pair-file "${HOME}/${EC2_KEY}.pem" \
185 | --command "aws s3 cp ${S3_URI}/server-assembly-0.1.0.jar /tmp/tile-server.jar && (sudo stop tile-server; sudo start tile-server)"
186 |
187 | .PHONY: local-ingest ingest local-tile-server update-route53 get-logs
188 |
--------------------------------------------------------------------------------
/scripts/emr/geowave-install-lib.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | #
3 | # Installing additional components on an EMR node depends on several config files
4 | # controlled by the EMR framework which may affect the is_master and configure_zookeeper
5 | # functions at some point in the future. I've grouped each unit of work into a function
6 | # with a descriptive name to help with understanding and maintainability
7 | #
8 |
9 | # You can change these but there is probably no need
10 | ACCUMULO_INSTANCE=accumulo
11 | ACCUMULO_HOME="${INSTALL_DIR}/accumulo"
12 | ZK_IPADDR=
13 | INITIAL_POLLING_INTERVAL=15 # This gets doubled for each attempt up to max_attempts
14 | HDFS_USER=hdfs
15 |
16 | # Parses a configuration file put in place by EMR to determine the role of this node
17 | is_master() {
18 | if [ $(jq '.isMaster' /mnt/var/lib/info/instance.json) = 'true' ]; then
19 | return 0
20 | else
21 | return 1
22 | fi
23 | }
24 |
25 | # Avoid race conditions and actually poll for availability of component dependencies
26 | # Credit: http://stackoverflow.com/questions/8350942/how-to-re-run-the-curl-command-automatically-when-the-error-occurs/8351489#8351489
27 | with_backoff() {
28 | local max_attempts=${ATTEMPTS-5}
29 | local timeout=${INITIAL_POLLING_INTERVAL-1}
30 | local attempt=0
31 | local exitCode=0
32 |
33 | while (( $attempt < $max_attempts ))
34 | do
35 | set +e
36 | "$@"
37 | exitCode=$?
38 | set -e
39 |
40 | if [[ $exitCode == 0 ]]
41 | then
42 | break
43 | fi
44 |
45 | echo "Retrying $@ in $timeout.." 1>&2
46 | sleep $timeout
47 | attempt=$(( attempt + 1 ))
48 | timeout=$(( timeout * 2 ))
49 | done
50 |
51 | if [[ $exitCode != 0 ]]
52 | then
53 | echo "Fail: $@ failed to complete after $max_attempts attempts" 1>&2
54 | fi
55 |
56 | return $exitCode
57 | }
58 |
59 | # Using zookeeper packaged by Apache BigTop for ease of installation
60 | configure_zookeeper() {
61 | if is_master ; then
62 | sudo yum -y install zookeeper-server # EMR 4.3.0 includes Apache Bigtop.repo config
63 | sudo initctl start zookeeper-server # EMR uses Amazon Linux which uses Upstart
64 | # Zookeeper installed on this node, record internal ip from instance metadata
65 | ZK_IPADDR=$(curl http://169.254.169.254/latest/meta-data/local-ipv4)
66 | else
67 | # Zookeeper installed on master node, parse config file to find EMR master node
68 | ZK_IPADDR=$(xmllint --xpath "//property[name='yarn.resourcemanager.hostname']/value/text()" /etc/hadoop/conf/yarn-site.xml)
69 | fi
70 | }
71 |
72 | is_hdfs_available() {
73 | hadoop fs -ls /
74 | return $?
75 | }
76 |
77 | wait_until_hdfs_is_available() {
78 | with_backoff is_hdfs_available
79 | if [ $? != 0 ]; then
80 | echo "HDFS not available before timeout. Exiting ..."
81 | exit 1
82 | fi
83 | }
84 |
85 | # Settings recommended for Accumulo
86 | os_tweaks() {
87 | echo -e "net.ipv6.conf.all.disable_ipv6 = 1" | sudo tee --append /etc/sysctl.conf
88 | echo -e "net.ipv6.conf.default.disable_ipv6 = 1" | sudo tee --append /etc/sysctl.conf
89 | echo -e "net.ipv6.conf.lo.disable_ipv6 = 1" | sudo tee --append /etc/sysctl.conf
90 | echo -e "vm.swappiness = 0" | sudo tee --append /etc/sysctl.conf
91 | sudo sysctl -w vm.swappiness=0
92 | echo -e "" | sudo tee --append /etc/security/limits.conf
93 | echo -e "*\t\tsoft\tnofile\t65536" | sudo tee --append /etc/security/limits.conf
94 | echo -e "*\t\thard\tnofile\t65536" | sudo tee --append /etc/security/limits.conf
95 | }
96 |
97 | initialize_volumes() {
98 | sudo yum -y install fio
99 | local VOLS=$(lsblk | awk '{ print $1 }' | grep -v NAME)
100 |
101 | for v in $VOLS; do
102 | sudo fio --filename=/dev/${v} --rw=randread --bs=128k --iodepth=32 --ioengine=libaio --direct=1 --name=volume-init >> /tmp/emr-vol-init.log
103 | done
104 | }
105 |
106 | create_accumulo_user() {
107 | id $USER
108 | if [ $? != 0 ]; then
109 | sudo adduser $USER
110 | sudo sh -c "echo '$USERPW' | passwd $USER --stdin"
111 | fi
112 | }
113 |
114 | install_accumulo() {
115 | wait_until_hdfs_is_available
116 | ARCHIVE_FILE="accumulo-${ACCUMULO_VERSION}-bin.tar.gz"
117 | LOCAL_ARCHIVE="${INSTALL_DIR}/${ARCHIVE_FILE}"
118 | if [[ $ACCUMULO_DOWNLOAD_BASE_URL == s3* ]]
119 | then
120 | sudo sh -c "aws s3 cp '${ACCUMULO_DOWNLOAD_BASE_URL}/${ACCUMULO_VERSION}/${ARCHIVE_FILE}' $LOCAL_ARCHIVE"
121 | else
122 | sudo sh -c "curl '${ACCUMULO_DOWNLOAD_BASE_URL}/${ACCUMULO_VERSION}/${ARCHIVE_FILE}' > $LOCAL_ARCHIVE"
123 | fi
124 | sudo sh -c "tar xzf $LOCAL_ARCHIVE -C $INSTALL_DIR"
125 | sudo rm -f $LOCAL_ARCHIVE
126 | sudo ln -s "${INSTALL_DIR}/accumulo-${ACCUMULO_VERSION}" "${INSTALL_DIR}/accumulo"
127 | sudo chown -R accumulo:accumulo "${INSTALL_DIR}/accumulo-${ACCUMULO_VERSION}"
128 | sudo sh -c "echo 'export PATH=$PATH:${INSTALL_DIR}/accumulo/bin' > /etc/profile.d/accumulo.sh"
129 | }
130 |
131 | configure_accumulo() {
132 | sudo cp $INSTALL_DIR/accumulo/conf/examples/${ACCUMULO_TSERVER_OPTS}/native-standalone/* $INSTALL_DIR/accumulo/conf/
133 | sudo sed -i "s/localhost:2181<\/value>/${ZK_IPADDR}:2181<\/value>/" $INSTALL_DIR/accumulo/conf/accumulo-site.xml
134 | sudo sed -i '/HDP 2.0 requirements/d' $INSTALL_DIR/accumulo/conf/accumulo-site.xml
135 | sudo sed -i "s/\${LOG4J_JAR}/\${LOG4J_JAR}:\/usr\/lib\/hadoop\/lib\/*:\/usr\/lib\/hadoop\/client\/*/" $INSTALL_DIR/accumulo/bin/accumulo
136 |
137 | # Crazy escaping to get this shell to fill in values but root to write out the file
138 | ENV_FILE="export ACCUMULO_HOME=$INSTALL_DIR/accumulo; export HADOOP_HOME=/usr/lib/hadoop; export ACCUMULO_LOG_DIR=$INSTALL_DIR/accumulo/logs; export JAVA_HOME=/usr/lib/jvm/java; export ZOOKEEPER_HOME=/usr/lib/zookeeper; export HADOOP_PREFIX=/usr/lib/hadoop; export HADOOP_CONF_DIR=/etc/hadoop/conf"
139 | echo $ENV_FILE > /tmp/acc_env
140 | sudo sh -c "cat /tmp/acc_env > $INSTALL_DIR/accumulo/conf/accumulo-env.sh"
141 | sudo chown -R $USER:$USER $INSTALL_DIR/accumulo
142 | source $INSTALL_DIR/accumulo/conf/accumulo-env.sh
143 | sudo -u $USER $INSTALL_DIR/accumulo/bin/build_native_library.sh
144 |
145 | if is_master ; then
146 | sudo -u $HDFS_USER hadoop fs -chmod 777 /user # This is more for Spark than Accumulo but put here for expediency
147 | sudo -u $HDFS_USER hadoop fs -mkdir /accumulo
148 | sudo -u $HDFS_USER hadoop fs -chown accumulo:accumulo /accumulo
149 | sudo sh -c "hostname > $INSTALL_DIR/accumulo/conf/monitor"
150 | sudo sh -c "hostname > $INSTALL_DIR/accumulo/conf/gc"
151 | sudo sh -c "hostname > $INSTALL_DIR/accumulo/conf/tracers"
152 | sudo sh -c "hostname > $INSTALL_DIR/accumulo/conf/masters"
153 | sudo sh -c "echo > $INSTALL_DIR/accumulo/conf/slaves"
154 | sudo -u $USER $INSTALL_DIR/accumulo/bin/accumulo init --clear-instance-name --instance-name $ACCUMULO_INSTANCE --password $USERPW
155 | else
156 | sudo sh -c "echo $ZK_IPADDR > $INSTALL_DIR/accumulo/conf/monitor"
157 | sudo sh -c "echo $ZK_IPADDR > $INSTALL_DIR/accumulo/conf/gc"
158 | sudo sh -c "echo $ZK_IPADDR > $INSTALL_DIR/accumulo/conf/tracers"
159 | sudo sh -c "echo $ZK_IPADDR > $INSTALL_DIR/accumulo/conf/masters"
160 | sudo sh -c "hostname > $INSTALL_DIR/accumulo/conf/slaves"
161 | fi
162 |
163 | # EMR starts worker instances first so there will be timing issues
164 | # Test to ensure it's safe to continue before attempting to start things up
165 | if is_master ; then
166 | with_backoff is_accumulo_initialized
167 | else
168 | with_backoff is_accumulo_available
169 | fi
170 |
171 | sudo -u $USER $INSTALL_DIR/accumulo/bin/start-here.sh
172 | }
173 |
174 | is_accumulo_initialized() {
175 | hadoop fs -ls /accumulo/instance_id
176 | return $?
177 | }
178 |
179 | is_accumulo_available() {
180 | $INSTALL_DIR/accumulo/bin/accumulo info
181 | return $?
182 | }
183 |
184 | install_image_libs() {
185 | pushd /etc/alternatives/java_sdk
186 | sudo curl -O $JAI_URL
187 | sudo curl -O $IMAGEIO_URL
188 | sudo chmod +x *.bin
189 | # Magic spells to unzip and install with auto-confirm of terms and bypassing unzip error with export
190 | sudo ./jai-*.bin >/dev/null < <(echo y) >/dev/null < <(echo y)
191 | sudo bash -c "export _POSIX2_VERSION=199209; ./jai_*.bin >/dev/null < <(echo y) >/dev/null < <(echo y)"
192 | popd
193 | }
194 |
195 | install_geowave() {
196 | # Install the repo config file
197 | sudo rpm -Uvh http://s3.amazonaws.com/geowave-rpms/dev/noarch/$GEOWAVE_REPO_RPM
198 |
199 | # EMR has a tar bundle installed puppet in /home/ec2-user
200 | # So as not to install incompatible puppet from the dependencies of geowave-puppet
201 | # I'm doing this convoluted workaround to download and then install with no dep resolution
202 | sudo yumdownloader --enablerepo geowave-dev --destdir /tmp geowave-puppet
203 | sudo rpm -Uvh --force --nodeps /tmp/geowave-puppet.*.noarch.rpm
204 |
205 | cat << EOF > /tmp/geowave.pp
206 | class { 'geowave::repo':
207 | repo_base_url => 'http://s3.amazonaws.com/geowave-rpms/dev/noarch/',
208 | repo_enabled => 1,
209 | } ->
210 | class { 'geowave':
211 | geowave_version => '${GEOWAVE_VERSION}',
212 | hadoop_vendor_version => 'apache',
213 | install_accumulo => true,
214 | install_app => true,
215 | install_app_server => true,
216 | http_port => '${GEOSERVER_PORT}',
217 | }
218 |
219 | file { '/etc/geowave/geowave.config':
220 | ensure => file,
221 | owner => 'geowave',
222 | group => 'geowave',
223 | mode => 644,
224 | content => 'export JAVA_OPTS="${GEOSERVER_MEMORY}"',
225 | require => Package['geowave-core'],
226 | notify => Service['geowave'],
227 | }
228 | EOF
229 |
230 | sudo sh -c "puppet apply /tmp/geowave.pp"
231 | return 0
232 | }
233 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "{}"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright {yyyy} {name of copyright owner}
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/server/src/main/scala/demo/Router.scala:
--------------------------------------------------------------------------------
1 | package demo
2 |
3 | import akka.http.scaladsl.server.Directives
4 | import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
5 | import akka.http.scaladsl.model.{ContentType, HttpEntity, HttpResponse, MediaTypes}
6 | import akka.http.scaladsl.unmarshalling.Unmarshaller._
7 | import ch.megard.akka.http.cors.CorsDirectives._
8 | import com.typesafe.config._
9 | import geotrellis.proj4._
10 | import geotrellis.raster._
11 | import geotrellis.raster.interpolation._
12 | import geotrellis.raster.render._
13 | import geotrellis.spark._
14 | import geotrellis.spark.io._
15 | import geotrellis.spark.tiling._
16 | import geotrellis.vector._
17 | import geotrellis.vector.io._
18 | import geotrellis.vector.io.json._
19 | import org.apache.spark.SparkContext
20 | import spray.json._
21 |
22 | import java.lang.management.ManagementFactory
23 | import java.time.format.DateTimeFormatter
24 | import java.time.{Instant, ZonedDateTime, ZoneOffset}
25 |
26 | import scala.concurrent.Future
27 | import scala.concurrent.duration._
28 | import scala.util.Try
29 |
30 | class Router(readerSet: ReaderSet, sc: SparkContext) extends Directives with AkkaSystem.LoggerExecutor {
31 | import scala.concurrent.ExecutionContext.Implicits.global
32 |
33 | val dateTimeFormat = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ssZ")
34 | val metadataReader = readerSet.metadataReader
35 | val attributeStore = readerSet.attributeStore
36 |
37 | def pngAsHttpResponse(png: Png): HttpResponse =
38 | HttpResponse(entity = HttpEntity(ContentType(MediaTypes.`image/png`), png.bytes))
39 |
40 | def timedCreate[T](f: => T): (T, String) = {
41 | val s = System.currentTimeMillis
42 | val result = f
43 | val e = System.currentTimeMillis
44 | val t = "%,d".format(e - s)
45 | result -> t
46 | }
47 |
48 |
49 | def isLandsat(name: String) =
50 | name.contains("landsat")
51 |
52 | def routes =
53 | path("ping") { complete { "pong\n" } } ~
54 | path("catalog") { catalogRoute } ~
55 | pathPrefix("tiles") { tilesRoute } ~
56 | pathPrefix("diff") { diffRoute } ~
57 | pathPrefix("mean") { polygonalMeanRoute } ~
58 | pathPrefix("series") { timeseriesRoute } ~
59 | pathPrefix("readall") { readallRoute }
60 |
61 | def timeseriesRoute = {
62 | import spray.json.DefaultJsonProtocol._
63 |
64 | pathPrefix(Segment / Segment) { (layer, op) =>
65 | parameters('lat, 'lng, 'zoom ?) { (lat, lng, zoomLevel) =>
66 | cors() {
67 | complete {
68 | Future {
69 | val zoom = zoomLevel
70 | .map(_.toInt)
71 | .getOrElse(metadataReader.layerNamesToMaxZooms(layer))
72 |
73 | val catalog = readerSet.layerReader
74 | val layerId = LayerId(layer, zoom)
75 | val point = Point(lng.toDouble, lat.toDouble).reproject(LatLng, WebMercator)
76 |
77 | // Wasteful but safe
78 | val fn = op match {
79 | case "ndvi" => NDVI.apply(_)
80 | case "ndwi" => NDWI.apply(_)
81 | case _ => sys.error(s"UNKNOWN OPERATION")
82 | }
83 |
84 | val rdd = catalog.query[SpaceTimeKey, MultibandTile, TileLayerMetadata[SpaceTimeKey]](layerId)
85 | .where(Intersects(point.envelope))
86 | .result
87 |
88 | val mt = rdd.metadata.mapTransform
89 |
90 | val answer = rdd
91 | .map { case (k, tile) =>
92 | // reconstruct tile raster extent so we can map the point to the tile cell
93 | val re = RasterExtent(mt(k), tile.cols, tile.rows)
94 | val (tileCol, tileRow) = re.mapToGrid(point)
95 | val ret = fn(tile).getDouble(tileCol, tileRow)
96 | println(s"$point equals $ret at ($tileCol, $tileRow) in tile $re ")
97 | (k.time, ret)
98 | }
99 | .collect
100 | .filterNot(_._2.isNaN)
101 | .toJson
102 |
103 | JsObject("answer" -> answer)
104 | }
105 | }
106 | }
107 | }
108 | }
109 | }
110 |
111 | def polygonalMeanRoute = {
112 | import spray.json.DefaultJsonProtocol._
113 |
114 | pathPrefix(Segment / Segment) { (layer, op) =>
115 | parameters('time, 'otherTime ?, 'zoom ?) { (time, otherTime, zoomLevel) =>
116 | cors() {
117 | post {
118 | entity(as[String]) { json =>
119 | complete {
120 | Future {
121 | val zoom = zoomLevel
122 | .map(_.toInt)
123 | .getOrElse(metadataReader.layerNamesToMaxZooms(layer))
124 |
125 | val catalog = readerSet.layerReader
126 | val layerId = LayerId(layer, zoom)
127 |
128 | val rawGeometry = try {
129 | json.parseJson.convertTo[Geometry]
130 | } catch {
131 | case e: Exception => sys.error("THAT PROBABLY WASN'T GEOMETRY")
132 | }
133 | val geometry = rawGeometry match {
134 | case p: Polygon => MultiPolygon(p.reproject(LatLng, WebMercator))
135 | case mp: MultiPolygon => mp.reproject(LatLng, WebMercator)
136 | case _ => sys.error(s"BAD GEOMETRY")
137 | }
138 | val extent = geometry.envelope
139 |
140 | val fn = op match {
141 | case "ndvi" => NDVI.apply(_)
142 | case "ndwi" => NDWI.apply(_)
143 | case _ => sys.error(s"UNKNOWN OPERATION")
144 | }
145 |
146 | val rdd1 = catalog
147 | .query[SpaceTimeKey, MultibandTile, TileLayerMetadata[SpaceTimeKey]](layerId)
148 | .where(At(ZonedDateTime.parse(time, dateTimeFormat)))
149 | .where(Intersects(extent))
150 | .result
151 | val answer1 = ContextRDD(rdd1.mapValues({ v => fn(v) }), rdd1.metadata).polygonalMean(geometry)
152 |
153 | val answer2: Double = otherTime match {
154 | case None => 0.0
155 | case Some(otherTime) =>
156 | val rdd2 = catalog
157 | .query[SpaceTimeKey, MultibandTile, TileLayerMetadata[SpaceTimeKey]](layerId)
158 | .where(At(ZonedDateTime.parse(otherTime, dateTimeFormat)))
159 | .where(Intersects(extent))
160 | .result
161 |
162 | ContextRDD(rdd2.mapValues({ v => fn(v) }), rdd2.metadata).polygonalMean(geometry)
163 | }
164 |
165 | val answer = answer1 - answer2
166 |
167 | JsObject("answer" -> JsNumber(answer))
168 | }
169 | }
170 | }
171 | }
172 | }
173 | }
174 | }
175 | }
176 |
177 | /** Return a JSON representation of the catalog */
178 | def catalogRoute =
179 | cors() {
180 | get {
181 | import spray.json.DefaultJsonProtocol._
182 | complete {
183 | Future {
184 | val layerInfo =
185 | metadataReader.layerNamesToZooms //Map[String, Array[Int]]
186 | .keys
187 | .toList
188 | .sorted
189 | .map { name =>
190 | // assemble catalog from metadata common to all zoom levels
191 | val extent = {
192 | val (extent, crs) = Try {
193 | attributeStore.read[(Extent, CRS)](LayerId(name, 0), "extent")
194 | }.getOrElse((LatLng.worldExtent, LatLng))
195 |
196 | extent.reproject(crs, LatLng)
197 | }
198 |
199 | val times = attributeStore.read[Array[Long]](LayerId(name, 0), "times")
200 | .map { instant =>
201 | dateTimeFormat.format(ZonedDateTime.ofInstant(Instant.ofEpochMilli(instant), ZoneOffset.ofHours(-4)))
202 | }
203 | (name, extent, times.sorted)
204 | }
205 |
206 |
207 | JsObject(
208 | "layers" ->
209 | layerInfo.map { li =>
210 | val (name, extent, times) = li
211 | JsObject(
212 | "name" -> JsString(name),
213 | "extent" -> JsArray(Vector(Vector(extent.xmin, extent.ymin).toJson, Vector(extent.xmax, extent.ymax).toJson)),
214 | "times" -> times.toJson,
215 | "isLandsat" -> JsBoolean(true)
216 | )
217 | }.toJson
218 | )
219 | }
220 | }
221 | }
222 | }
223 |
224 | def readallRoute = {
225 | import spray.json._
226 | import spray.json.DefaultJsonProtocol._
227 |
228 | pathPrefix(Segment / IntNumber) { (layer, zoom) =>
229 | get {
230 | cors() {
231 | complete {
232 | Future {
233 | val catalog = readerSet.layerReader
234 | val ccatalog = readerSet.layerCReader
235 | val id = LayerId(layer, zoom)
236 |
237 | JsObject("result" -> ((1 to 20) map { case i =>
238 | val (objrdd, strrdd) = timedCreate(
239 | catalog
240 | .query[SpaceTimeKey, MultibandTile, TileLayerMetadata[SpaceTimeKey]](id)
241 | .result.count()
242 | )
243 |
244 | val (objc, strc) = timedCreate(
245 | ccatalog
246 | .query[SpaceTimeKey, MultibandTile, TileLayerMetadata[SpaceTimeKey]](id)
247 | .result.length
248 | )
249 |
250 | JsObject(
251 | "n" -> i.toString.toJson,
252 | "obj_rdd" -> objrdd.toJson,
253 | "time_rdd" -> strrdd.toJson,
254 | "obj_collection" -> objc.toJson,
255 | "time_collection" -> strc.toJson,
256 | "conf" -> ConfigFactory.load().getObject("geotrellis").render(ConfigRenderOptions.concise()).toJson
257 | )
258 | }).toList.toJson)
259 | }
260 | }
261 | }
262 | }
263 | }
264 | }
265 |
266 | /** Find the breaks for one layer */
267 | def tilesRoute =
268 | pathPrefix(Segment / IntNumber / IntNumber / IntNumber) { (layer, zoom, x, y) =>
269 | parameters('time, 'operation ?) { (timeString, operationOpt) =>
270 | val time = ZonedDateTime.parse(timeString, dateTimeFormat)
271 | // println(layer, zoom, x, y, time)
272 | complete {
273 | Future {
274 | val tileOpt =
275 | readerSet.readMultibandTile(layer, zoom, x, y, time)
276 |
277 | tileOpt.map { tile =>
278 | val png =
279 | operationOpt match {
280 | case Some(op) =>
281 | op match {
282 | case "ndvi" =>
283 | Render.ndvi(tile)
284 | case "ndwi" =>
285 | Render.ndwi(tile)
286 | case _ =>
287 | sys.error(s"UNKNOWN OPERATION $op")
288 | }
289 | case None =>
290 | Render.image(tile)
291 | }
292 | println(s"BYTES: ${png.bytes.length}")
293 | pngAsHttpResponse(png)
294 | }
295 | }
296 | }
297 | }
298 | }
299 |
300 | def diffRoute =
301 | pathPrefix(Segment / IntNumber / IntNumber / IntNumber) { (layer, zoom, x, y) =>
302 | parameters('time1, 'time2, 'breaks ?, 'operation ?) { (timeString1, timeString2, breaksStrOpt, operationOpt) =>
303 | val time1 = ZonedDateTime.parse(timeString1, dateTimeFormat)
304 | val time2 = ZonedDateTime.parse(timeString2, dateTimeFormat)
305 | complete {
306 | Future {
307 | val tileOpt1 =
308 | readerSet.readMultibandTile(layer, zoom, x, y, time1)
309 |
310 | val tileOpt2 =
311 | tileOpt1.flatMap { tile1 =>
312 | readerSet.readMultibandTile(layer, zoom, x, y, time2).map { tile2 => (tile1, tile2) }
313 | }
314 |
315 | tileOpt2.map { case (tile1, tile2) =>
316 | val png =
317 | operationOpt match {
318 | case Some(op) =>
319 | op match {
320 | case "ndvi" =>
321 | Render.ndvi(tile1, tile2)
322 | case "ndwi" =>
323 | Render.ndwi(tile1, tile2)
324 | case _ =>
325 | sys.error(s"UNKNOWN OPERATION $op")
326 | }
327 | case None =>
328 | ???
329 | }
330 |
331 | pngAsHttpResponse(png)
332 | }
333 | }
334 | }
335 | }
336 | }
337 | }
338 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # GeoTrellis Landsat EMR demo
2 |
3 | This project is a demo of using GeoTrellis to ingest a set of Landsat scenes from S3 into an AWS EMR cluster running Apache Accumulo and to stand up a tile server that serves the Landsat multi-band tiles as either RGB, NDVI, or NDWI layers. In addition, it provides a change view between two layers (scenes captured at different times) for either NDVI or NDWI.
4 |
5 |
6 | - [Project Structure](#project-structure)
7 | - [Makefile](#makefile)
8 | - [Running](#running)
9 | - [Running Locally](#running-locally)
10 | - [Running on EMR](#running-on-emr)
11 | - [Configuration](#configuration)
12 | - [Upload Code](#upload-code)
13 | - [Create Cluster](#create-cluster)
14 | - [Ingest](#ingest)
15 | - [Debugging and Monitoring](#debugging-and-monitoring)
16 | - [Terminate Cluster](#terminate-cluster)
17 | - [Deployment](#deployment)
18 | - [Jenkins](#jenkins)
19 |
20 | ## Project Structure
21 |
22 | This project consists of three modules:
23 |
24 | ### [ingest](./ingest)
25 |
26 | A Scala-Spark project that implements a command-line program to query, re-project, tile and save Landsat 8 scenes as a TMS tile pyramid. The Landsat scenes are saved as multi-band tiles containing: red, green, blue, near-infrared, and QA bands.
27 |
28 | ### [server](./server)
29 |
30 | A Scala-Spark project that implements an HTTP service providing the following endpoints (example requests are sketched below):
31 |
32 | - Catalog of available layers and scene times
33 | - Render layer as RGB/NDVI/NDWI
34 | - Render change in NDVI/NDWI between two dates
35 | - Calculate average NDVI/NDWI value for query polygon
36 | - Construct time-series of NDVI/NDWI values for query pixel
37 |
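For concreteness, here is a sketch of requests against a locally running tile server (port 8899, as used later in this README). The routes come from [`Router.scala`](./server/src/main/scala/demo/Router.scala); the layer name, tile coordinates, timestamps, and `polygon.geojson` file are illustrative placeholders:

```console
# catalog of layers and available times
❯ curl "localhost:8899/catalog"

# render a tile as NDVI (omit `operation` for RGB)
❯ curl "localhost:8899/tiles/landsat/8/227/99?time=2015-11-26T01:00:00-0400&operation=ndvi" > tile.png

# render the change in NDVI between two dates
❯ curl "localhost:8899/diff/landsat/8/227/99?time1=2015-07-04T01:00:00-0400&time2=2015-11-26T01:00:00-0400&operation=ndvi" > diff.png

# average NDVI over a GeoJSON polygon (POSTed as the request body)
❯ curl -X POST -d @polygon.geojson "localhost:8899/mean/landsat/ndvi?time=2015-11-26T01:00:00-0400"

# NDVI time series for the pixel at a lat/lng
❯ curl "localhost:8899/series/landsat/ndvi?lat=35.6&lng=139.7"
```
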
38 | ### [viewer](./viewer)
39 |
40 | A JavaScript project that queries the service and renders the layers on a Leaflet map.
41 |
42 | ## Makefile
43 |
44 | This project is reasonably complex in that it contains sub-projects in two different languages, using two different build systems, which may be deployed in at least two different ways using tools which are highly configurable. To manage the complexity of these steps we created a [`Makefile`](./Makefile) which encapsulates the logic.
45 |
46 | We will see these targets used individually, but here is an outline:
47 |
48 | | Command | Description
49 | |------------------|------------------------------------------------------------|
50 | |local-ingest |Run `ingest` project locally |
51 | |local-tile-server |Start `server` project locally |
52 | |upload-code |Upload code and scripts to S3 |
53 | |create-cluster |Create EMR cluster with configurations |
54 | |ingest |Add `ingest` step to running cluster |
55 | |wait |Wait for last step to finish |
56 | |proxy |Create SOCKS proxy for active cluster |
57 | |ssh |SSH into cluster master |
58 | |get-logs |Get spark history logs from active cluster |
59 | |update-route53 |Update Route53 DNS record with active cluster ip |
60 | |clean |Clean local projects |
61 |
62 | As you execute these commands you should look at the `Makefile` content and feel free to make it your own. The plethora of configuration options creates many opportunities for mistakes, and it is very helpful to capture the process in a script such as this.
63 |
64 | ## Running
65 |
66 | If you were developing a project like this, your first step should be to write unit tests; then the project should be run in Spark local mode, tested on a cluster with limited input, and finally run with the full input. These steps represent increasingly longer feedback cycles and should be followed in that order to save time.
67 |
68 | ### Running Locally
69 |
70 | _Requires_: Spark installed locally such that `spark-submit` command is available in shell
71 |
72 | The first thing we need to do is to create the `server` and `ingest` assemblies. Assemblies are fat jars containing all of a project's transitive dependencies. We need to provide them as an argument to the [`spark-submit`](http://spark.apache.org/docs/latest/submitting-applications.html) command, which is a shim that will provide an instance of `SparkContext` for our application.
73 |
74 | ```console
75 | ❯ ./sbt "project server" assembly
76 | [info] Packaging /Users/eugene/proj/landsat-demo/server/target/scala-2.10/server-assembly-0.1.0.jar ...
77 | [info] Done packaging.
78 | [success] Total time: 31 s, completed Jun 29, 2016 2:38:20 PM
79 |
80 | ❯ ./sbt "project ingest" assembly
81 | [info] Packaging /Users/eugene/proj/landsat-demo/ingest/target/scala-2.10/ingest-assembly-0.1.0.jar ...
82 | [info] Done packaging.
83 | [success] Total time: 29 s, completed Jun 29, 2016 2:39:54 PM
84 | ```
85 |
86 | Now we can invoke `spark-submit` to run our ingest project through the Makefile. Helpfully it will echo out the command it has generated so we can inspect and verify:
87 |
88 | ```console
89 | ❯ make LIMIT=1 local-ingest
90 | spark-submit --name "Landsat Demo Ingest" --master "local[4]" --driver-memory 4G \
91 | ingest/target/scala-2.10/ingest-assembly-0.1.0.jar \
92 | --layerName landsat \
93 | --bbox 135.35,33.23,143.01,41.1 --startDate 2015-07-01 --endDate 2015-11-30 \
94 | --output file \
95 | --params path=catalog \
96 | --limit 1
97 | ```
98 |
99 | Note that we define `LIMIT` in the make argument list, which sets it in the make environment, and it gets passed as the `--limit` option [parsed](./ingest/src/main/scala/demo/Config.scala) by the `ingest` project. Specifically, `LIMIT` will limit the result of the query to the first `n` items, in whichever order they come in.
100 |
101 | We use the `file` output, so once the tiles are processed they will be written to the `catalog` directory using the Avro encoding specified by GeoTrellis.
102 |
103 | ```console
104 | ❯ tree -L 2 catalog
105 | catalog
106 | ├── attributes
107 | │ ├── landsat__.__0__.__extent.json
108 | │ ├── landsat__.__0__.__times.json
109 | │ ├── landsat__.__10__.__metadata.json
110 | │ ├── landsat__.__11__.__metadata.json
111 | │ ├── landsat__.__12__.__metadata.json
112 | │ ├── landsat__.__13__.__metadata.json
113 | │ ├── landsat__.__1__.__metadata.json
114 | │ ├── landsat__.__2__.__metadata.json
115 | │ ├── landsat__.__3__.__metadata.json
116 | │ ├── landsat__.__4__.__metadata.json
117 | │ ├── landsat__.__5__.__metadata.json
118 | │ ├── landsat__.__6__.__metadata.json
119 | │ ├── landsat__.__7__.__metadata.json
120 | │ ├── landsat__.__8__.__metadata.json
121 | │ └── landsat__.__9__.__metadata.json
122 | └── landsat
123 | ├── 1
124 | │ ├── 17592254351711
125 | │ └─- ...
126 | ├── ...
127 | ```
128 |
129 | Now we can start our server and ask it to read the catalog:
130 |
131 | ```console
132 | ❯ make local-tile-server
133 |
134 | spark-submit --name "Landsat Demo Service" --master "local" --driver-memory 1G \
135 | server/target/scala-2.10/server-assembly-0.1.0.jar local catalog
136 |
137 | ❯ curl localhost:8899/catalog
138 | {
139 | "layers": [{
140 | "name": "landsat",
141 | "extent": [[138.09185, 32.11207], [140.55872, 34.22866999999999]],
142 | "times": ["2015-11-26T01:00:00-0400"],
143 | "isLandsat": true
144 | }]
145 | }
146 | ```
147 |
148 | The remaining step is to start our viewer:
149 |
150 | ```console
151 | ❯ cd viewer
152 | ❯ npm install
153 | ...
154 | ❯ npm start
155 |
156 | > geotrellis-viewer@0.0.2 start /Users/eugene/proj/landsat-demo/viewer
157 | > node_modules/nodemon/bin/nodemon.js server/server.js --ignore components --ignore containers
158 |
159 | [nodemon] 1.9.2
160 | [nodemon] to restart at any time, enter `rs`
161 | [nodemon] watching: *.*
162 | [nodemon] starting `node server/server.js`
163 | Express server listening on port 3000
164 | ```
165 |
166 | Open [http://localhost:3000/](http://localhost:3000/) and hit the "Load" button in the top right corner.
167 |
168 |
169 | ### Running on EMR
170 |
171 | _Requires_: A reasonably up-to-date [`aws-cli`](https://aws.amazon.com/cli/); this document was written with version `1.10`.
172 |
173 | To run this project on EMR we will need to allocate a cluster that has the appropriate bootstrap steps to install Apache Accumulo, run our `server` project as a service on the master node, run our `ingest` project as an EMR job step, and upload our site to be served by `httpd` running on the EMR master.
174 |
175 | EMR is going to use YARN to distribute our applications and manage their resource consumption, so we will need to include some YARN-specific configurations in our `spark-submit` arguments.
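
Concretely, these are the YARN-related flags that the `Makefile`'s `ingest` target passes (written out here as the command that ends up running on the cluster; the values come from the `config-*.mk` files):

```console
spark-submit --master yarn-cluster \
  --class demo.LandsatIngestMain \
  --driver-memory ${DRIVER_MEMORY} --driver-cores ${DRIVER_CORES} \
  --executor-memory ${EXECUTOR_MEMORY} --executor-cores ${EXECUTOR_CORES} \
  --conf spark.dynamicAllocation.enabled=true \
  --conf spark.yarn.executor.memoryOverhead=${YARN_OVERHEAD} \
  --conf spark.yarn.driver.memoryOverhead=${YARN_OVERHEAD} \
  ${S3_URI}/ingest-assembly-0.1.0.jar ...
```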
176 |
177 | The win here is that through the EMR interface we get to refer to the whole cluster as a single unit and avoid the considerable trouble of managing the individual machines, their configuration, and their cluster membership.
178 |
179 | #### Configuration
180 |
181 | Before anything else we need to review the parameters for our cluster. They have been broken out into three files which are imported by the `Makefile`.
182 |
183 | - [config-aws.mk](./config-aws.mk) AWS credentials, S3 staging bucket, subnet, etc
184 | - [config-emr.mk](./config-emr.mk) EMR cluster type and size
185 | - [config-ingest.mk](./config-ingest.mk) Ingest step parameters
186 |
187 | You will need to modify `config-aws.mk` to reflect your credentials and your VPC configuration. `config-emr.mk` and `config-ingest.mk` have been configured with an area over Japan. Be especially aware that as you change instance types `config-emr.mk` parameters like `EXECUTOR_MEMORY` and `EXECUTOR_CORES` need to be reviewed and likely adjusted.
188 |
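A `config-aws.mk` might look roughly like the sketch below. The variable names are the ones referenced by the `Makefile`; the values are placeholders, so treat the file shipped with the repository as authoritative:

```make
S3_URI    := s3://my-bucket/emr
EC2_KEY   := my-ec2-keypair
SUBNET_ID := subnet-xxxxxxxx
```
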
189 | Aside from editing these files we have two more ways to affect the behavior of the `make` command.
190 |
191 | - Assign the variable on the command line: `make NAME="My Cluster" create-cluster`
192 | - Instruct make to overwrite defined variables with those found in the environment: `make -e create-cluster`
193 |
194 | #### Ingest configuration
195 |
196 | The [./conf](conf) directory provides templates for the ingest process. A detailed description can be found [here](https://github.com/geotrellis/geotrellis/blob/master/docs/spark-etl/spark-etl-intro.md).
197 |
198 | `output.json` configurations:
199 |
200 | ##### Accumulo
201 |
202 | ```json
203 | "backend": {
204 | "type": "accumulo",
205 | "path": "tiles",
206 | "profile": "accumulo-emr"
207 | }
208 | ```
209 |
210 | ##### Cassandra
211 |
212 | ```json
213 | "backend": {
214 | "type": "cassandra",
215 | "path": "geotrellis.tiles",
216 | "profile": "cassandra-emr"
217 | }
218 | ```
219 |
220 | ##### File
221 |
222 | ```json
223 | "backend": {
224 | "type": "file",
225 | "path": "/tmp/catalog"
226 | }
227 | ```
228 |
229 | ##### Hadoop
230 |
231 | ```json
232 | "backend": {
233 | "type": "hadoop",
234 | "path": "/catalog"
235 | }
236 | ```
237 |
238 | ##### HBase
239 |
240 | ```json
241 | "backend": {
242 | "type": "hbase",
243 | "path": "tiles",
244 | "profile": "hbase-emr"
245 | }
246 | ```
247 |
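Each `profile` name referenced above must correspond to an entry in [`conf/backend-profiles.json`](conf/backend-profiles.json), which carries the connection details for that backend. As a rough sketch (field values are placeholders; the file shipped in `conf/` and the spark-etl documentation linked above are authoritative), an Accumulo profile looks like:

```json
{
  "backend-profiles": [
    {
      "name": "accumulo-emr",
      "type": "accumulo",
      "zookeepers": "zookeeper-host",
      "instance": "accumulo",
      "user": "root",
      "password": "secret"
    }
  ]
}
```
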
248 | #### Upload Code
249 |
250 | Now that we have configured AWS credentials we need to upload the relevant files to S3 so that EMR is able to reference and download them during the bootstrap phase and during job processing. Helpfully, this will trigger a rebuild if make notices that any of the source files have changed.
251 |
252 | ```console
253 | ❯ make upload-code
254 |
255 | upload: viewer/site.tgz to s3://geotrellis-test/emr/site.tgz
256 | upload: scripts/emr/bootstrap-demo.sh to s3://geotrellis-test/emr/bootstrap-demo.sh
257 | upload: scripts/emr/bootstrap-geowave.sh to s3://geotrellis-test/emr/bootstrap-geowave.sh
258 | upload: scripts/emr/geowave-install-lib.sh to s3://geotrellis-test/emr/geowave-install-lib.sh
259 | upload: server/target/scala-2.10/server-assembly-0.1.0.jar to s3://geotrellis-test/emr/server-assembly-0.1.0.jar
260 | upload: ingest/target/scala-2.10/ingest-assembly-0.1.0.jar to s3://geotrellis-test/emr/ingest-assembly-0.1.0.jar
261 | ```
262 |
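This rebuild behavior comes from the `Makefile` dependency graph: `upload-code` depends on the assembly targets, which in turn depend on the Scala sources (excerpted from the [`Makefile`](./Makefile)):

```make
${SERVER_ASSEMBLY}: $(call rwildcard, server/src, *.scala) server/build.sbt
	./sbt "project server" assembly -no-colors

${INGEST_ASSEMBLY}: $(call rwildcard, ingest/src, *.scala) ingest/build.sbt
	./sbt "project ingest" assembly -no-colors

upload-code: ${SERVER_ASSEMBLY} ${INGEST_ASSEMBLY} scripts/emr/* viewer/site.tgz
```
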
263 | #### Create Cluster
264 |
265 | ```console
266 | ❯ make NAME="Landsat Cluster" USE_SPOT=true create-cluster
267 | aws emr create-cluster --name "LC Cassandra" \
268 | --release-label emr-4.5.0 \
269 | --output text \
270 | --use-default-roles \
271 | --configurations "file:///Users/eugene/proj/landsat-demo/scripts/configurations.json" \
272 | --log-uri s3://geotrellis-test/emr/logs \
273 | --ec2-attributes KeyName=geotrellis-emr,SubnetId=subnet-c5fefdb1 \
274 | --applications Name=Ganglia Name=Hadoop Name=Hue Name=Spark Name=Zeppelin-Sandbox \
275 | --instance-groups \
276 | Name=Master,BidPrice=0.5,InstanceCount=1,InstanceGroupType=MASTER,InstanceType=m3.xlarge \
277 | Name=Workers,BidPrice=0.5,InstanceCount=4,InstanceGroupType=CORE,InstanceType=m3.2xlarge \
278 | --bootstrap-actions \
279 | Name=BootstrapGeoWave,Path=s3://geotrellis-test/emr/bootstrap-geowave.sh \
280 | Name=BootstrapDemo,Path=s3://geotrellis-test/emr/bootstrap-demo.sh,\
281 | Args=[--tsj=s3://geotrellis-test/emr/server-assembly-0.1.0.jar,--site=s3://geotrellis-test/emr/site.tgz] \
282 | | tee cluster-id.txt
283 | j-2L3HJ8N2BMVDV
284 | ```
285 |
286 | All that happened here is that the `Makefile` constructed the `aws emr create-cluster` command with its considerable argument list and executed it. We can see references to the assemblies and code that were just uploaded passed as arguments to the command.
287 |
288 | Finally, the `aws` command returns the id of the cluster that was just created, and we save it to `cluster-id.txt` so we can refer to it in future commands.
289 |
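The `Makefile` picks the id back up on subsequent invocations, so later targets such as `ingest`, `proxy`, and `terminate-cluster` do not need it passed explicitly (excerpted from the [`Makefile`](./Makefile)):

```make
ifndef CLUSTER_ID
CLUSTER_ID=$(shell if [ -e "cluster-id.txt" ]; then cat cluster-id.txt; fi)
endif
```
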
290 | #### Ingest
291 |
292 | ```console
293 | ❯ make LIMIT=1 ingest
294 |
295 | aws emr add-steps --output text --cluster-id \
296 | --steps Type=CUSTOM_JAR,Name="Ingest japan-typhoon",Jar=command-runner.jar,Args=[\
297 | spark-submit,--master,yarn-cluster,\
298 | --class,demo.LandsatIngestMain,\
299 | --driver-memory,4200M,\
300 | --driver-cores,2,\
301 | --executor-memory,4100M,\
302 | --executor-cores,2,\
303 | --conf,spark.dynamicAllocation.enabled=true,\
304 | --conf,spark.yarn.executor.memoryOverhead=800,\
305 | --conf,spark.yarn.driver.memoryOverhead=800,\
306 | s3://geotrellis-test/emr/eac/ingest-assembly-0.1.0.jar,\
307 | --layerName,"japan-typhoon",\
308 | --bbox,\"135.35,33.23,143.01,41.1\",\
309 | --startDate,2015-07-01,\
310 | --endDate,2015-11-30,\
311 | --maxCloudCoverage,20.0,\
312 | --limit,1,\
313 | --output,accumulo,\
314 | --params,\"instance=accumulo,table=tiles,user=root,password=secret\"\
315 | ] | cut -f2 | tee last-step-id.txt
316 | s-324HJ2BMVD5
317 | ```
318 |
319 | When developing it is prudent to make the first job you run limited in its input so you can exercise the whole processing chain as quickly as possible. This is what we do here by providing the `LIMIT` variable to the `make` command.
320 |
321 | What basically happened here is that we instructed EMR to enqueue the `spark-submit` command as a job step, which will be executed when the cluster has sufficient resources available. `spark-submit` actually has two argument lists: arguments before the application jar are configuration for Spark and YARN; arguments after the application jar are passed to our application to be parsed.
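
Schematically (illustrative, not a literal command):

```console
spark-submit [flags consumed by Spark/YARN] ingest-assembly-0.1.0.jar [flags parsed by demo.LandsatIngestMain]
```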
322 |
323 | We could have also overwritten some of the query parameters here and issued the command as:
324 |
325 | ```console
326 | ❯ make START_DATE=2015-06-01 END_DATE=2015-06-15 BBOX=135.35,33.23,143.01,41.1 ingest
327 | ```
328 |
329 | Another thing worth noting is the double escaping happening here. We must separate the step arguments with commas, which will be parsed and converted to spaces before the command is executed on the cluster. You can read more about the required format by running `aws emr add-steps help`.
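
For example (illustrative), a step defined with comma-separated arguments runs on the cluster as an ordinary space-separated command:

```console
# step definition
Args=[spark-submit,--master,yarn-cluster,--class,demo.LandsatIngestMain,...]
# what actually runs on the cluster
spark-submit --master yarn-cluster --class demo.LandsatIngestMain ...
```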
330 |
331 |
332 | #### Debugging and Monitoring
333 |
334 | We need a way to monitor our job, inspect its logs, and see logs for previous jobs. Fortunately EMR provides us with those tools; we just need to connect to ports that are not open to the internet by default, which we do by establishing an SSH SOCKS proxy.
335 |
336 | ```console
337 | ❯ make proxy
338 |
339 | aws emr socks --cluster-id j-2L3HJ8N2BMVDV --key-pair-file "/Users/eugene/geotrellis-emr.pem"
340 | ssh -o StrictHostKeyChecking=no -o ServerAliveInterval=10 -ND 8157 -i /Users/eugene/geotrellis-emr.pem hadoop@ec2-54-226-8-111.compute-1.amazonaws.com
341 | ```
342 |
343 | Now we just need a way to configure our browser to use the proxy for EC2 addresses. The easiest way to do that is to use Chrome with the FoxyProxy extension. Detailed instructions are [provided by Amazon](https://docs.aws.amazon.com/ElasticMapReduce/latest/ManagementGuide/emr-connect-master-node-proxy.html).
344 |
345 | Once the proxy is configured we should be able to go to the EMR web console and see the links for `Resource Manager`, `Spark History Server`, and other status pages.
346 |
347 | Likewise, now that we have a SOCKS proxy available we should be able to browse to the master address and see our JavaScript app. You will see the master address both in the output of the `make proxy` command and in the EMR web console.
348 |
349 |
350 | If we need to connect to our cluster master to do a little poking we can:
351 |
352 | ```console
353 | ❯ make ssh
354 | aws emr ssh --cluster-id j-2L3HJ8N2BMVDV --key-pair-file "/Users/eugene/geotrellis-emr.pem"
355 | ```
356 |
357 | #### Terminate Cluster
358 |
359 | When you're done you can either use the AWS web console to terminate the cluster or:
360 |
361 | ```console
362 | ❯ make terminate-cluster
363 | aws emr terminate-clusters --cluster-ids j-2L3HJ8N2BMVDV
364 | ```
365 |
366 | ## Deployment
367 |
368 | It is important to emphasize that GeoTrellis is a library and as such does not hold any opinions on deployment by itself. It is rather the nature of the application using GeoTrellis that dictates what is an appropriate deployment strategy. For instance we can imagine two different ways in which this demo application could be deployed:
369 |
370 | _Workspace Cluster_
371 |
372 | In this manner a user would bring up an EMR cluster that serves as their workspace. Likely they would trigger one or more ingests to bring the relevant information into the instance of Accumulo running on EMR and use the quick query/response afforded by Accumulo to perform their analysis. Once the analysis is complete the user chooses when to terminate the cluster.
373 |
374 | In this manner we reap the benefits of scalability and isolation. From a scalability perspective, the size of the cluster and its lifetime are closely tied to the work required by a single user; once the analysis is done there is no further expense. From an isolation standpoint, the cluster encapsulates nearly all of the environment required to support the application. If there is a fault, other users are unaffected; multiple users are able to run different versions of the application without conflict; there is no resource contention between multiple instances of the cluster; and so on.
375 |
376 | Importantly, the user interaction involves triggering Spark ingest jobs which may consume cluster resources for long periods of time. However, this is intuitive to the user since the jobs represent the work they requested.
377 |
378 | _Ingest/Serve Cluster_
379 |
380 | A second way such an application could be deployed is as a long-lived cluster that serves some view of the data. In this case the ingest step would be run once during the cluster setup stage and then optionally re-run periodically to refresh the data. User interaction could then happen at any later point in the form of tile service requests that are satisfied through key/value lookups in Accumulo.
381 |
382 | It is important to note that in this scenario the Spark ingest is not triggered by user interactions, and in fact a Spark context is not required to satisfy user requests. After the initial job we are effectively treating the EMR cluster as an Accumulo-backed tile service. Because a Spark context is not required to satisfy user requests, the requests are quite light and we can feel safer about sharing this resource amongst many users.
383 |
384 |
385 | ### Jenkins
386 |
387 | To build and deploy the demo from Jenkins we can use the same `Makefile`.
388 | We need to define Jenkins job parameters to match the environment variables used in the `Makefile` and then invoke the build targets with the `-e` flag to allow the environment variables to overwrite the file defaults.
389 |
390 | Since our scripts rely on AWS CLI we must use the Jenkins credentials plugin to define `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` for the build environment.
391 |
392 | #### Building
393 |
394 | ```console
395 | # Requires: S3_URI
396 | # Build and upload assemblies with scripts
397 | make -e upload-code || exit 1
398 | ```
399 |
400 | #### Deploying
401 |
402 | ```console
403 | # Requires: S3_URI, EC2_KEY, START_DATE, END_DATE, BBOX, WORKER_COUNT
404 |
405 | make -e create-cluster || exit 1
406 | make -e ingest || (make -e terminate-cluster && exit 1)
407 | make -e wait || (make -e terminate-cluster && exit 1)
408 | ```
409 |
410 | The included `Jenkinsfile` shows how we can use the Jenkins pipeline DSL to build a job that deploys an EMR cluster, monitors the ingest step, and waits for user input to optionally terminate the cluster when the ingest step is done.
411 |
412 | Also note that after the cluster has been successfully initialized we need to check for the success of subsequent steps and tear down the cluster on failure to avoid producing an orphan cluster from a failed job.
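
A minimal declarative-pipeline sketch of that flow might look like the block below. This is illustrative only and is not the repository's actual `Jenkinsfile`; the parameter names simply mirror the `Makefile` variables:

```groovy
pipeline {
  agent any
  parameters {
    string(name: 'S3_URI', defaultValue: 's3://my-bucket/emr', description: 'S3 staging location')
    string(name: 'WORKER_COUNT', defaultValue: '4', description: 'EMR core node count')
  }
  stages {
    stage('Build and upload') { steps { sh 'make -e upload-code' } }
    stage('Create cluster')   { steps { sh 'make -e create-cluster' } }
    stage('Ingest') {
      steps {
        // tear the cluster down if the ingest or the wait step fails
        sh 'make -e ingest || (make -e terminate-cluster && exit 1)'
        sh 'make -e wait || (make -e terminate-cluster && exit 1)'
      }
    }
    stage('Terminate') {
      steps {
        input message: 'Terminate the cluster?'
        sh 'make -e terminate-cluster'
      }
    }
  }
}
```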
413 |
--------------------------------------------------------------------------------
/sbt:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | #
3 | # A more capable sbt runner, coincidentally also called sbt.
4 | # Author: Paul Phillips
5 |
6 | set -o pipefail
7 |
8 | declare -r sbt_release_version="0.13.12"
9 | declare -r sbt_unreleased_version="0.13.13-M1"
10 |
11 | declare -r latest_212="2.12.0-RC1"
12 | declare -r latest_211="2.11.8"
13 | declare -r latest_210="2.10.6"
14 | declare -r latest_29="2.9.3"
15 | declare -r latest_28="2.8.2"
16 |
17 | declare -r buildProps="project/build.properties"
18 |
19 | declare -r sbt_launch_ivy_release_repo="http://repo.typesafe.com/typesafe/ivy-releases"
20 | declare -r sbt_launch_ivy_snapshot_repo="https://repo.scala-sbt.org/scalasbt/ivy-snapshots"
21 | declare -r sbt_launch_mvn_release_repo="http://repo.scala-sbt.org/scalasbt/maven-releases"
22 | declare -r sbt_launch_mvn_snapshot_repo="http://repo.scala-sbt.org/scalasbt/maven-snapshots"
23 |
24 | declare -r default_jvm_opts_common="-Xms512m -Xmx1536m -Xss2m"
25 | declare -r noshare_opts="-Dsbt.global.base=project/.sbtboot -Dsbt.boot.directory=project/.boot -Dsbt.ivy.home=project/.ivy"
26 |
27 | declare sbt_jar sbt_dir sbt_create sbt_version sbt_script sbt_new
28 | declare sbt_explicit_version
29 | declare verbose noshare batch trace_level
30 | declare sbt_saved_stty debugUs
31 |
32 | declare java_cmd="java"
33 | declare sbt_launch_dir="$HOME/.sbt/launchers"
34 | declare sbt_launch_repo
35 |
36 | # pull -J and -D options to give to java.
37 | declare -a java_args scalac_args sbt_commands residual_args
38 |
39 | # args to jvm/sbt via files or environment variables
40 | declare -a extra_jvm_opts extra_sbt_opts
41 |
42 | echoerr () { echo >&2 "$@"; }
43 | vlog () { [[ -n "$verbose" ]] && echoerr "$@"; }
44 | die () { echo "Aborting: $@" ; exit 1; }
45 |
46 | # restore stty settings (echo in particular)
47 | onSbtRunnerExit() {
48 | [[ -n "$sbt_saved_stty" ]] || return
49 | vlog ""
50 | vlog "restoring stty: $sbt_saved_stty"
51 | stty "$sbt_saved_stty"
52 | unset sbt_saved_stty
53 | }
54 |
55 | # save stty and trap exit, to ensure echo is re-enabled if we are interrupted.
56 | trap onSbtRunnerExit EXIT
57 | sbt_saved_stty="$(stty -g 2>/dev/null)"
58 | vlog "Saved stty: $sbt_saved_stty"
59 |
60 | # this seems to cover the bases on OSX, and someone will
61 | # have to tell me about the others.
62 | get_script_path () {
63 | local path="$1"
64 | [[ -L "$path" ]] || { echo "$path" ; return; }
65 |
66 | local target="$(readlink "$path")"
67 | if [[ "${target:0:1}" == "/" ]]; then
68 | echo "$target"
69 | else
70 | echo "${path%/*}/$target"
71 | fi
72 | }
73 |
74 | declare -r script_path="$(get_script_path "$BASH_SOURCE")"
75 | declare -r script_name="${script_path##*/}"
76 |
77 | init_default_option_file () {
78 | local overriding_var="${!1}"
79 | local default_file="$2"
80 | if [[ ! -r "$default_file" && "$overriding_var" =~ ^@(.*)$ ]]; then
81 | local envvar_file="${BASH_REMATCH[1]}"
82 | if [[ -r "$envvar_file" ]]; then
83 | default_file="$envvar_file"
84 | fi
85 | fi
86 | echo "$default_file"
87 | }
88 |
89 | declare sbt_opts_file="$(init_default_option_file SBT_OPTS .sbtopts)"
90 | declare jvm_opts_file="$(init_default_option_file JVM_OPTS .jvmopts)"
91 |
92 | build_props_sbt () {
93 | [[ -r "$buildProps" ]] && \
94 | grep '^sbt\.version' "$buildProps" | tr '=\r' ' ' | awk '{ print $2; }'
95 | }
96 |
97 | update_build_props_sbt () {
98 | local ver="$1"
99 | local old="$(build_props_sbt)"
100 |
101 | [[ -r "$buildProps" ]] && [[ "$ver" != "$old" ]] && {
102 | perl -pi -e "s/^sbt\.version\b.*\$/sbt.version=${ver}/" "$buildProps"
103 | grep -q '^sbt.version[ =]' "$buildProps" || printf "\nsbt.version=%s\n" "$ver" >> "$buildProps"
104 |
105 | vlog "!!!"
106 | vlog "!!! Updated file $buildProps setting sbt.version to: $ver"
107 | vlog "!!! Previous value was: $old"
108 | vlog "!!!"
109 | }
110 | }
111 |
112 | set_sbt_version () {
113 | sbt_version="${sbt_explicit_version:-$(build_props_sbt)}"
114 | [[ -n "$sbt_version" ]] || sbt_version=$sbt_release_version
115 | export sbt_version
116 | }
117 |
118 | url_base () {
119 | local version="$1"
120 |
121 | case "$version" in
122 | 0.7.*) echo "http://simple-build-tool.googlecode.com" ;;
123 | 0.10.* ) echo "$sbt_launch_ivy_release_repo" ;;
124 | 0.11.[12]) echo "$sbt_launch_ivy_release_repo" ;;
125 | 0.*-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]-[0-9][0-9][0-9][0-9][0-9][0-9]) # ie "*-yyyymmdd-hhMMss"
126 | echo "$sbt_launch_ivy_snapshot_repo" ;;
127 | 0.*) echo "$sbt_launch_ivy_release_repo" ;;
128 | *-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]-[0-9][0-9][0-9][0-9][0-9][0-9]) # ie "*-yyyymmdd-hhMMss"
129 | echo "$sbt_launch_mvn_snapshot_repo" ;;
130 | *) echo "$sbt_launch_mvn_release_repo" ;;
131 | esac
132 | }
133 |
134 | make_url () {
135 | local version="$1"
136 |
137 | local base="${sbt_launch_repo:-$(url_base "$version")}"
138 |
139 | case "$version" in
140 | 0.7.*) echo "$base/files/sbt-launch-0.7.7.jar" ;;
141 | 0.10.* ) echo "$base/org.scala-tools.sbt/sbt-launch/$version/sbt-launch.jar" ;;
142 | 0.11.[12]) echo "$base/org.scala-tools.sbt/sbt-launch/$version/sbt-launch.jar" ;;
143 | 0.*) echo "$base/org.scala-sbt/sbt-launch/$version/sbt-launch.jar" ;;
144 | *) echo "$base/org/scala-sbt/sbt-launch/$version/sbt-launch.jar" ;;
145 | esac
146 | }
147 |
148 | addJava () { vlog "[addJava] arg = '$1'" ; java_args+=("$1"); }
149 | addSbt () { vlog "[addSbt] arg = '$1'" ; sbt_commands+=("$1"); }
150 | addScalac () { vlog "[addScalac] arg = '$1'" ; scalac_args+=("$1"); }
151 | addResidual () { vlog "[residual] arg = '$1'" ; residual_args+=("$1"); }
152 |
153 | addResolver () { addSbt "set resolvers += $1"; }
154 | addDebugger () { addJava "-Xdebug" ; addJava "-Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=$1"; }
155 | setThisBuild () {
156 | vlog "[addBuild] args = '$@'"
157 | local key="$1" && shift
158 | addSbt "set $key in ThisBuild := $@"
159 | }
160 | setScalaVersion () {
161 | [[ "$1" == *"-SNAPSHOT" ]] && addResolver 'Resolver.sonatypeRepo("snapshots")'
162 | addSbt "++ $1"
163 | }
164 | setJavaHome () {
165 | java_cmd="$1/bin/java"
166 | setThisBuild javaHome "_root_.scala.Some(file(\"$1\"))"
167 | export JAVA_HOME="$1"
168 | export JDK_HOME="$1"
169 | export PATH="$JAVA_HOME/bin:$PATH"
170 | }
171 |
172 | getJavaVersion() { "$1" -version 2>&1 | grep -E -e '(java|openjdk) version' | awk '{ print $3 }' | tr -d \"; }
173 |
174 | checkJava() {
175 | # Warn if there is a Java version mismatch between PATH and JAVA_HOME/JDK_HOME
176 |
177 | [[ -n "$JAVA_HOME" && -e "$JAVA_HOME/bin/java" ]] && java="$JAVA_HOME/bin/java"
178 | [[ -n "$JDK_HOME" && -e "$JDK_HOME/lib/tools.jar" ]] && java="$JDK_HOME/bin/java"
179 |
180 | if [[ -n "$java" ]]; then
181 | pathJavaVersion=$(getJavaVersion java)
182 | homeJavaVersion=$(getJavaVersion "$java")
183 | if [[ "$pathJavaVersion" != "$homeJavaVersion" ]]; then
184 | echoerr "Warning: Java version mismatch between PATH and JAVA_HOME/JDK_HOME, sbt will use the one in PATH"
185 | echoerr " Either: fix your PATH, remove JAVA_HOME/JDK_HOME or use -java-home"
186 | echoerr " java version from PATH: $pathJavaVersion"
187 | echoerr " java version from JAVA_HOME/JDK_HOME: $homeJavaVersion"
188 | fi
189 | fi
190 | }
191 |
192 | java_version () {
193 | local version=$(getJavaVersion "$java_cmd")
194 | vlog "Detected Java version: $version"
195 | echo "${version:2:1}"
196 | }
197 |
198 | # MaxPermSize critical on pre-8 JVMs but incurs noisy warning on 8+
199 | default_jvm_opts () {
200 | local v="$(java_version)"
201 | if [[ $v -ge 8 ]]; then
202 | echo "$default_jvm_opts_common"
203 | else
204 | echo "-XX:MaxPermSize=384m $default_jvm_opts_common"
205 | fi
206 | }
207 |
208 | build_props_scala () {
209 | if [[ -r "$buildProps" ]]; then
210 | versionLine="$(grep '^build.scala.versions' "$buildProps")"
211 | versionString="${versionLine##build.scala.versions=}"
212 | echo "${versionString%% .*}"
213 | fi
214 | }
215 |
216 | execRunner () {
217 | # print the arguments one to a line, quoting any containing spaces
218 | vlog "# Executing command line:" && {
219 | for arg; do
220 | if [[ -n "$arg" ]]; then
221 | if printf "%s\n" "$arg" | grep -q ' '; then
222 | printf >&2 "\"%s\"\n" "$arg"
223 | else
224 | printf >&2 "%s\n" "$arg"
225 | fi
226 | fi
227 | done
228 | vlog ""
229 | }
230 |
231 | [[ -n "$batch" ]] && exec /dev/null; then
248 | curl --fail --silent --location "$url" --output "$jar"
249 | elif which wget >/dev/null; then
250 | wget -q -O "$jar" "$url"
251 | fi
252 | } && [[ -r "$jar" ]]
253 | }
254 |
255 | acquire_sbt_jar () {
256 | {
257 | sbt_jar="$(jar_file "$sbt_version")"
258 | [[ -r "$sbt_jar" ]]
259 | } || {
260 | sbt_jar="$HOME/.ivy2/local/org.scala-sbt/sbt-launch/$sbt_version/jars/sbt-launch.jar"
261 | [[ -r "$sbt_jar" ]]
262 | } || {
263 | sbt_jar="$(jar_file "$sbt_version")"
264 | download_url "$(make_url "$sbt_version")" "$sbt_jar"
265 | }
266 | }
267 |
268 | usage () {
269 | set_sbt_version
270 | cat < display stack traces with a max of frames (default: -1, traces suppressed)
286 | -debug-inc enable debugging log for the incremental compiler
287 | -no-colors disable ANSI color codes
288 | -sbt-create start sbt even if current directory contains no sbt project
289 | -sbt-dir path to global settings/plugins directory (default: ~/.sbt/)
290 | -sbt-boot path to shared boot directory (default: ~/.sbt/boot in 0.11+)
291 | -ivy path to local Ivy repository (default: ~/.ivy2)
292 | -no-share use all local caches; no sharing
293 | -offline put sbt in offline mode
294 | -jvm-debug Turn on JVM debugging, open at the given port.
295 | -batch Disable interactive mode
296 | -prompt Set the sbt prompt; in expr, 's' is the State and 'e' is Extracted
297 | -script Run the specified file as a scala script
298 | # sbt version (default: sbt.version from $buildProps if present, otherwise $sbt_release_version)
299 | -sbt-force-latest force the use of the latest release of sbt: $sbt_release_version
300 | -sbt-version use the specified version of sbt (default: $sbt_release_version)
301 | -sbt-dev use the latest pre-release version of sbt: $sbt_unreleased_version
302 | -sbt-jar use the specified jar as the sbt launcher
303 | -sbt-launch-dir directory to hold sbt launchers (default: $sbt_launch_dir)
304 | -sbt-launch-repo repo url for downloading sbt launcher jar (default: $(url_base "$sbt_version"))
305 | # scala version (default: as chosen by sbt)
306 | -28 use $latest_28
307 | -29 use $latest_29
308 | -210 use $latest_210
309 | -211 use $latest_211
310 | -212 use $latest_212
311 | -scala-home use the scala build at the specified directory
312 | -scala-version use the specified version of scala
313 | -binary-version use the specified scala version when searching for dependencies
314 | # java version (default: java from PATH, currently $(java -version 2>&1 | grep version))
315 | -java-home alternate JAVA_HOME
316 | # passing options to the jvm - note it does NOT use JAVA_OPTS due to pollution
317 | # The default set is used if JVM_OPTS is unset and no -jvm-opts file is found
318 | $(default_jvm_opts)
319 | JVM_OPTS environment variable holding either the jvm args directly, or
320 | the reference to a file containing jvm args if given path is prepended by '@' (e.g. '@/etc/jvmopts')
321 | Note: "@"-file is overridden by local '.jvmopts' or '-jvm-opts' argument.
322 | -jvm-opts file containing jvm args (if not given, .jvmopts in project root is used if present)
323 | -Dkey=val pass -Dkey=val directly to the jvm
324 | -J-X pass option -X directly to the jvm (-J is stripped)
325 | # passing options to sbt, OR to this runner
326 | SBT_OPTS environment variable holding either the sbt args directly, or
327 | the reference to a file containing sbt args if given path is prepended by '@' (e.g. '@/etc/sbtopts')
328 | Note: "@"-file is overridden by local '.sbtopts' or '-sbt-opts' argument.
329 | -sbt-opts file containing sbt args (if not given, .sbtopts in project root is used if present)
330 | -S-X add -X to sbt's scalacOptions (-S is stripped)
331 | EOM
332 | }
333 |
334 | process_args () {
335 | require_arg () {
336 | local type="$1"
337 | local opt="$2"
338 | local arg="$3"
339 |
340 | if [[ -z "$arg" ]] || [[ "${arg:0:1}" == "-" ]]; then
341 | die "$opt requires <$type> argument"
342 | fi
343 | }
344 | while [[ $# -gt 0 ]]; do
345 | case "$1" in
346 | -h|-help) usage; exit 1 ;;
347 | -v) verbose=true && shift ;;
348 | -d) addSbt "--debug" && shift ;;
349 | -w) addSbt "--warn" && shift ;;
350 | -q) addSbt "--error" && shift ;;
351 | -x) debugUs=true && shift ;;
352 | -trace) require_arg integer "$1" "$2" && trace_level="$2" && shift 2 ;;
353 | -ivy) require_arg path "$1" "$2" && addJava "-Dsbt.ivy.home=$2" && shift 2 ;;
354 | -no-colors) addJava "-Dsbt.log.noformat=true" && shift ;;
355 | -no-share) noshare=true && shift ;;
356 | -sbt-boot) require_arg path "$1" "$2" && addJava "-Dsbt.boot.directory=$2" && shift 2 ;;
357 | -sbt-dir) require_arg path "$1" "$2" && sbt_dir="$2" && shift 2 ;;
358 | -debug-inc) addJava "-Dxsbt.inc.debug=true" && shift ;;
359 | -offline) addSbt "set offline := true" && shift ;;
360 | -jvm-debug) require_arg port "$1" "$2" && addDebugger "$2" && shift 2 ;;
361 | -batch) batch=true && shift ;;
362 | -prompt) require_arg "expr" "$1" "$2" && setThisBuild shellPrompt "(s => { val e = Project.extract(s) ; $2 })" && shift 2 ;;
363 | -script) require_arg file "$1" "$2" && sbt_script="$2" && addJava "-Dsbt.main.class=sbt.ScriptMain" && shift 2 ;;
364 |
365 | -sbt-create) sbt_create=true && shift ;;
366 | -sbt-jar) require_arg path "$1" "$2" && sbt_jar="$2" && shift 2 ;;
367 | -sbt-version) require_arg version "$1" "$2" && sbt_explicit_version="$2" && shift 2 ;;
368 | -sbt-force-latest) sbt_explicit_version="$sbt_release_version" && shift ;;
369 | -sbt-dev) sbt_explicit_version="$sbt_unreleased_version" && shift ;;
370 | -sbt-launch-dir) require_arg path "$1" "$2" && sbt_launch_dir="$2" && shift 2 ;;
371 | -sbt-launch-repo) require_arg path "$1" "$2" && sbt_launch_repo="$2" && shift 2 ;;
372 | -scala-version) require_arg version "$1" "$2" && setScalaVersion "$2" && shift 2 ;;
373 | -binary-version) require_arg version "$1" "$2" && setThisBuild scalaBinaryVersion "\"$2\"" && shift 2 ;;
374 | -scala-home) require_arg path "$1" "$2" && setThisBuild scalaHome "_root_.scala.Some(file(\"$2\"))" && shift 2 ;;
375 | -java-home) require_arg path "$1" "$2" && setJavaHome "$2" && shift 2 ;;
376 | -sbt-opts) require_arg path "$1" "$2" && sbt_opts_file="$2" && shift 2 ;;
377 | -jvm-opts) require_arg path "$1" "$2" && jvm_opts_file="$2" && shift 2 ;;
378 |
379 | -D*) addJava "$1" && shift ;;
380 | -J*) addJava "${1:2}" && shift ;;
381 | -S*) addScalac "${1:2}" && shift ;;
382 | -28) setScalaVersion "$latest_28" && shift ;;
383 | -29) setScalaVersion "$latest_29" && shift ;;
384 | -210) setScalaVersion "$latest_210" && shift ;;
385 | -211) setScalaVersion "$latest_211" && shift ;;
386 | -212) setScalaVersion "$latest_212" && shift ;;
387 |
388 | # TODO: Switch the below to sbt_release_version after 0.13.13 (and "new) is out
389 | new) sbt_new=true && sbt_explicit_version="$sbt_unreleased_version" && addResidual "$1" && shift ;;
390 | *) addResidual "$1" && shift ;;
391 | esac
392 | done
393 | }
394 |
395 | # process the direct command line arguments
396 | process_args "$@"
397 |
398 | # skip #-styled comments and blank lines
399 | readConfigFile() {
400 | local end=false
401 | until $end; do
402 | read || end=true
403 | [[ $REPLY =~ ^# ]] || [[ -z $REPLY ]] || echo "$REPLY"
404 | done < "$1"
405 | }
406 |
407 | # if there are file/environment sbt_opts, process again so we
408 | # can supply args to this runner
409 | if [[ -r "$sbt_opts_file" ]]; then
410 | vlog "Using sbt options defined in file $sbt_opts_file"
411 | while read opt; do extra_sbt_opts+=("$opt"); done < <(readConfigFile "$sbt_opts_file")
412 | elif [[ -n "$SBT_OPTS" && ! ("$SBT_OPTS" =~ ^@.*) ]]; then
413 | vlog "Using sbt options defined in variable \$SBT_OPTS"
414 | extra_sbt_opts=( $SBT_OPTS )
415 | else
416 | vlog "No extra sbt options have been defined"
417 | fi
418 |
419 | [[ -n "${extra_sbt_opts[*]}" ]] && process_args "${extra_sbt_opts[@]}"
420 |
421 | # reset "$@" to the residual args
422 | set -- "${residual_args[@]}"
423 | argumentCount=$#
424 |
425 | # set sbt version
426 | set_sbt_version
427 |
428 | checkJava
429 |
430 | # only exists in 0.12+
431 | setTraceLevel() {
432 | case "$sbt_version" in
433 | "0.7."* | "0.10."* | "0.11."* ) echoerr "Cannot set trace level in sbt version $sbt_version" ;;
434 | *) setThisBuild traceLevel $trace_level ;;
435 | esac
436 | }
437 |
438 | # set scalacOptions if we were given any -S opts
439 | [[ ${#scalac_args[@]} -eq 0 ]] || addSbt "set scalacOptions in ThisBuild += \"${scalac_args[@]}\""
440 |
441 | # Update build.properties on disk to set explicit version - sbt gives us no choice
442 | [[ -n "$sbt_explicit_version" && -z "$sbt_new" ]] && update_build_props_sbt "$sbt_explicit_version"
443 | vlog "Detected sbt version $sbt_version"
444 |
445 | if [[ -n "$sbt_script" ]]; then
446 | residual_args=( $sbt_script ${residual_args[@]} )
447 | else
448 | # no args - alert them there's stuff in here
449 | (( argumentCount > 0 )) || {
450 | vlog "Starting $script_name: invoke with -help for other options"
451 | residual_args=( shell )
452 | }
453 | fi
454 |
455 | # verify this is an sbt dir, -create was given or user attempts to run a scala script
456 | [[ -r ./build.sbt || -d ./project || -n "$sbt_create" || -n "$sbt_script" || -n "$sbt_new" ]] || {
457 | cat <
--------------------------------------------------------------------------------
/server/sbt:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | #
3 | # A more capable sbt runner, coincidentally also called sbt.
4 | # Author: Paul Phillips
5 |
6 | set -o pipefail
7 |
8 | declare -r sbt_release_version="0.13.12"
9 | declare -r sbt_unreleased_version="0.13.13-M1"
10 |
11 | declare -r latest_212="2.12.0-RC1"
12 | declare -r latest_211="2.11.8"
13 | declare -r latest_210="2.10.6"
14 | declare -r latest_29="2.9.3"
15 | declare -r latest_28="2.8.2"
16 |
17 | declare -r buildProps="project/build.properties"
18 |
19 | declare -r sbt_launch_ivy_release_repo="http://repo.typesafe.com/typesafe/ivy-releases"
20 | declare -r sbt_launch_ivy_snapshot_repo="https://repo.scala-sbt.org/scalasbt/ivy-snapshots"
21 | declare -r sbt_launch_mvn_release_repo="http://repo.scala-sbt.org/scalasbt/maven-releases"
22 | declare -r sbt_launch_mvn_snapshot_repo="http://repo.scala-sbt.org/scalasbt/maven-snapshots"
23 |
24 | declare -r default_jvm_opts_common="-Xms512m -Xmx1536m -Xss2m"
25 | declare -r noshare_opts="-Dsbt.global.base=project/.sbtboot -Dsbt.boot.directory=project/.boot -Dsbt.ivy.home=project/.ivy"
26 |
27 | declare sbt_jar sbt_dir sbt_create sbt_version sbt_script sbt_new
28 | declare sbt_explicit_version
29 | declare verbose noshare batch trace_level
30 | declare sbt_saved_stty debugUs
31 |
32 | declare java_cmd="java"
33 | declare sbt_launch_dir="$HOME/.sbt/launchers"
34 | declare sbt_launch_repo
35 |
36 | # pull -J and -D options to give to java.
37 | declare -a java_args scalac_args sbt_commands residual_args
38 |
39 | # args to jvm/sbt via files or environment variables
40 | declare -a extra_jvm_opts extra_sbt_opts
41 |
42 | echoerr () { echo >&2 "$@"; }
43 | vlog () { [[ -n "$verbose" ]] && echoerr "$@"; }
44 | die () { echo "Aborting: $@" ; exit 1; }
45 |
46 | # restore stty settings (echo in particular)
47 | onSbtRunnerExit() {
48 | [[ -n "$sbt_saved_stty" ]] || return
49 | vlog ""
50 | vlog "restoring stty: $sbt_saved_stty"
51 | stty "$sbt_saved_stty"
52 | unset sbt_saved_stty
53 | }
54 |
55 | # save stty and trap exit, to ensure echo is re-enabled if we are interrupted.
56 | trap onSbtRunnerExit EXIT
57 | sbt_saved_stty="$(stty -g 2>/dev/null)"
58 | vlog "Saved stty: $sbt_saved_stty"
59 |
60 | # this seems to cover the bases on OSX, and someone will
61 | # have to tell me about the others.
62 | get_script_path () {
63 | local path="$1"
64 | [[ -L "$path" ]] || { echo "$path" ; return; }
65 |
66 | local target="$(readlink "$path")"
67 | if [[ "${target:0:1}" == "/" ]]; then
68 | echo "$target"
69 | else
70 | echo "${path%/*}/$target"
71 | fi
72 | }
73 |
74 | declare -r script_path="$(get_script_path "$BASH_SOURCE")"
75 | declare -r script_name="${script_path##*/}"
76 |
77 | init_default_option_file () {
78 | local overriding_var="${!1}"
79 | local default_file="$2"
80 | if [[ ! -r "$default_file" && "$overriding_var" =~ ^@(.*)$ ]]; then
81 | local envvar_file="${BASH_REMATCH[1]}"
82 | if [[ -r "$envvar_file" ]]; then
83 | default_file="$envvar_file"
84 | fi
85 | fi
86 | echo "$default_file"
87 | }
88 |
89 | declare sbt_opts_file="$(init_default_option_file SBT_OPTS .sbtopts)"
90 | declare jvm_opts_file="$(init_default_option_file JVM_OPTS .jvmopts)"
91 |
92 | build_props_sbt () {
93 | [[ -r "$buildProps" ]] && \
94 | grep '^sbt\.version' "$buildProps" | tr '=\r' ' ' | awk '{ print $2; }'
95 | }
96 |
97 | update_build_props_sbt () {
98 | local ver="$1"
99 | local old="$(build_props_sbt)"
100 |
101 | [[ -r "$buildProps" ]] && [[ "$ver" != "$old" ]] && {
102 | perl -pi -e "s/^sbt\.version\b.*\$/sbt.version=${ver}/" "$buildProps"
103 | grep -q '^sbt.version[ =]' "$buildProps" || printf "\nsbt.version=%s\n" "$ver" >> "$buildProps"
104 |
105 | vlog "!!!"
106 | vlog "!!! Updated file $buildProps setting sbt.version to: $ver"
107 | vlog "!!! Previous value was: $old"
108 | vlog "!!!"
109 | }
110 | }
111 |
112 | set_sbt_version () {
113 | sbt_version="${sbt_explicit_version:-$(build_props_sbt)}"
114 | [[ -n "$sbt_version" ]] || sbt_version=$sbt_release_version
115 | export sbt_version
116 | }
117 |
118 | url_base () {
119 | local version="$1"
120 |
121 | case "$version" in
122 | 0.7.*) echo "http://simple-build-tool.googlecode.com" ;;
123 | 0.10.* ) echo "$sbt_launch_ivy_release_repo" ;;
124 | 0.11.[12]) echo "$sbt_launch_ivy_release_repo" ;;
125 | 0.*-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]-[0-9][0-9][0-9][0-9][0-9][0-9]) # ie "*-yyyymmdd-hhMMss"
126 | echo "$sbt_launch_ivy_snapshot_repo" ;;
127 | 0.*) echo "$sbt_launch_ivy_release_repo" ;;
128 | *-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]-[0-9][0-9][0-9][0-9][0-9][0-9]) # ie "*-yyyymmdd-hhMMss"
129 | echo "$sbt_launch_mvn_snapshot_repo" ;;
130 | *) echo "$sbt_launch_mvn_release_repo" ;;
131 | esac
132 | }
133 |
134 | make_url () {
135 | local version="$1"
136 |
137 | local base="${sbt_launch_repo:-$(url_base "$version")}"
138 |
139 | case "$version" in
140 | 0.7.*) echo "$base/files/sbt-launch-0.7.7.jar" ;;
141 | 0.10.* ) echo "$base/org.scala-tools.sbt/sbt-launch/$version/sbt-launch.jar" ;;
142 | 0.11.[12]) echo "$base/org.scala-tools.sbt/sbt-launch/$version/sbt-launch.jar" ;;
143 | 0.*) echo "$base/org.scala-sbt/sbt-launch/$version/sbt-launch.jar" ;;
144 | *) echo "$base/org/scala-sbt/sbt-launch/$version/sbt-launch.jar" ;;
145 | esac
146 | }
147 |
148 | addJava () { vlog "[addJava] arg = '$1'" ; java_args+=("$1"); }
149 | addSbt () { vlog "[addSbt] arg = '$1'" ; sbt_commands+=("$1"); }
150 | addScalac () { vlog "[addScalac] arg = '$1'" ; scalac_args+=("$1"); }
151 | addResidual () { vlog "[residual] arg = '$1'" ; residual_args+=("$1"); }
152 |
153 | addResolver () { addSbt "set resolvers += $1"; }
154 | addDebugger () { addJava "-Xdebug" ; addJava "-Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=$1"; }
155 | setThisBuild () {
156 | vlog "[addBuild] args = '$@'"
157 | local key="$1" && shift
158 | addSbt "set $key in ThisBuild := $@"
159 | }
160 | setScalaVersion () {
161 | [[ "$1" == *"-SNAPSHOT" ]] && addResolver 'Resolver.sonatypeRepo("snapshots")'
162 | addSbt "++ $1"
163 | }
164 | setJavaHome () {
165 | java_cmd="$1/bin/java"
166 | setThisBuild javaHome "_root_.scala.Some(file(\"$1\"))"
167 | export JAVA_HOME="$1"
168 | export JDK_HOME="$1"
169 | export PATH="$JAVA_HOME/bin:$PATH"
170 | }
171 |
172 | getJavaVersion() { "$1" -version 2>&1 | grep -E -e '(java|openjdk) version' | awk '{ print $3 }' | tr -d \"; }
173 |
174 | checkJava() {
175 | # Warn if there is a Java version mismatch between PATH and JAVA_HOME/JDK_HOME
176 |
177 | [[ -n "$JAVA_HOME" && -e "$JAVA_HOME/bin/java" ]] && java="$JAVA_HOME/bin/java"
178 | [[ -n "$JDK_HOME" && -e "$JDK_HOME/lib/tools.jar" ]] && java="$JDK_HOME/bin/java"
179 |
180 | if [[ -n "$java" ]]; then
181 | pathJavaVersion=$(getJavaVersion java)
182 | homeJavaVersion=$(getJavaVersion "$java")
183 | if [[ "$pathJavaVersion" != "$homeJavaVersion" ]]; then
184 | echoerr "Warning: Java version mismatch between PATH and JAVA_HOME/JDK_HOME, sbt will use the one in PATH"
185 | echoerr " Either: fix your PATH, remove JAVA_HOME/JDK_HOME or use -java-home"
186 | echoerr " java version from PATH: $pathJavaVersion"
187 | echoerr " java version from JAVA_HOME/JDK_HOME: $homeJavaVersion"
188 | fi
189 | fi
190 | }
191 |
192 | java_version () {
193 | local version=$(getJavaVersion "$java_cmd")
194 | vlog "Detected Java version: $version"
195 | echo "${version:2:1}"
196 | }
197 |
198 | # MaxPermSize critical on pre-8 JVMs but incurs noisy warning on 8+
199 | default_jvm_opts () {
200 | local v="$(java_version)"
201 | if [[ $v -ge 8 ]]; then
202 | echo "$default_jvm_opts_common"
203 | else
204 | echo "-XX:MaxPermSize=384m $default_jvm_opts_common"
205 | fi
206 | }
207 |
208 | build_props_scala () {
209 | if [[ -r "$buildProps" ]]; then
210 | versionLine="$(grep '^build.scala.versions' "$buildProps")"
211 | versionString="${versionLine##build.scala.versions=}"
212 | echo "${versionString%% .*}"
213 | fi
214 | }
215 |
216 | execRunner () {
217 | # print the arguments one to a line, quoting any containing spaces
218 | vlog "# Executing command line:" && {
219 | for arg; do
220 | if [[ -n "$arg" ]]; then
221 | if printf "%s\n" "$arg" | grep -q ' '; then
222 | printf >&2 "\"%s\"\n" "$arg"
223 | else
224 | printf >&2 "%s\n" "$arg"
225 | fi
226 | fi
227 | done
228 | vlog ""
229 | }
230 |
231 | [[ -n "$batch" ]] && exec /dev/null; then
248 | curl --fail --silent --location "$url" --output "$jar"
249 | elif which wget >/dev/null; then
250 | wget -q -O "$jar" "$url"
251 | fi
252 | } && [[ -r "$jar" ]]
253 | }
254 |
255 | acquire_sbt_jar () {
256 | {
257 | sbt_jar="$(jar_file "$sbt_version")"
258 | [[ -r "$sbt_jar" ]]
259 | } || {
260 | sbt_jar="$HOME/.ivy2/local/org.scala-sbt/sbt-launch/$sbt_version/jars/sbt-launch.jar"
261 | [[ -r "$sbt_jar" ]]
262 | } || {
263 | sbt_jar="$(jar_file "$sbt_version")"
264 | download_url "$(make_url "$sbt_version")" "$sbt_jar"
265 | }
266 | }
267 |
268 | usage () {
269 | set_sbt_version
270 | cat < display stack traces with a max of frames (default: -1, traces suppressed)
286 | -debug-inc enable debugging log for the incremental compiler
287 | -no-colors disable ANSI color codes
288 | -sbt-create start sbt even if current directory contains no sbt project
289 | -sbt-dir path to global settings/plugins directory (default: ~/.sbt/)
290 | -sbt-boot path to shared boot directory (default: ~/.sbt/boot in 0.11+)
291 | -ivy path to local Ivy repository (default: ~/.ivy2)
292 | -no-share use all local caches; no sharing
293 | -offline put sbt in offline mode
294 | -jvm-debug Turn on JVM debugging, open at the given port.
295 | -batch Disable interactive mode
296 | -prompt Set the sbt prompt; in expr, 's' is the State and 'e' is Extracted
297 | -script Run the specified file as a scala script
298 | # sbt version (default: sbt.version from $buildProps if present, otherwise $sbt_release_version)
299 | -sbt-force-latest force the use of the latest release of sbt: $sbt_release_version
300 | -sbt-version use the specified version of sbt (default: $sbt_release_version)
301 | -sbt-dev use the latest pre-release version of sbt: $sbt_unreleased_version
302 | -sbt-jar use the specified jar as the sbt launcher
303 | -sbt-launch-dir directory to hold sbt launchers (default: $sbt_launch_dir)
304 | -sbt-launch-repo repo url for downloading sbt launcher jar (default: $(url_base "$sbt_version"))
305 | # scala version (default: as chosen by sbt)
306 | -28 use $latest_28
307 | -29 use $latest_29
308 | -210 use $latest_210
309 | -211 use $latest_211
310 | -212 use $latest_212
311 | -scala-home use the scala build at the specified directory
312 | -scala-version use the specified version of scala
313 | -binary-version use the specified scala version when searching for dependencies
314 | # java version (default: java from PATH, currently $(java -version 2>&1 | grep version))
315 | -java-home alternate JAVA_HOME
316 | # passing options to the jvm - note it does NOT use JAVA_OPTS due to pollution
317 | # The default set is used if JVM_OPTS is unset and no -jvm-opts file is found
318 | $(default_jvm_opts)
319 | JVM_OPTS environment variable holding either the jvm args directly, or
320 | the reference to a file containing jvm args if given path is prepended by '@' (e.g. '@/etc/jvmopts')
321 | Note: "@"-file is overridden by local '.jvmopts' or '-jvm-opts' argument.
322 | -jvm-opts file containing jvm args (if not given, .jvmopts in project root is used if present)
323 | -Dkey=val pass -Dkey=val directly to the jvm
324 | -J-X pass option -X directly to the jvm (-J is stripped)
325 | # passing options to sbt, OR to this runner
326 | SBT_OPTS environment variable holding either the sbt args directly, or
327 | the reference to a file containing sbt args if given path is prepended by '@' (e.g. '@/etc/sbtopts')
328 | Note: "@"-file is overridden by local '.sbtopts' or '-sbt-opts' argument.
329 | -sbt-opts file containing sbt args (if not given, .sbtopts in project root is used if present)
330 | -S-X add -X to sbt's scalacOptions (-S is stripped)
331 | EOM
332 | }
333 |
334 | process_args () {
335 | require_arg () {
336 | local type="$1"
337 | local opt="$2"
338 | local arg="$3"
339 |
340 | if [[ -z "$arg" ]] || [[ "${arg:0:1}" == "-" ]]; then
341 | die "$opt requires <$type> argument"
342 | fi
343 | }
344 | while [[ $# -gt 0 ]]; do
345 | case "$1" in
346 | -h|-help) usage; exit 1 ;;
347 | -v) verbose=true && shift ;;
348 | -d) addSbt "--debug" && shift ;;
349 | -w) addSbt "--warn" && shift ;;
350 | -q) addSbt "--error" && shift ;;
351 | -x) debugUs=true && shift ;;
352 | -trace) require_arg integer "$1" "$2" && trace_level="$2" && shift 2 ;;
353 | -ivy) require_arg path "$1" "$2" && addJava "-Dsbt.ivy.home=$2" && shift 2 ;;
354 | -no-colors) addJava "-Dsbt.log.noformat=true" && shift ;;
355 | -no-share) noshare=true && shift ;;
356 | -sbt-boot) require_arg path "$1" "$2" && addJava "-Dsbt.boot.directory=$2" && shift 2 ;;
357 | -sbt-dir) require_arg path "$1" "$2" && sbt_dir="$2" && shift 2 ;;
358 | -debug-inc) addJava "-Dxsbt.inc.debug=true" && shift ;;
359 | -offline) addSbt "set offline := true" && shift ;;
360 | -jvm-debug) require_arg port "$1" "$2" && addDebugger "$2" && shift 2 ;;
361 | -batch) batch=true && shift ;;
362 | -prompt) require_arg "expr" "$1" "$2" && setThisBuild shellPrompt "(s => { val e = Project.extract(s) ; $2 })" && shift 2 ;;
363 | -script) require_arg file "$1" "$2" && sbt_script="$2" && addJava "-Dsbt.main.class=sbt.ScriptMain" && shift 2 ;;
364 |
365 | -sbt-create) sbt_create=true && shift ;;
366 | -sbt-jar) require_arg path "$1" "$2" && sbt_jar="$2" && shift 2 ;;
367 | -sbt-version) require_arg version "$1" "$2" && sbt_explicit_version="$2" && shift 2 ;;
368 | -sbt-force-latest) sbt_explicit_version="$sbt_release_version" && shift ;;
369 | -sbt-dev) sbt_explicit_version="$sbt_unreleased_version" && shift ;;
370 | -sbt-launch-dir) require_arg path "$1" "$2" && sbt_launch_dir="$2" && shift 2 ;;
371 | -sbt-launch-repo) require_arg path "$1" "$2" && sbt_launch_repo="$2" && shift 2 ;;
372 | -scala-version) require_arg version "$1" "$2" && setScalaVersion "$2" && shift 2 ;;
373 | -binary-version) require_arg version "$1" "$2" && setThisBuild scalaBinaryVersion "\"$2\"" && shift 2 ;;
374 | -scala-home) require_arg path "$1" "$2" && setThisBuild scalaHome "_root_.scala.Some(file(\"$2\"))" && shift 2 ;;
375 | -java-home) require_arg path "$1" "$2" && setJavaHome "$2" && shift 2 ;;
376 | -sbt-opts) require_arg path "$1" "$2" && sbt_opts_file="$2" && shift 2 ;;
377 | -jvm-opts) require_arg path "$1" "$2" && jvm_opts_file="$2" && shift 2 ;;
378 |
379 | -D*) addJava "$1" && shift ;;
380 | -J*) addJava "${1:2}" && shift ;;
381 | -S*) addScalac "${1:2}" && shift ;;
382 | -28) setScalaVersion "$latest_28" && shift ;;
383 | -29) setScalaVersion "$latest_29" && shift ;;
384 | -210) setScalaVersion "$latest_210" && shift ;;
385 | -211) setScalaVersion "$latest_211" && shift ;;
386 | -212) setScalaVersion "$latest_212" && shift ;;
387 |
388 | # TODO: Switch the below to sbt_release_version after 0.13.13 (and "new) is out
389 | new) sbt_new=true && sbt_explicit_version="$sbt_unreleased_version" && addResidual "$1" && shift ;;
390 | *) addResidual "$1" && shift ;;
391 | esac
392 | done
393 | }
394 |
395 | # process the direct command line arguments
396 | process_args "$@"
397 |
398 | # skip #-styled comments and blank lines
399 | readConfigFile() {
400 | local end=false
401 | until $end; do
402 | read || end=true
403 | [[ $REPLY =~ ^# ]] || [[ -z $REPLY ]] || echo "$REPLY"
404 | done < "$1"
405 | }
406 |
407 | # if there are file/environment sbt_opts, process again so we
408 | # can supply args to this runner
409 | if [[ -r "$sbt_opts_file" ]]; then
410 | vlog "Using sbt options defined in file $sbt_opts_file"
411 | while read opt; do extra_sbt_opts+=("$opt"); done < <(readConfigFile "$sbt_opts_file")
412 | elif [[ -n "$SBT_OPTS" && ! ("$SBT_OPTS" =~ ^@.*) ]]; then
413 | vlog "Using sbt options defined in variable \$SBT_OPTS"
414 | extra_sbt_opts=( $SBT_OPTS )
415 | else
416 | vlog "No extra sbt options have been defined"
417 | fi
418 |
419 | [[ -n "${extra_sbt_opts[*]}" ]] && process_args "${extra_sbt_opts[@]}"
420 |
421 | # reset "$@" to the residual args
422 | set -- "${residual_args[@]}"
423 | argumentCount=$#
424 |
425 | # set sbt version
426 | set_sbt_version
427 |
428 | checkJava
429 |
430 | # only exists in 0.12+
431 | setTraceLevel() {
432 | case "$sbt_version" in
433 | "0.7."* | "0.10."* | "0.11."* ) echoerr "Cannot set trace level in sbt version $sbt_version" ;;
434 | *) setThisBuild traceLevel $trace_level ;;
435 | esac
436 | }
437 |
438 | # set scalacOptions if we were given any -S opts
439 | [[ ${#scalac_args[@]} -eq 0 ]] || addSbt "set scalacOptions in ThisBuild += \"${scalac_args[@]}\""
440 |
441 | # Update build.properties on disk to set explicit version - sbt gives us no choice
442 | [[ -n "$sbt_explicit_version" && -z "$sbt_new" ]] && update_build_props_sbt "$sbt_explicit_version"
443 | vlog "Detected sbt version $sbt_version"
444 |
445 | if [[ -n "$sbt_script" ]]; then
446 | residual_args=( $sbt_script ${residual_args[@]} )
447 | else
448 | # no args - alert them there's stuff in here
449 | (( argumentCount > 0 )) || {
450 | vlog "Starting $script_name: invoke with -help for other options"
451 | residual_args=( shell )
452 | }
453 | fi
454 |
455 | # verify this is an sbt dir, -create was given or user attempts to run a scala script
456 | [[ -r ./build.sbt || -d ./project || -n "$sbt_create" || -n "$sbt_script" || -n "$sbt_new" ]] || {
457 | cat <