├── .babelrc ├── .gitignore ├── .sample_env ├── LICENSE ├── README.md ├── db ├── migrations │ ├── deploy │ │ ├── 20191021172721-initial.sql │ │ ├── 20191107093406-stacendpoint.sql │ │ ├── 20191107214618-rootcollection.sql │ │ ├── 20191113203743-replacesearch.sql │ │ ├── 20191211011147-fixindexperf.sql │ │ ├── 20200217192155-tiebreak.sql │ │ └── 20200715144826-multilinks.sql │ ├── revert │ │ ├── 20191021172721-initial.sql │ │ ├── 20191107093406-stacendpoint.sql │ │ ├── 20191107214618-rootcollection.sql │ │ ├── 20191113203743-replacesearch.sql │ │ ├── 20191211011147-fixindexperf.sql │ │ ├── 20200217192155-tiebreak.sql │ │ └── 20200715144826-multilinks.sql │ ├── sqitch.conf │ ├── sqitch.plan │ └── verify │ │ ├── 20191021172721-initial.sql │ │ ├── 20191107093406-stacendpoint.sql │ │ ├── 20191107214618-rootcollection.sql │ │ ├── 20191113203743-replacesearch.sql │ │ ├── 20191211011147-fixindexperf.sql │ │ ├── 20200217192155-tiebreak.sql │ │ └── 20200715144826-multilinks.sql └── src │ ├── api │ ├── satapi.sql │ └── schema.sql │ ├── authorization │ ├── privileges.sql │ └── roles.sql │ ├── data │ ├── satapi.sql │ └── schema.sql │ ├── init.sh │ ├── init.sql │ ├── libs │ ├── auth │ │ ├── api │ │ │ ├── all.sql │ │ │ ├── login.sql │ │ │ ├── me.sql │ │ │ ├── refresh_token.sql │ │ │ ├── session_type.sql │ │ │ ├── signup.sql │ │ │ └── user_type.sql │ │ ├── data │ │ │ ├── user.sql │ │ │ └── user_role_type.sql │ │ └── schema.sql │ ├── pgjwt │ │ ├── .gitignore │ │ ├── LICENSE │ │ ├── Makefile │ │ ├── README.md │ │ ├── pgjwt--0.0.1.sql │ │ ├── pgjwt.control │ │ ├── schema.sql │ │ └── test.sql │ ├── request │ │ └── schema.sql │ └── settings │ │ └── schema.sql │ └── sample_data │ ├── data.sql │ └── reset.sql ├── deployment ├── .sample_env ├── README.md ├── cloudformation.yaml ├── createStack.sh ├── createSubZeroConfig.sh ├── deploy.sh ├── initMigrations.sh ├── roleSql.sql └── sqitch ├── docker-compose.yml ├── docs ├── STAC.yaml ├── api.merge.yaml ├── api.yaml ├── fields.fragment.yaml 
├── insert.fragment.yaml ├── query.fragment.yaml └── sort.fragment.yaml ├── generateToken.js ├── openresty ├── Dockerfile ├── lualib │ └── user_code │ │ ├── datetimeBuilder.lua │ │ ├── defaultFields.lua │ │ ├── extensions │ │ ├── fieldsExtension.lua │ │ ├── queryExtension.lua │ │ └── sortExtension.lua │ │ ├── filters.lua │ │ ├── hooks.lua │ │ ├── init_phase.lua │ │ ├── internal_rest_body_filter_phase.lua │ │ ├── internal_rest_header_filter_phase.lua │ │ ├── internal_rest_rewrite_phase.lua │ │ ├── limit_constants.lua │ │ ├── path_constants.lua │ │ ├── pg_constants.lua │ │ ├── satapi.lua │ │ ├── search.lua │ │ ├── string_utils.lua │ │ ├── utils.lua │ │ └── wfsBuilder.lua └── nginx │ ├── conf │ ├── includes │ │ ├── globals │ │ │ └── env_vars.conf │ │ ├── http │ │ │ ├── init_lua.conf │ │ │ └── server │ │ │ │ ├── gzip.conf │ │ │ │ ├── locations.conf │ │ │ │ ├── locations │ │ │ │ ├── internal_rest.conf │ │ │ │ └── internal_rest │ │ │ │ │ ├── lua.conf │ │ │ │ │ └── security.conf │ │ │ │ └── resolver.conf │ │ └── root_location.conf │ └── nginx.conf │ └── html │ └── index.html ├── package.json ├── tests ├── bin │ └── test_db.js ├── db │ ├── README.md │ ├── simple.sql │ └── structure.sql └── rest │ ├── .eslintrc │ ├── bbox.js │ ├── collections.js │ ├── collections_filter.js │ ├── common.js │ ├── constants.js │ ├── datetime.js │ ├── fields.js │ ├── ids_filter.js │ ├── intersects.js │ ├── intersects.json │ ├── intersectsPoint.json │ ├── items.js │ ├── landsat8l2Collection.json │ ├── landsatItem.json │ ├── landsatItems.json │ ├── next_limit.js │ ├── query.js │ ├── root.js │ ├── sort.js │ └── wfs.js └── yarn.lock /.babelrc: -------------------------------------------------------------------------------- 1 | { 2 | "presets": ["latest"] 3 | } -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | package-lock.json 3 | .env 4 | .tern-port 5 | 
subzero-app.json 6 | -------------------------------------------------------------------------------- /.sample_env: -------------------------------------------------------------------------------- 1 | # Docker specific configs 2 | # use only letters and numbers for the project name 3 | COMPOSE_PROJECT_NAME=sat-api-pg 4 | 5 | 6 | # Global configs 7 | DEVELOPMENT=1 8 | JWT_SECRET=reallyreallyreallyreallyverysafe 9 | 10 | # DB connection details (used by all containers) 11 | # set PG_VERSION to match your production db major version 12 | PG_VERSION=11.2 13 | DB_HOST=db 14 | DB_PORT=5432 15 | DB_NAME=app 16 | DB_SCHEMA=api 17 | DB_USER=authenticator 18 | DB_PASS=authenticatorpass 19 | 20 | # OpenResty 21 | POSTGREST_HOST=postgrest 22 | POSTGREST_PORT=3000 23 | 24 | # PostgREST 25 | DB_ANON_ROLE=anonymous 26 | DB_POOL=10 27 | #MAX_ROWS= 28 | #PRE_REQUEST= 29 | SERVER_PROXY_URI=http://localhost:8080/ 30 | 31 | # PostgreSQL container config 32 | # Use this to connect directly to the db running in the container 33 | SUPER_USER=superuser 34 | SUPER_USER_PASSWORD=superuserpass 35 | 36 | # psql postgres://superuser:superuserpass@localhost:5432/app 37 | @localhost:5432/app 38 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Development Seed 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial 
portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # sat-api-pg 2 | 3 | ## A Postgres backed STAC API. 4 | 5 | [sat-api-pg OpenAPI Docs](http://devseed.com/sat-api-pg-swagger/) 6 | 7 | Built on the foundation of the excellent 8 | 9 | [PostgREST](https://postgrest.com) - Postgres REST API backends. 10 | 11 | [PostgREST Starter Kit](https://github.com/subzerocloud/postgrest-starter-kit) - Starter Kit and tooling for authoring REST API backends with PostgREST. 12 | 13 | ## Purpose 14 | 15 | To provide the community a Postgres backed reference implementation of the [STAC API specification](https://github.com/radiantearth/stac-spec/tree/dev/api-spec). 16 | Postgres's flexibility and ecosystem of geospatial functionality provide a great 17 | foundation for building spatial APIs and we hope the community can expand on this work to drive STAC development forward. 18 | 19 | ## Project Layout 20 | 21 | ```bash 22 | . 
23 | ├── db # Database schema source files and tests 24 | │ └── src # Schema definition 25 | │ ├── api # Api entities avaiable as REST endpoints 26 | │ ├── data # Definition of source tables that hold the data 27 | │ ├── libs # A collection modules of used throughout the code 28 | │ ├── authorization # Application level roles and their privileges 29 | │ ├── sample_data # A few sample rows 30 | │ └── init.sql # Schema definition entry point 31 | ├── openresty # Reverse proxy configurations and Lua code 32 | │ ├── lualib 33 | │ │ └── user_code # Application Lua code 34 | │ ├── nginx # Nginx files 35 | │ │ ├── conf # Configuration files 36 | │ │ └── html # Static frontend files 37 | │ ├── Dockerfile # Dockerfile definition for production 38 | │ └── entrypoint.sh # Custom entrypoint 39 | ├── tests # Tests for all the components 40 | │ ├── db # pgTap tests for the db 41 | │ └── rest # REST interface tests 42 | ├── docker-compose.yml # Defines Docker services, networks and volumes 43 | └── .env # Project configurations 44 | 45 | ``` 46 | 47 | ## Installation 48 | 49 | ### Prerequisites 50 | * [Docker](https://www.docker.com) 51 | * [Node.js](https://nodejs.org/en/) 52 | * [Yarn](https://yarnpkg.com/lang/en/) 53 | 54 | In the root folder of the application, install the necessary js libs. 55 | ```bash 56 | $ yarn 57 | ``` 58 | 59 | The root folder of the application contains `.sample_env` with development environment settings. 
Copy this file by running
New contributions are welcomed and you can contact 113 | [@sharkinsspatial](https://github.com/sharkinsspatial) or info@developmentseed.org for additional support or assistance with customization. 114 | Anyone and everyone is welcome to contribute. 115 | 116 | ## STAC alignment 117 | This API implementation closely follows the [STAC API specification](https://github.com/radiantearth/stac-spec/tree/dev/api-spec). Becase the STAC API specifcation is under active development there are some current differences between the STAC specification [v0.8.0](https://github.com/radiantearth/stac-spec/releases/tag/v0.8.0). For more details on capabilities see [sat-api-pg OpenAPI Docs](http://devseed.com/sat-api-pg-swagger/). 118 | Notable differences 119 | 120 | - Though the [search extension](https://github.com/radiantearth/stac-spec/tree/master/api-spec/extensions/search) is not currently implemented much of the same behavior can be acheived via the use of http headers. When using the `next` and `limit` parameters, responses will contain a `Content-Range` header which shows the current range of the response. To obtain the total number of items found the request can specify the `Prefer: count=exact` header and the full count will be available in the `Content-Range` response header. Be aware that this exact count can be slow for very large tables. For increased performance we will soon release support for the `Prefer: count=planned` header to provide an estimated count. Note that the accuracy of this count depends on how up-to-date are the PostgreSQL statistics tables. 121 | 122 | - The API contains a generic `/items` endpoint which supports access to items across parent collections. The rationale for this is tied to the insert extension described below. 123 | 124 | - The [transaction](https://github.com/radiantearth/stac-spec/tree/master/api-spec/extensions/transaction) is not currently implemented but insert behavior using http POST is enabled for `items` and `collections`. 
Authentication for insert operations is handled via the `Authorization` header with JWT tokens. To make an authenticated request the client must include an Authorization HTTP header with the value `Bearer `. Tokens can be generated using the `JWT_SECRET` from the `.env` file by running 125 | 126 | ```bash 127 | $ node generateToken.js 128 | ``` 129 | 130 | Due to permissions on the base table where records are stored insert requests must also set the header `Prefer: return=minimal`. 131 | -------------------------------------------------------------------------------- /db/migrations/deploy/20191107093406-stacendpoint.sql: -------------------------------------------------------------------------------- 1 | -- Deploy sat-api-pg:20191107093406-stacendpoint to pg 2 | 3 | BEGIN; 4 | SET search_path = data, api; 5 | CREATE VIEW data.stacLinks AS 6 | SELECT 7 | 'sat-api-pg' AS title, 8 | 'sat-api-pg' AS id, 9 | 'STAC v0.8.0 implementation by Development Seed' AS description, 10 | '0.8.0' AS stac_version, 11 | (SELECT array_cat(ARRAY( 12 | SELECT 13 | ROW((SELECT url || '/collections/' || data.collectionsLinks.id FROM data.apiUrls LIMIT 1), 14 | 'child', 15 | 'application/json', 16 | null)::data.linkobject 17 | FROM data.collectionsLinks), 18 | ARRAY[ 19 | ROW((SELECT url || '/stac/search' FROM data.apiUrls LIMIT 1), 20 | 'search', 21 | 'application/json', 22 | null)::data.linkobject, 23 | ROW((SELECT url || '/stac' FROM data.apiUrls LIMIT 1), 24 | 'self', 25 | 'application/json', 26 | null)::data.linkobject] 27 | ) 28 | ) as links; 29 | 30 | GRANT SELECT on data.stacLinks to api; 31 | 32 | CREATE OR REPLACE VIEW api.stac AS 33 | SELECT * FROM data.stacLinks; 34 | ALTER VIEW stac owner to api; 35 | 36 | GRANT SELECT on api.stac to anonymous; 37 | COMMIT; 38 | -------------------------------------------------------------------------------- /db/migrations/deploy/20191107214618-rootcollection.sql: 
-------------------------------------------------------------------------------- 1 | -- Deploy sat-api-pg:20191108211002-rootcollection to pg 2 | 3 | BEGIN; 4 | CREATE OR REPLACE VIEW data.collectionsobject AS 5 | SELECT 6 | (SELECT ARRAY( 7 | SELECT 8 | ROW((SELECT url || '/collections/' || data.collectionsLinks.id FROM data.apiUrls LIMIT 1), 9 | 'child', 10 | 'application/json', 11 | null)::data.linkobject 12 | FROM data.collectionsLinks) 13 | ) as links, 14 | (SELECT ARRAY( 15 | SELECT row_to_json(collection) 16 | FROM (SELECT * FROM data.collectionsLinks) collection 17 | )) as collections; 18 | 19 | CREATE OR REPLACE VIEW api.rootcollections AS 20 | SELECT * FROM data.collectionsobject; 21 | ALTER VIEW api.rootcollections owner to api; 22 | 23 | GRANT SELECT on data.collectionsobject to api; 24 | GRANT SELECT on api.rootcollections to anonymous; 25 | COMMIT; 26 | -------------------------------------------------------------------------------- /db/migrations/deploy/20191113203743-replacesearch.sql: -------------------------------------------------------------------------------- 1 | -- Deploy sat-api-pg:20191113203743-replacesearch to pg 2 | 3 | BEGIN; 4 | 5 | DROP FUNCTION IF EXISTS api.search( 6 | bbox numeric[], 7 | intersects json, 8 | include TEXT[] 9 | ); 10 | 11 | DROP FUNCTION IF EXISTS api.searchnogeom( 12 | include TEXT[] 13 | ); 14 | 15 | CREATE OR REPLACE FUNCTION api.search( 16 | bbox numeric[] default NULL, 17 | intersects json default NULL, 18 | include text[] default NULL, 19 | andquery text default NULL, 20 | sort text default 'ORDER BY c.datetime', 21 | lim int default 50, 22 | next text default '0' 23 | ) RETURNS setof api.collectionitems AS $$ 24 | DECLARE 25 | res_headers text; 26 | prefer text; 27 | BEGIN 28 | -- prefer := current_setting('request.header.prefer'); 29 | RETURN QUERY EXECUTE 30 | FORMAT( 31 | 'WITH g AS ( 32 | SELECT CASE 33 | WHEN $1 IS NOT NULL THEN 34 | data.ST_MakeEnvelope( 35 | $1[1], 36 | $1[2], 37 | $1[3], 38 | $1[4], 
39 | 4326 40 | ) 41 | WHEN $2 IS NOT NULL THEN 42 | data.st_SetSRID( 43 | data.ST_GeomFromGeoJSON($2), 44 | 4326 45 | ) 46 | ELSE 47 | NULL 48 | END AS geom 49 | ) 50 | SELECT 51 | collectionproperties, 52 | collection, 53 | id, 54 | c.geom, 55 | c.bbox, 56 | type, 57 | assets, 58 | geometry, 59 | CASE WHEN $3 IS NULL THEN properties 60 | ELSE ( 61 | SELECT jsonb_object_agg(e.key, e.value) 62 | FROM jsonb_each(properties) e 63 | WHERE e.key = ANY ($3) 64 | ) 65 | END as properties, 66 | datetime, 67 | links, 68 | stac_version 69 | FROM api.collectionitems c, g 70 | WHERE ( 71 | g.geom IS NULL OR 72 | data.ST_Intersects(g.geom, c.geom) 73 | ) %1s %2s LIMIT %3s OFFSET %4s; 74 | ', COALESCE(andQuery, ''), sort, lim, next) 75 | USING bbox, intersects, include; 76 | 77 | res_headers := format('[{"Func-Range": "%s-%s/*"}]', next, (next::int + lim::int) - 1); 78 | PERFORM set_config('response.headers', res_headers, true); 79 | END; 80 | $$ LANGUAGE PLPGSQL IMMUTABLE; 81 | 82 | COMMIT; 83 | -------------------------------------------------------------------------------- /db/migrations/deploy/20191211011147-fixindexperf.sql: -------------------------------------------------------------------------------- 1 | -- Deploy sat-api-pg:20191211011147-fixindexperf to pg 2 | 3 | BEGIN; 4 | 5 | CREATE OR REPLACE FUNCTION api.search( 6 | bbox numeric[] default NULL, 7 | intersects json default NULL, 8 | include text[] default NULL, 9 | andquery text default NULL, 10 | sort text default 'ORDER BY c.datetime', 11 | lim int default 50, 12 | next text default '0' 13 | ) RETURNS setof api.collectionitems AS $$ 14 | DECLARE 15 | res_headers text; 16 | prefer text; 17 | intersects_geometry data.geometry; 18 | BEGIN 19 | -- prefer := current_setting('request.header.prefer'); 20 | IF bbox IS NOT NULL THEN 21 | intersects_geometry = data.ST_MakeEnvelope( 22 | bbox[1], 23 | bbox[2], 24 | bbox[3], 25 | bbox[4], 26 | 4326 27 | ); 28 | ELSIF intersects IS NOT NULL THEN 29 | intersects_geometry = 
data.st_SetSRID(data.ST_GeomFromGeoJSON(intersects), 4326); 30 | END IF; 31 | RETURN QUERY EXECUTE 32 | FORMAT( 33 | 'SELECT 34 | collectionproperties, 35 | collection, 36 | id, 37 | c.geom, 38 | c.bbox, 39 | type, 40 | assets, 41 | geometry, 42 | CASE WHEN $2 IS NULL THEN properties 43 | ELSE ( 44 | SELECT jsonb_object_agg(e.key, e.value) 45 | FROM jsonb_each(properties) e 46 | WHERE e.key = ANY ($2) 47 | ) 48 | END as properties, 49 | datetime, 50 | links, 51 | stac_version 52 | FROM api.collectionitems c 53 | WHERE ( 54 | $1 IS NULL OR 55 | data.ST_Intersects($1, c.geom) 56 | ) %1s %2s LIMIT %3s OFFSET %4s; 57 | ', COALESCE(andQuery, ''), sort, lim, next) 58 | USING intersects_geometry, include; 59 | 60 | res_headers := format('[{"Func-Range": "%s-%s/*"}]', next, (next::int + lim::int) - 1); 61 | PERFORM set_config('response.headers', res_headers, true); 62 | 63 | END; 64 | $$ LANGUAGE PLPGSQL IMMUTABLE; 65 | COMMIT; 66 | -------------------------------------------------------------------------------- /db/migrations/deploy/20200217192155-tiebreak.sql: -------------------------------------------------------------------------------- 1 | -- Deploy sat-api-pg:20200217192155-tiebreak to pg 2 | 3 | BEGIN; 4 | ALTER TABLE data.items ADD COLUMN tiebreak SERIAL; 5 | 6 | 7 | DROP VIEW data.itemsLinks CASCADE; 8 | CREATE OR REPLACE VIEW data.itemsLinks AS 9 | SELECT 10 | id, 11 | type, 12 | geometry, 13 | bbox, 14 | properties, 15 | assets, 16 | collection, 17 | datetime, 18 | '0.8.0' AS stac_version, 19 | (SELECT array_cat(ARRAY[ 20 | ROW(( 21 | SELECT url || '/collections/' || collection || '/' || id 22 | FROM data.apiUrls LIMIT 1), 23 | 'self', 24 | 'application/geo+json', 25 | null)::data.linkobject, 26 | ROW(( 27 | SELECT url || '/collections/' || collection 28 | FROM data.apiUrls LIMIT 1), 29 | 'parent', 30 | 'application/json', 31 | null)::data.linkobject 32 | ], links)) as links, 33 | tiebreak 34 | FROM data.items i; 35 | 36 | CREATE VIEW data.items_string_geometry 
AS 37 | SELECT 38 | id, 39 | type, 40 | data.ST_AsGeoJSON(geometry) :: json as geometry, 41 | bbox, 42 | properties, 43 | assets, 44 | collection, 45 | datetime, 46 | links, 47 | stac_version 48 | FROM data.itemsLinks; 49 | 50 | CREATE TRIGGER convert_geometry_tg INSTEAD OF INSERT 51 | ON data.items_string_geometry FOR EACH ROW 52 | EXECUTE PROCEDURE data.convert_values(); 53 | 54 | CREATE OR REPLACE VIEW api.collectionitems AS 55 | SELECT 56 | c.properties as collectionproperties, 57 | i.collection as collection, 58 | i.id as id, 59 | i.geometry as geom, 60 | i.bbox as bbox, 61 | i.type, 62 | i.assets, 63 | data.ST_AsGeoJSON(i.geometry) :: json as geometry, 64 | i.properties as properties, 65 | i.datetime as datetime, 66 | i.links, 67 | i.stac_version, 68 | i.tiebreak 69 | FROM data.itemsLinks i 70 | RIGHT JOIN 71 | data.collections c ON i.collection = c.id; 72 | 73 | 74 | CREATE OR REPLACE FUNCTION api.search( 75 | bbox numeric[] default NULL, 76 | intersects json default NULL, 77 | include text[] default NULL, 78 | andquery text default NULL, 79 | sort text default 'ORDER BY c.datetime', 80 | lim int default 50, 81 | next text default '0' 82 | ) RETURNS setof api.collectionitems AS $$ 83 | DECLARE 84 | res_headers text; 85 | prefer text; 86 | intersects_geometry data.geometry; 87 | BEGIN 88 | -- prefer := current_setting('request.header.prefer'); 89 | IF bbox IS NOT NULL THEN 90 | intersects_geometry = data.ST_MakeEnvelope( 91 | bbox[1], 92 | bbox[2], 93 | bbox[3], 94 | bbox[4], 95 | 4326 96 | ); 97 | ELSIF intersects IS NOT NULL THEN 98 | intersects_geometry = data.st_SetSRID(data.ST_GeomFromGeoJSON(intersects), 4326); 99 | END IF; 100 | RETURN QUERY EXECUTE 101 | FORMAT( 102 | 'SELECT 103 | collectionproperties, 104 | collection, 105 | id, 106 | c.geom, 107 | c.bbox, 108 | type, 109 | assets, 110 | geometry, 111 | CASE WHEN $2 IS NULL THEN properties 112 | ELSE ( 113 | SELECT jsonb_object_agg(e.key, e.value) 114 | FROM jsonb_each(properties) e 115 | WHERE 
e.key = ANY ($2) 116 | ) 117 | END as properties, 118 | datetime, 119 | links, 120 | stac_version, 121 | tiebreak 122 | FROM api.collectionitems c 123 | WHERE ( 124 | $1 IS NULL OR 125 | data.ST_Intersects($1, c.geom) 126 | ) %1s %2s LIMIT %3s OFFSET %4s; 127 | ', COALESCE(andQuery, ''), sort, lim, next) 128 | USING intersects_geometry, include; 129 | 130 | res_headers := format('[{"Func-Range": "%s-%s/*"}]', next, (next::int + lim::int) - 1); 131 | PERFORM set_config('response.headers', res_headers, true); 132 | 133 | END; 134 | $$ LANGUAGE PLPGSQL IMMUTABLE; 135 | 136 | CREATE OR REPLACE VIEW api.items AS SELECT * FROM data.items_string_geometry; 137 | 138 | GRANT select, insert, update on data.items_string_geometry to api; 139 | GRANT select, insert, update on data.itemsLinks to api; 140 | 141 | GRANT select, insert, update on data.itemsLinks to application; 142 | GRANT select, insert, update on api.items to application; 143 | GRANT select, insert, update on data.items_string_geometry to application; 144 | GRANT usage ON sequence data.items_tiebreak_seq TO application; 145 | 146 | GRANT select on api.collectionitems to anonymous; 147 | GRANT select on api.items to anonymous; 148 | 149 | COMMIT; 150 | -------------------------------------------------------------------------------- /db/migrations/deploy/20200715144826-multilinks.sql: -------------------------------------------------------------------------------- 1 | -- Deploy sat-api-pg:20200715144826-multilinks to pg 2 | 3 | BEGIN; 4 | 5 | CREATE OR REPLACE FUNCTION convert_values() 6 | RETURNS trigger AS 7 | $BODY$ 8 | DECLARE 9 | converted_geometry data.geometry; 10 | converted_datetime timestamp with time zone; 11 | newlinks data.linkobject[]; 12 | filteredlinks data.linkobject[]; 13 | link data.linkobject; 14 | BEGIN 15 | -- IF TG_OP = 'INSERT' AND (NEW.geometry ISNULL) THEN 16 | -- RAISE EXCEPTION 'geometry is required'; 17 | -- RETURN NULL; 18 | -- END IF; 19 | -- EXCEPTION WHEN SQLSTATE 'XX000' THEN 20 | 
-- RAISE WARNING 'geometry not updated: %', SQLERRM; 21 | converted_geometry = data.st_setsrid(data.ST_GeomFromGeoJSON(NEW.geometry), 4326); 22 | converted_datetime = (new.properties)->'datetime'; 23 | 24 | newlinks := new.links; 25 | IF newlinks IS NOT NULL THEN 26 | FOREACH link IN ARRAY newlinks LOOP 27 | IF link.rel='derived_from' AND link.href IS NOT NULL THEN 28 | filteredlinks := ARRAY_APPEND(filteredlinks, link); 29 | ELSE 30 | filteredlinks := NULL; 31 | END IF; 32 | END LOOP; 33 | END IF; 34 | 35 | INSERT INTO data.items( 36 | id, 37 | type, 38 | geometry, 39 | bbox, 40 | properties, 41 | assets, 42 | collection, 43 | datetime, 44 | links) 45 | VALUES( 46 | new.id, 47 | new.type, 48 | converted_geometry, 49 | new.bbox, 50 | new.properties, 51 | new.assets, 52 | new.collection, 53 | converted_datetime, 54 | filteredlinks); 55 | RETURN NEW; 56 | END; 57 | $BODY$ 58 | LANGUAGE plpgsql; 59 | 60 | CREATE OR REPLACE FUNCTION convert_collection_links() 61 | RETURNS trigger AS 62 | $BODY$ 63 | DECLARE 64 | newlinks data.linkobject[]; 65 | filteredlinks data.linkobject[]; 66 | link data.linkobject; 67 | BEGIN 68 | 69 | newlinks := new.links; 70 | IF newlinks IS NOT NULL THEN 71 | FOREACH link IN ARRAY newlinks LOOP 72 | IF link.rel='derived_from' AND link.href IS NOT NULL THEN 73 | filteredlinks := ARRAY_APPEND(filteredlinks, link); 74 | ELSE 75 | filteredlinks := NULL; 76 | END IF; 77 | END LOOP; 78 | END IF; 79 | 80 | INSERT INTO data.collections( 81 | id, 82 | title, 83 | description, 84 | keywords, 85 | version, 86 | license, 87 | providers, 88 | extent, 89 | properties, 90 | links 91 | ) 92 | VALUES( 93 | new.id, 94 | new.title, 95 | new.description, 96 | new.keywords, 97 | new.version, 98 | new.license, 99 | new.providers, 100 | new.extent, 101 | new.properties, 102 | filteredlinks 103 | ); 104 | RETURN NEW; 105 | END; 106 | $BODY$ 107 | LANGUAGE plpgsql; 108 | 109 | COMMIT; 110 | 
-------------------------------------------------------------------------------- /db/migrations/revert/20191021172721-initial.sql: -------------------------------------------------------------------------------- 1 | -- Revert sat-api-pg:20191021172721-initial from pg 2 | 3 | BEGIN; 4 | 5 | -- XXX Add DDLs here. 6 | 7 | COMMIT; 8 | -------------------------------------------------------------------------------- /db/migrations/revert/20191107093406-stacendpoint.sql: -------------------------------------------------------------------------------- 1 | -- Revert sat-api-pg:20191107093406-stacendpoint from pg 2 | 3 | BEGIN; 4 | DROP VIEW data.stacLinks CASCADE; 5 | COMMIT; 6 | -------------------------------------------------------------------------------- /db/migrations/revert/20191107214618-rootcollection.sql: -------------------------------------------------------------------------------- 1 | -- Revert sat-api-pg:20191108211002-rootcollection from pg 2 | 3 | BEGIN; 4 | DROP VIEW collectionsobject; 5 | DROP VIEW rootcollections; 6 | COMMIT; 7 | -------------------------------------------------------------------------------- /db/migrations/revert/20191113203743-replacesearch.sql: -------------------------------------------------------------------------------- 1 | -- Revert sat-api-pg:20191113203743-replacesearch from pg 2 | 3 | BEGIN; 4 | 5 | -- XXX Add DDLs here. 6 | 7 | COMMIT; 8 | -------------------------------------------------------------------------------- /db/migrations/revert/20191211011147-fixindexperf.sql: -------------------------------------------------------------------------------- 1 | -- Revert sat-api-pg:20191211011147-fixindexperf from pg 2 | 3 | BEGIN; 4 | 5 | -- XXX Add DDLs here. 
6 | 7 | COMMIT; 8 | -------------------------------------------------------------------------------- /db/migrations/revert/20200217192155-tiebreak.sql: -------------------------------------------------------------------------------- 1 | -- Revert sat-api-pg:20200217192155-tiebreak from pg 2 | 3 | BEGIN; 4 | 5 | -- XXX Add DDLs here. 6 | 7 | COMMIT; 8 | -------------------------------------------------------------------------------- /db/migrations/revert/20200715144826-multilinks.sql: -------------------------------------------------------------------------------- 1 | -- Revert sat-api-pg:20200715144826-multilinks from pg 2 | 3 | BEGIN; 4 | 5 | -- XXX Add DDLs here. 6 | 7 | COMMIT; 8 | -------------------------------------------------------------------------------- /db/migrations/sqitch.conf: -------------------------------------------------------------------------------- 1 | [core] 2 | engine = pg 3 | # plan_file = sqitch.plan 4 | # top_dir = . 5 | # [engine "pg"] 6 | # target = db:pg: 7 | # registry = sqitch 8 | # client = psql 9 | -------------------------------------------------------------------------------- /db/migrations/sqitch.plan: -------------------------------------------------------------------------------- 1 | %syntax-version=1.0.0 2 | %project=sat-api-pg 3 | 4 | 20191021172721-initial 2019-10-21T17:27:35Z root # Add 20191021172721-initial migration 5 | 20191107093406-stacendpoint 2019-11-07T09:34:07Z root # Add stac endpoint 6 | 20191107214618-rootcollection 2019-11-07T21:46:19Z root # Add root level collections shape 7 | 20191113203743-replacesearch 2019-11-13T20:37:44Z root # Replace search function 8 | 20191211011147-fixindexperf 2019-12-11T01:11:48Z root # fix index perf 9 | 20200217192155-tiebreak 2020-02-17T19:21:56Z root # include tiebreak field 10 | 20200715144826-multilinks 2020-07-15T14:48:27Z root # Fix multilinks 11 | -------------------------------------------------------------------------------- 
/db/migrations/verify/20191021172721-initial.sql: -------------------------------------------------------------------------------- 1 | -- Verify sat-api-pg:20191021172721-initial on pg 2 | 3 | BEGIN; 4 | 5 | -- XXX Add verifications here. 6 | 7 | ROLLBACK; 8 | -------------------------------------------------------------------------------- /db/migrations/verify/20191107093406-stacendpoint.sql: -------------------------------------------------------------------------------- 1 | -- Verify sat-api-pg:20191107093406-stacendpoint on pg 2 | 3 | BEGIN; 4 | 5 | -- XXX Add verifications here. 6 | 7 | ROLLBACK; 8 | -------------------------------------------------------------------------------- /db/migrations/verify/20191107214618-rootcollection.sql: -------------------------------------------------------------------------------- 1 | -- Verify sat-api-pg:20191108211002-rootcollection on pg 2 | 3 | BEGIN; 4 | 5 | -- XXX Add verifications here. 6 | 7 | ROLLBACK; 8 | -------------------------------------------------------------------------------- /db/migrations/verify/20191113203743-replacesearch.sql: -------------------------------------------------------------------------------- 1 | -- Verify sat-api-pg:20191113203743-replacesearch on pg 2 | 3 | BEGIN; 4 | 5 | -- XXX Add verifications here. 6 | 7 | ROLLBACK; 8 | -------------------------------------------------------------------------------- /db/migrations/verify/20191211011147-fixindexperf.sql: -------------------------------------------------------------------------------- 1 | -- Verify sat-api-pg:20191211011147-fixindexperf on pg 2 | 3 | BEGIN; 4 | 5 | -- XXX Add verifications here. 6 | 7 | ROLLBACK; 8 | -------------------------------------------------------------------------------- /db/migrations/verify/20200217192155-tiebreak.sql: -------------------------------------------------------------------------------- 1 | -- Verify sat-api-pg:20200217192155-tiebreak on pg 2 | 3 | BEGIN; 4 | 5 | -- XXX Add verifications here. 
-- Search STAC items, optionally constrained to a bounding box or an
-- intersecting GeoJSON geometry. Filtering (andquery), ordering (sort) and
-- paging (lim/next) fragments are injected by the trusted OpenResty layer.
--
-- Parameters:
--   bbox       numeric[4] [minx, miny, maxx, maxy]; wins over intersects.
--   intersects GeoJSON geometry (json); used only when bbox is NULL.
--   include    property keys to keep in the returned properties object
--              (NULL = all properties).
--   andquery   extra WHERE clauses, starting with AND; interpolated raw.
--   sort       full ORDER BY clause; interpolated raw.
--   lim/next   page size and offset (next arrives as text from the query string).
--
-- NOTE(review): declared IMMUTABLE although it reads tables and calls
-- set_config(); the label is kept because callers may rely on how PostgREST
-- dispatches it, but STABLE/VOLATILE would be more honest -- confirm first.
CREATE OR REPLACE FUNCTION api.search(
  bbox numeric[] default NULL,
  intersects json default NULL,
  include text[] default NULL,
  andquery text default NULL,
  sort text default 'ORDER BY c.datetime',
  lim int default 50,
  next text default '0'
) RETURNS setof api.collectionitems AS $$
DECLARE
  res_headers text;
  intersects_geometry data.geometry;
BEGIN
  -- bbox takes precedence over intersects when both are supplied.
  IF bbox IS NOT NULL THEN
    intersects_geometry = data.ST_MakeEnvelope(
      bbox[1],
      bbox[2],
      bbox[3],
      bbox[4],
      4326
    );
  ELSIF intersects IS NOT NULL THEN
    intersects_geometry = data.st_SetSRID(data.ST_GeomFromGeoJSON(intersects), 4326);
  END IF;
  -- Positional specifiers (%1$s ...) replace the original width specifiers
  -- (%1s), which only worked because the arguments happened to be in order.
  -- next is user-controlled text, so it is quoted as a literal with %4$L to
  -- close an SQL-injection hole in OFFSET; andquery and sort remain raw and
  -- MUST only ever come from the trusted request-rewriting layer.
  RETURN QUERY EXECUTE
    FORMAT(
      'SELECT
        collectionproperties,
        collection,
        id,
        c.geom,
        c.bbox,
        type,
        assets,
        geometry,
        CASE WHEN $2 IS NULL THEN properties
        ELSE (
          SELECT jsonb_object_agg(e.key, e.value)
          FROM jsonb_each(properties) e
          WHERE e.key = ANY ($2)
        )
        END as properties,
        datetime,
        links,
        stac_version,
        tiebreak
      FROM api.collectionitems c
      WHERE (
        $1 IS NULL OR
        data.ST_Intersects($1, c.geom)
      ) %1$s %2$s LIMIT %3$s OFFSET %4$L;
      ', COALESCE(andquery, ''), sort, lim, next)
    USING intersects_geometry, include;

  -- Emulate a Content-Range response header so the proxy can build paging links.
  res_headers := format('[{"Func-Range": "%s-%s/*"}]', next, (next::int + lim::int) - 1);
  PERFORM set_config('response.headers', res_headers, true);

END;
$$ LANGUAGE PLPGSQL IMMUTABLE;
user properties returned by auth endpoints 12 | \ir ../libs/auth/api/user_type.sql 13 | -- include all auth endpoints 14 | \ir ../libs/auth/api/all.sql 15 | 16 | -- our endpoints 17 | \ir satapi.sql 18 | -------------------------------------------------------------------------------- /db/src/authorization/privileges.sql: -------------------------------------------------------------------------------- 1 | \echo # Loading roles privilege 2 | 3 | -- this file contains the privileges of all aplications roles to each database entity 4 | -- if it gets too long, you can split it one file per entity 5 | 6 | -- set default privileges to all the entities created by the auth lib 7 | select auth.set_auth_endpoints_privileges('api', :'anonymous', enum_range(null::data.user_role)::text[]); 8 | 9 | -- specify which application roles can access this api (you'll probably list them all) 10 | -- remember to list all the values of user_role type here 11 | grant usage on schema api to anonymous, application; 12 | grant usage on schema data to anonymous, application; 13 | 14 | grant select on data.items to api; 15 | grant select, insert, update on data.items_string_geometry to api; 16 | grant select, insert, update on data.collections to api; 17 | grant select, insert, update on data.collectionsLinks to api; 18 | grant select, insert, update on data.itemsLinks to api; 19 | grant select on data.rootLinks to api; 20 | grant select on data.stacLinks to api; 21 | grant select on data.collectionsobject to api; 22 | 23 | -- Anonymous can view collection items 24 | grant select on api.collectionitems to anonymous; 25 | grant select on api.items to anonymous; 26 | grant select on data.items to anonymous; 27 | grant select on api.collections to anonymous; 28 | grant select on api.root to anonymous; 29 | grant select on api.stac to anonymous; 30 | grant select on api.rootcollections to anonymous; 31 | 32 | -- Application can insert items with transformed geojson 33 | grant select, insert, update 
-- create all the application user roles that are defined using the "user_role" type
-- we use a function here in order to be able to add new roles just by redefining the type
-- (the role names come from the enum, so they are re-created idempotently)
create or replace function _temp_create_application_roles("authenticator" text, "roles" text[]) returns void as $$
declare
  role_name text;
begin
  -- each enum value becomes a dedicated role the authenticator may switch to
  foreach role_name in array roles loop
    execute format('drop role if exists %I', role_name);
    execute format('create role %I', role_name);
    execute format('grant %I to %I', role_name, authenticator);
  end loop;
end;
$$ language plpgsql;
FROM data.apiUrls LIMIT 1), 51 | 'self', 52 | 'application/json', 53 | null)::data.linkobject, 54 | ROW((SELECT url || '/collections/' || id FROM data.apiUrls LIMIT 1), 55 | 'root', 56 | 'application/json' , 57 | null)::data.linkobject 58 | ], links)) as links 59 | FROM data.collections; 60 | 61 | CREATE VIEW rootLinks AS 62 | SELECT 63 | 'sat-api-pg' AS title, 64 | 'sat-api-pg' AS id, 65 | 'STAC v0.8.0 implementation by Development Seed' AS description, 66 | '0.8.0' AS stac_version, 67 | (SELECT ARRAY[ 68 | ROW((SELECT url || '/collections' FROM data.apiUrls LIMIT 1), 69 | 'data', 70 | 'application/json', 71 | null)::data.linkobject, 72 | ROW((SELECT url || '/conformance' FROM data.apiUrls LIMIT 1), 73 | 'conformance', 74 | 'application/json', 75 | null)::data.linkobject, 76 | ROW((SELECT url FROM data.apiUrls LIMIT 1), 77 | 'self', 78 | 'application/json', 79 | null)::data.linkobject 80 | ]) as links; 81 | 82 | CREATE VIEW stacLinks AS 83 | SELECT 84 | 'sat-api-pg' AS title, 85 | 'sat-api-pg' AS id, 86 | 'STAC v0.8.0 implementation by Development Seed' AS description, 87 | '0.8.0' AS stac_version, 88 | (SELECT array_cat(ARRAY( 89 | SELECT 90 | ROW((SELECT url || '/collections/' || data.collectionsLinks.id FROM data.apiUrls LIMIT 1), 91 | 'child', 92 | 'application/json', 93 | null)::data.linkobject 94 | FROM data.collectionsLinks), 95 | ARRAY[ 96 | ROW((SELECT url || '/stac/search' FROM data.apiUrls LIMIT 1), 97 | 'search', 98 | 'application/json', 99 | null)::data.linkobject, 100 | ROW((SELECT url || '/stac' FROM data.apiUrls LIMIT 1), 101 | 'self', 102 | 'application/json', 103 | null)::data.linkobject] 104 | ) 105 | ) as links; 106 | 107 | CREATE VIEW itemsLinks AS 108 | SELECT 109 | id, 110 | type, 111 | geometry, 112 | bbox, 113 | properties, 114 | assets, 115 | collection, 116 | datetime, 117 | '0.8.0' AS stac_version, 118 | tiebreak, 119 | (SELECT array_cat(ARRAY[ 120 | ROW(( 121 | SELECT url || '/collections/' || collection || '/' || id 122 | FROM 
-- INSTEAD OF INSERT trigger body for data.items_string_geometry: converts the
-- incoming GeoJSON geometry and properties.datetime, keeps only caller-supplied
-- 'derived_from' links (self/parent links are regenerated by data.itemsLinks),
-- and writes the row into data.items.
CREATE OR REPLACE FUNCTION convert_values()
RETURNS trigger AS
$BODY$
DECLARE
  converted_geometry data.geometry;
  converted_datetime timestamp with time zone;
  filteredlinks data.linkobject[];
  link data.linkobject;
BEGIN
  converted_geometry = data.st_setsrid(data.ST_GeomFromGeoJSON(NEW.geometry), 4326);
  -- ->> yields the bare text value; the original used ->, whose JSON text
  -- output keeps the surrounding quotes and does not cast cleanly to
  -- timestamptz. ->> parses identically in every case where -> worked.
  converted_datetime = (new.properties)->>'datetime';

  -- BUG FIX: the original assigned filteredlinks := NULL in the ELSE branch,
  -- so any non-'derived_from' link appearing AFTER a match wiped the matches
  -- (the result depended on link order). Non-matching links are now simply
  -- skipped.
  IF new.links IS NOT NULL THEN
    FOREACH link IN ARRAY new.links LOOP
      IF link.rel = 'derived_from' AND link.href IS NOT NULL THEN
        filteredlinks := array_append(filteredlinks, link);
      END IF;
    END LOOP;
  END IF;

  INSERT INTO data.items(
    id,
    type,
    geometry,
    bbox,
    properties,
    assets,
    collection,
    datetime,
    links)
  VALUES(
    new.id,
    new.type,
    converted_geometry,
    new.bbox,
    new.properties,
    new.assets,
    new.collection,
    converted_datetime,
    filteredlinks);
  RETURN NEW;
END;
$BODY$
LANGUAGE plpgsql;

-- INSTEAD OF INSERT trigger body for data.collectionsLinks: applies the same
-- 'derived_from'-only link filtering, then inserts into data.collections
-- (self/root links are regenerated by the data.collectionsLinks view).
CREATE OR REPLACE FUNCTION convert_collection_links()
RETURNS trigger AS
$BODY$
DECLARE
  filteredlinks data.linkobject[];
  link data.linkobject;
BEGIN
  -- BUG FIX: same accumulator-reset defect as convert_values(); a
  -- non-matching link no longer discards previously collected matches.
  IF new.links IS NOT NULL THEN
    FOREACH link IN ARRAY new.links LOOP
      IF link.rel = 'derived_from' AND link.href IS NOT NULL THEN
        filteredlinks := array_append(filteredlinks, link);
      END IF;
    END LOOP;
  END IF;

  INSERT INTO data.collections(
    id,
    title,
    description,
    keywords,
    version,
    license,
    providers,
    extent,
    properties,
    links
  )
  VALUES(
    new.id,
    new.title,
    new.description,
    new.keywords,
    new.version,
    new.license,
    new.providers,
    new.extent,
    new.properties,
    filteredlinks
  );
  RETURN NEW;
END;
$BODY$
LANGUAGE plpgsql;
-------------------------------------------------------------------------------- 1 | drop schema if exists data cascade; 2 | create schema data; 3 | set search_path = data, public; 4 | 5 | -- import the type specifying the types of users we have (this is an enum). 6 | -- you most certainly will have to redefine this type for your application 7 | \ir ../libs/auth/data/user_role_type.sql 8 | 9 | -- import the default table definition for the user model used by the auth lib 10 | -- you can choose to define the "user" table yourself if you need additional columns 11 | \ir ../libs/auth/data/user.sql 12 | 13 | -- import our application models 14 | \ir satapi.sql 15 | -------------------------------------------------------------------------------- /db/src/init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | CUSTOM_CONFIG=$(cat <> /var/lib/postgresql/data/postgresql.conf 22 | fi -------------------------------------------------------------------------------- /db/src/init.sql: -------------------------------------------------------------------------------- 1 | -- some setting to make the output less verbose 2 | \set QUIET on 3 | \set ON_ERROR_STOP on 4 | set client_min_messages to warning; 5 | 6 | -- load some variables from the env 7 | \setenv base_dir :DIR 8 | \set base_dir `if [ $base_dir != ":"DIR ]; then echo $base_dir; else echo "/docker-entrypoint-initdb.d"; fi` 9 | \set anonymous `echo $DB_ANON_ROLE` 10 | \set authenticator `echo $DB_USER` 11 | \set authenticator_pass `echo $DB_PASS` 12 | \set jwt_secret `echo $JWT_SECRET` 13 | \set quoted_jwt_secret '\'' :jwt_secret '\'' 14 | 15 | 16 | \echo # Loading database definition 17 | begin; 18 | 19 | \echo # Loading dependencies 20 | -- functions for storing different settins in a table 21 | \ir libs/settings/schema.sql 22 | -- functions implementing authentication (parts of the lib are included in data and api schema) 23 | \ir libs/auth/schema.sql 24 | -- 
functions for reading different http request properties exposed by PostgREST 25 | \ir libs/request/schema.sql 26 | -- functions for sending messages to RabbitMQ entities 27 | -- \ir libs/rabbitmq/schema.sql 28 | 29 | -- save app settings 30 | select settings.set('jwt_secret', :quoted_jwt_secret); 31 | select settings.set('jwt_lifetime', '3600'); 32 | select settings.set('auth.default-role', 'webuser'); 33 | 34 | 35 | \echo # Loading application definitions 36 | -- private schema where all tables will be defined 37 | -- you can use othere names besides "data" or even spread the tables 38 | -- between different schemas. The schema name "data" is just a convention 39 | \ir data/schema.sql 40 | -- entities inside this schema (which should be only views and stored procedures) will be 41 | -- exposed as API endpoints. Access to them however is still governed by the 42 | -- privileges defined for the current PostgreSQL role making the requests 43 | \ir api/schema.sql 44 | 45 | 46 | \echo # Loading roles and privilege settings 47 | \ir authorization/roles.sql 48 | \ir authorization/privileges.sql 49 | 50 | -- \echo # Loading sample data 51 | \ir sample_data/data.sql 52 | 53 | 54 | commit; 55 | \echo # ========================================== 56 | -------------------------------------------------------------------------------- /db/src/libs/auth/api/all.sql: -------------------------------------------------------------------------------- 1 | 2 | \ir session_type.sql 3 | \ir login.sql 4 | \ir refresh_token.sql 5 | \ir signup.sql 6 | \ir me.sql -------------------------------------------------------------------------------- /db/src/libs/auth/api/login.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function login(email text, password text) returns session as $$ 3 | declare 4 | usr record; 5 | usr_api record; 6 | result record; 7 | begin 8 | 9 | EXECUTE format( 10 | ' select row_to_json(u.*) as j' 11 | ' from 
%I."user" as u' 12 | ' where u.email = $1 and u.password = public.crypt($2, u.password)' 13 | , quote_ident(settings.get('auth.data-schema'))) 14 | INTO usr 15 | USING $1, $2; 16 | 17 | if usr is NULL then 18 | raise exception 'invalid email/password'; 19 | else 20 | EXECUTE format( 21 | ' select json_populate_record(null::%I."user", $1) as r' 22 | , quote_ident(settings.get('auth.api-schema'))) 23 | INTO usr_api 24 | USING usr.j; 25 | 26 | result = ( 27 | row_to_json(usr_api.r), 28 | auth.sign_jwt(auth.get_jwt_payload(usr.j)) 29 | ); 30 | return result; 31 | end if; 32 | end 33 | $$ stable security definer language plpgsql; 34 | -- by default all functions are accessible to the public, we need to remove that and define our specific access rules 35 | revoke all privileges on function login(text, text) from public; -------------------------------------------------------------------------------- /db/src/libs/auth/api/me.sql: -------------------------------------------------------------------------------- 1 | create or replace function me() returns "user" as $$ 2 | declare 3 | usr record; 4 | begin 5 | 6 | EXECUTE format( 7 | ' select row_to_json(u.*) as j' 8 | ' from %I."user" as u' 9 | ' where id = $1' 10 | , quote_ident(settings.get('auth.data-schema'))) 11 | INTO usr 12 | USING request.user_id(); 13 | 14 | EXECUTE format( 15 | 'select json_populate_record(null::%I."user", $1) as r' 16 | , quote_ident(settings.get('auth.api-schema'))) 17 | INTO usr 18 | USING usr.j; 19 | 20 | return usr.r; 21 | end 22 | $$ stable security definer language plpgsql; 23 | 24 | revoke all privileges on function me() from public; 25 | -------------------------------------------------------------------------------- /db/src/libs/auth/api/refresh_token.sql: -------------------------------------------------------------------------------- 1 | create or replace function refresh_token() returns text as $$ 2 | declare 3 | usr record; 4 | token text; 5 | begin 6 | 7 | EXECUTE format( 8 | ' 
select row_to_json(u.*) as j' 9 | ' from %I."user" as u' 10 | ' where u.id = $1' 11 | , quote_ident(settings.get('auth.data-schema'))) 12 | INTO usr 13 | USING request.user_id(); 14 | 15 | if usr is NULL then 16 | raise exception 'user not found'; 17 | else 18 | select auth.sign_jwt(auth.get_jwt_payload(usr.j)) 19 | into token; 20 | return token; 21 | end if; 22 | end 23 | $$ stable security definer language plpgsql; 24 | 25 | -- by default all functions are accessible to the public, we need to remove that and define our specific access rules 26 | revoke all privileges on function refresh_token() from public; 27 | -------------------------------------------------------------------------------- /db/src/libs/auth/api/session_type.sql: -------------------------------------------------------------------------------- 1 | create type session as (me json, token text); 2 | -------------------------------------------------------------------------------- /db/src/libs/auth/api/signup.sql: -------------------------------------------------------------------------------- 1 | create or replace function signup(name text, email text, password text) returns session as $$ 2 | declare 3 | usr record; 4 | result record; 5 | usr_api record; 6 | begin 7 | EXECUTE format( 8 | ' insert into %I."user" as u' 9 | ' (name, email, password) values' 10 | ' ($1, $2, $3)' 11 | ' returning row_to_json(u.*) as j' 12 | , quote_ident(settings.get('auth.data-schema'))) 13 | INTO usr 14 | USING $1, $2, $3; 15 | 16 | EXECUTE format( 17 | ' select json_populate_record(null::%I."user", $1) as r' 18 | , quote_ident(settings.get('auth.api-schema'))) 19 | INTO usr_api 20 | USING usr.j; 21 | 22 | result := ( 23 | row_to_json(usr_api.r), 24 | auth.sign_jwt(auth.get_jwt_payload(usr.j)) 25 | ); 26 | 27 | return result; 28 | end 29 | $$ security definer language plpgsql; 30 | 31 | revoke all privileges on function signup(text, text, text) from public; 32 | 
-------------------------------------------------------------------------------- /db/src/libs/auth/api/user_type.sql: -------------------------------------------------------------------------------- 1 | select settings.set('auth.api-schema', current_schema); 2 | create type "user" as (id int, name text, email text, role text); 3 | -------------------------------------------------------------------------------- /db/src/libs/auth/data/user.sql: -------------------------------------------------------------------------------- 1 | select settings.set('auth.data-schema', current_schema); 2 | create table "user" ( 3 | id serial primary key, 4 | name text not null, 5 | email text not null unique, 6 | "password" text not null, 7 | "role" user_role not null default settings.get('auth.default-role')::user_role, 8 | 9 | check (length(name)>2), 10 | check (email ~* '^[A-Za-z0-9._%-]+@[A-Za-z0-9.-]+[.][A-Za-z]+$') 11 | ); 12 | 13 | create trigger user_encrypt_pass_trigger 14 | before insert or update on "user" 15 | for each row 16 | execute procedure auth.encrypt_pass(); 17 | -------------------------------------------------------------------------------- /db/src/libs/auth/data/user_role_type.sql: -------------------------------------------------------------------------------- 1 | create type user_role as enum ('webuser'); 2 | -------------------------------------------------------------------------------- /db/src/libs/auth/schema.sql: -------------------------------------------------------------------------------- 1 | \echo # Loading auth schema 2 | 3 | -- functions for JWT token generation in the database context 4 | \ir ../pgjwt/schema.sql 5 | 6 | 7 | drop schema if exists auth cascade; 8 | create schema auth; 9 | set search_path = auth, public; 10 | 11 | create extension if not exists pgcrypto; 12 | 13 | 14 | 15 | create or replace function encrypt_pass() returns trigger as $$ 16 | begin 17 | if new.password is not null then 18 | new.password = public.crypt(new.password, 
public.gen_salt('bf')); 19 | end if; 20 | return new; 21 | end 22 | $$ language plpgsql; 23 | 24 | 25 | create or replace function sign_jwt(json) returns text as $$ 26 | select pgjwt.sign($1, settings.get('jwt_secret')) 27 | $$ stable language sql; 28 | 29 | create or replace function get_jwt_payload(json) returns json as $$ 30 | select json_build_object( 31 | 'role', $1->'role', 32 | 'user_id', $1->'id', 33 | 'exp', extract(epoch from now())::integer + settings.get('jwt_lifetime')::int -- token expires in 1 hour 34 | ) 35 | $$ stable language sql; 36 | 37 | 38 | create or replace function set_auth_endpoints_privileges("schema" text, "anonymous" text, "roles" text[]) returns void as $$ 39 | declare r record; 40 | begin 41 | execute 'grant execute on function ' || quote_ident(schema) || '.login(text,text) to ' || quote_ident(anonymous); 42 | execute 'grant execute on function ' || quote_ident(schema) || '.signup(text,text,text) to ' || quote_ident(anonymous); 43 | for r in 44 | select unnest(roles) as role 45 | loop 46 | execute 'grant execute on function ' || quote_ident(schema) || '.me() to ' || quote_ident(r.role); 47 | execute 'grant execute on function ' || quote_ident(schema) || '.login(text,text) to ' || quote_ident(r.role); 48 | execute 'grant execute on function ' || quote_ident(schema) || '.refresh_token() to ' || quote_ident(r.role); 49 | end loop; 50 | end; 51 | $$ language plpgsql;; 52 | 53 | -------------------------------------------------------------------------------- /db/src/libs/pgjwt/.gitignore: -------------------------------------------------------------------------------- 1 | regression.* -------------------------------------------------------------------------------- /db/src/libs/pgjwt/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 Michel Pelletier 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this 
software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /db/src/libs/pgjwt/Makefile: -------------------------------------------------------------------------------- 1 | EXTENSION = pgjwt 2 | DATA = pgjwt--0.0.1.sql 3 | 4 | # postgres build stuff 5 | PG_CONFIG = pg_config 6 | PGXS := $(shell $(PG_CONFIG) --pgxs) 7 | include $(PGXS) 8 | -------------------------------------------------------------------------------- /db/src/libs/pgjwt/README.md: -------------------------------------------------------------------------------- 1 | # pgjwt 2 | PostgreSQL implementation of [JSON Web Tokens](https://jwt.io/) 3 | 4 | ## Dependencies 5 | 6 | This code requires the pgcrypto extension, included in most 7 | distribution's "postgresql-contrib" package. The tests require the 8 | pgtap extension to run. 9 | 10 | ## Install 11 | 12 | Clone the repository and then run: 13 | 14 | 'make install' 15 | 16 | You will require sudo privledges in most cases. 
This creates a new 17 | extension that can be installed with 'CREATE EXTENSION pgjwt;' To run 18 | the tests install pgtap and run 'pg_prove test.sql'. 19 | 20 | 21 | ## Usage 22 | 23 | Create a token. The first argument must be valid json, the second argument any text: 24 | 25 | => select sign('{"sub":"1234567890","name":"John Doe","admin":true}', 'secret'); 26 | sign 27 | ------------------------------------------------------------------------------------------------------------------------------------------------------- 28 | eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWV9.TJVA95OrM7E2cBab30RMHrHDcEfxjoYZgeFONFh7HgQ 29 | 30 | Verify a token: 31 | 32 | => select * from verify('eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWV9.TJVA95OrM7E2cBab30RMHrHDcEfxjoYZgeFONFh7HgQ', 'secret'); 33 | header | payload | valid 34 | -----------------------------+-----------------------------------------------------+------- 35 | {"alg":"HS256","typ":"JWT"} | {"sub":"1234567890","name":"John Doe","admin":true} | t 36 | 37 | Algorithm 38 | --------- 39 | 40 | sign() and verify() take an optional algorithm argument that can be 41 | 'HS256', 'HS384' or 'HS512'. The default is 'HS256': 42 | 43 | => select sign('{"sub":"1234567890","name":"John Doe","admin":true}', 'secret', 'HS384'), 44 | 45 | 46 | ## TODO 47 | 48 | * public/private keys when pgcrypto gets *_verify() functions 49 | 50 | * SET ROLE and key lookup helper functions 51 | -------------------------------------------------------------------------------- /db/src/libs/pgjwt/pgjwt--0.0.1.sql: -------------------------------------------------------------------------------- 1 | \echo Use "CREATE EXTENSION pgjwt" to load this file. 
\quit 2 | 3 | 4 | CREATE OR REPLACE FUNCTION url_encode(data bytea) RETURNS text LANGUAGE sql AS $$ 5 | SELECT translate(encode(data, 'base64'), E'+/=\n', '-_'); 6 | $$; 7 | 8 | 9 | CREATE OR REPLACE FUNCTION url_decode(data text) RETURNS bytea LANGUAGE sql AS $$ 10 | WITH t AS (SELECT translate(data, '-_', '+/')), 11 | rem AS (SELECT length((SELECT * FROM t)) % 4) -- compute padding size 12 | SELECT decode( 13 | (SELECT * FROM t) || 14 | CASE WHEN (SELECT * FROM rem) > 0 15 | THEN repeat('=', (4 - (SELECT * FROM rem))) 16 | ELSE '' END, 17 | 'base64'); 18 | $$; 19 | 20 | 21 | CREATE OR REPLACE FUNCTION algorithm_sign(signables text, secret text, algorithm text) 22 | RETURNS text LANGUAGE sql AS $$ 23 | WITH 24 | alg AS ( 25 | SELECT CASE 26 | WHEN algorithm = 'HS256' THEN 'sha256' 27 | WHEN algorithm = 'HS384' THEN 'sha384' 28 | WHEN algorithm = 'HS512' THEN 'sha512' 29 | ELSE '' END) -- hmac throws error 30 | SELECT @extschema@.url_encode(public.hmac(signables, secret, (select * FROM alg))); 31 | $$; 32 | 33 | 34 | CREATE OR REPLACE FUNCTION sign(payload json, secret text, algorithm text DEFAULT 'HS256') 35 | RETURNS text LANGUAGE sql AS $$ 36 | WITH 37 | header AS ( 38 | SELECT @extschema@.url_encode(convert_to('{"alg":"' || algorithm || '","typ":"JWT"}', 'utf8')) 39 | ), 40 | payload AS ( 41 | SELECT @extschema@.url_encode(convert_to(payload::text, 'utf8')) 42 | ), 43 | signables AS ( 44 | SELECT (SELECT * FROM header) || '.' || (SELECT * FROM payload) 45 | ) 46 | SELECT 47 | (SELECT * FROM signables) 48 | || '.' 
|| 49 | @extschema@.algorithm_sign((SELECT * FROM signables), secret, algorithm); 50 | $$; 51 | 52 | 53 | CREATE OR REPLACE FUNCTION verify(token text, secret text, algorithm text DEFAULT 'HS256') 54 | RETURNS table(header json, payload json, valid boolean) LANGUAGE sql AS $$ 55 | SELECT 56 | convert_from(@extschema@.url_decode(r[1]), 'utf8')::json AS header, 57 | convert_from(@extschema@.url_decode(r[2]), 'utf8')::json AS payload, 58 | r[3] = @extschema@.algorithm_sign(r[1] || '.' || r[2], secret, algorithm) AS valid 59 | FROM regexp_split_to_array(token, '\.') r; 60 | $$; 61 | -------------------------------------------------------------------------------- /db/src/libs/pgjwt/pgjwt.control: -------------------------------------------------------------------------------- 1 | # pgjwt extension 2 | comment = 'JSON Web Token API for Postgresql' 3 | default_version = '0.0.1' 4 | relocatable = false 5 | requires = pgcrypto 6 | 7 | -------------------------------------------------------------------------------- /db/src/libs/pgjwt/schema.sql: -------------------------------------------------------------------------------- 1 | -- This file is the only one in this directory that is not part of the pgjwt extension distribution 2 | -- It deals with loading https://github.com/michelp/pgjwt extension 3 | -- All the seemingly complicated code related to pgjwt in this file is only because of "on the fly" altering 4 | -- of the distribution code which is meant to be installed as an extension (not possible with AWS RDS) 5 | -- one could just manually edit that file and simply include it.
6 | -- At the same time this goes to show the powerful features you have at your disposal of the psql meta commands 7 | 8 | -- decide what is our base dir and the location of the file 9 | \setenv JWT_FILE :base_dir/libs/pgjwt/pgjwt--0.0.1.sql 10 | 11 | -- load sql definition in a variable and use sed to make the needed changes 12 | \set pgjwt_schema pgjwt 13 | \setenv pgjwt_schema :pgjwt_schema 14 | \set pgjwt_sql `sed -e 's/\\echo Use "CREATE EXTENSION pgjwt" to load this file. \\quit//g' ${JWT_FILE} | sed -e "s/@extschema@/${pgjwt_schema}/g"` 15 | 16 | -- create the jwt schema namespace and all the functions in it 17 | create extension if not exists pgcrypto; 18 | drop schema if exists :pgjwt_schema cascade; 19 | create schema :pgjwt_schema; 20 | set search_path to pgjwt, public; 21 | :pgjwt_sql 22 | set search_path to public; 23 | -------------------------------------------------------------------------------- /db/src/libs/pgjwt/test.sql: -------------------------------------------------------------------------------- 1 | BEGIN; 2 | SELECT plan(14); 3 | 4 | SELECT 5 | is(sign('{"sub":"1234567890","name":"John Doe","admin":true}', 'secret'), 6 | 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWV9.TJVA95OrM7E2cBab30RMHrHDcEfxjoYZgeFONFh7HgQ'); 7 | 8 | 9 | SELECT 10 | is(sign('{"sub":"1234567890","name":"John Doe","admin":true}', 'secret', 'HS256'), 11 | 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWV9.TJVA95OrM7E2cBab30RMHrHDcEfxjoYZgeFONFh7HgQ'); 12 | 13 | 14 | SELECT 15 | throws_ok($$ 16 | SELECT sign('{"sub":"1234567890","name":"John Doe","admin":true}', 'secret', 'bogus') 17 | $$, 18 | '22023', 19 | 'Cannot use "": No such hash algorithm', 20 | 'sign() should raise on bogus algorithm' 21 | ); 22 | 23 | 24 | SELECT 25 | throws_ok( 26 | $$SELECT header::text, payload::text, valid FROM verify( 27 | 
'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWV9.TJVA95OrM7E2cBab30RMHrHDcEfxjoYZgeFONFh7HgQ', 28 | 'secret', 'bogus')$$, 29 | '22023', 30 | 'Cannot use "": No such hash algorithm', 31 | 'verify() should raise on bogus algorithm' 32 | ); 33 | 34 | 35 | SELECT throws_ok( -- bogus header 36 | $$SELECT header::text, payload::text, valid FROM verify( 37 | 'eyJhbGciOiJIUzI1NiIBOGUScCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWV9.TJVA95OrM7E2cBab30RMHrHDcEfxjoYZgeFONFh7HgQ', 38 | 'secret', 'HS256')$$ 39 | ); 40 | 41 | 42 | SELECT 43 | throws_ok( -- bogus payload 44 | $$SELECT header::text, payload::text, valid FROM verify( 45 | 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaBOGUS9lIiwiYWRtaW4iOnRydWV9.TJVA95OrM7E2cBab30RMHrHDcEfxjoYZgeFONFh7HgQ', 46 | 'secret', 'HS256')$$ 47 | ); 48 | 49 | 50 | SELECT 51 | results_eq( 52 | $$SELECT header::text, payload::text, valid FROM verify( 53 | 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWV9.TJVA95OrM7E2cBab30RMHrHDcEfxjoYZgeFONFh7HgQ', 54 | 'secret')$$, 55 | $$VALUES ('{"alg":"HS256","typ":"JWT"}', '{"sub":"1234567890","name":"John Doe","admin":true}', true)$$, 56 | 'verify() should return return data marked valid' 57 | ); 58 | 59 | 60 | SELECT results_eq( 61 | $$SELECT header::text, payload::text, valid FROM verify( 62 | 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWV9.TJVA95OrM7E2cBab30RMHrHDcEfxjoYZgeFONFh7HgQ', 63 | 'badsecret')$$, 64 | $$VALUES ('{"alg":"HS256","typ":"JWT"}', '{"sub":"1234567890","name":"John Doe","admin":true}', false)$$, 65 | 'verify() should return return data marked invalid' 66 | ); 67 | 68 | 69 | SELECT 70 | is(sign('{"sub":"1234567890","name":"John Doe","admin":true}', 'secret', 'HS384'), 71 | 
E'eyJhbGciOiJIUzM4NCIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWV9.DtVnCyiYCsCbg8gUP-579IC2GJ7P3CtFw6nfTTPw-0lZUzqgWAo9QIQElyxOpoRm'); 72 | 73 | 74 | SELECT 75 | results_eq( 76 | $$SELECT header::text, payload::text, valid FROM verify( 77 | E'eyJhbGciOiJIUzM4NCIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWV9.DtVnCyiYCsCbg8gUP-579IC2GJ7P3CtFw6nfTTPw-0lZUzqgWAo9QIQElyxOpoRm', 78 | 'secret', 'HS384')$$, 79 | $$VALUES ('{"alg":"HS384","typ":"JWT"}', '{"sub":"1234567890","name":"John Doe","admin":true}', true)$$, 80 | 'verify() should return return data marked valid' 81 | ); 82 | 83 | 84 | SELECT 85 | results_eq( 86 | $$SELECT header::text, payload::text, valid FROM verify( 87 | E'eyJhbGciOiJIUzM4NCIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWV9.DtVnCyiYCsCbg8gUP-579IC2GJ7P3CtFw6nfTTPw-0lZUzqgWAo9QIQElyxOpoRm', 88 | 'badsecret', 'HS384')$$, 89 | $$VALUES ('{"alg":"HS384","typ":"JWT"}', '{"sub":"1234567890","name":"John Doe","admin":true}', false)$$, 90 | 'verify() should return return data marked invalid' 91 | ); 92 | 93 | 94 | SELECT 95 | is(sign('{"sub":"1234567890","name":"John Doe","admin":true}', 'secret', 'HS512'), 96 | E'eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWV9.YI0rUGDq5XdRw8vW2sDLRNFMN8Waol03iSFH8I4iLzuYK7FKHaQYWzPt0BJFGrAmKJ6SjY0mJIMZqNQJFVpkuw'); 97 | 98 | 99 | SELECT 100 | results_eq( 101 | $$SELECT header::text, payload::text, valid FROM verify( 102 | E'eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWV9.YI0rUGDq5XdRw8vW2sDLRNFMN8Waol03iSFH8I4iLzuYK7FKHaQYWzPt0BJFGrAmKJ6SjY0mJIMZqNQJFVpkuw', 103 | 'secret', 'HS512')$$, 104 | $$VALUES ('{"alg":"HS512","typ":"JWT"}', '{"sub":"1234567890","name":"John Doe","admin":true}', true)$$, 105 | 'verify() should return return data marked valid' 106 | ); 107 | 108 | 109 | SELECT 110 | 
results_eq( 111 | $$SELECT header::text, payload::text, valid FROM verify( 112 | E'eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWV9.YI0rUGDq5XdRw8vW2sDLRNFMN8Waol03iSFH8I4iLzuYK7FKHaQYWzPt0BJFGrAmKJ6SjY0mJIMZqNQJFVpkuw', 113 | 'badsecret', 'HS512')$$, 114 | $$VALUES ('{"alg":"HS512","typ":"JWT"}', '{"sub":"1234567890","name":"John Doe","admin":true}', false)$$, 115 | 'verify() should return return data marked invalid' 116 | ); 117 | 118 | 119 | SELECT * FROM finish(); 120 | ROLLBACK; 121 | -------------------------------------------------------------------------------- /db/src/libs/request/schema.sql: -------------------------------------------------------------------------------- 1 | drop schema if exists request cascade; 2 | create schema request; 3 | grant usage on schema request to public; 4 | 5 | create or replace function request.env_var(v text) returns text as $$ 6 | select current_setting(v, true); 7 | $$ stable language sql; 8 | 9 | create or replace function request.jwt_claim(c text) returns text as $$ 10 | select request.env_var('request.jwt.claim.' || c); 11 | $$ stable language sql; 12 | 13 | create or replace function request.cookie(c text) returns text as $$ 14 | select request.env_var('request.cookie.' || c); 15 | $$ stable language sql; 16 | 17 | create or replace function request.header(h text) returns text as $$ 18 | select request.env_var('request.header.' 
|| h); 19 | $$ stable language sql; 20 | 21 | create or replace function request.user_id() returns int as $$ 22 | select 23 | case coalesce(request.jwt_claim('user_id'),'') 24 | when '' then 0 25 | else request.jwt_claim('user_id')::int 26 | end 27 | $$ stable language sql; 28 | 29 | create or replace function request.user_role() returns text as $$ 30 | select request.jwt_claim('role')::text; 31 | $$ stable language sql; 32 | -------------------------------------------------------------------------------- /db/src/libs/settings/schema.sql: -------------------------------------------------------------------------------- 1 | \echo # Loading settings schema 2 | drop schema if exists settings cascade; 3 | create schema settings; 4 | 5 | create table settings.secrets ( 6 | key text primary key, 7 | value text not null 8 | ); 9 | 10 | 11 | create or replace function settings.get(text) returns text as $$ 12 | select value from settings.secrets where key = $1 13 | $$ security definer stable language sql; 14 | 15 | create or replace function settings.set(text, text) returns void as $$ 16 | insert into settings.secrets (key, value) 17 | values ($1, $2) 18 | on conflict (key) do update 19 | set value = $2; 20 | $$ security definer language sql; 21 | -------------------------------------------------------------------------------- /db/src/sample_data/reset.sql: -------------------------------------------------------------------------------- 1 | BEGIN; 2 | \set QUIET on 3 | \set ON_ERROR_STOP on 4 | set client_min_messages to warning; 5 | truncate data.collections restart identity cascade; 6 | truncate data.items restart identity cascade; 7 | \ir data.sql 8 | COMMIT; 9 | -------------------------------------------------------------------------------- /deployment/.sample_env: -------------------------------------------------------------------------------- 1 | # CF settings 2 | PROJECT=smallsats 3 | REGION=us-east-1 4 | STACK_NAME=smallsats-sat-api 5 | 6 | #Subzero settings 7 | 
APP_DIR=.. 8 | OPENRESTY_DIR=../openresty 9 | MIGRATIONS_DIR=../db/migrations 10 | 11 | # Docker specific configs 12 | # use only letters and numbers for the project name 13 | COMPOSE_PROJECT_NAME=sat-api-pg 14 | 15 | # Global configs 16 | # DEVELOPMENT=1 17 | JWT_SECRET=reallyreallyreallyreallysupersafe 18 | 19 | # DB connection details (used by all containers) 20 | # set PG_VERSION to match your production db major version 21 | PG_VERSION=11.2 22 | # DB_HOST=db 23 | DB_PORT=5432 24 | DB_NAME=smallsatsdb 25 | DB_SCHEMA=api 26 | DB_USER=authenticator 27 | DB_PASS=authenticatorpass 28 | 29 | # OpenResty 30 | POSTGREST_HOST=postgrest 31 | POSTGREST_PORT=3000 32 | 33 | # PostgREST 34 | DB_ANON_ROLE=anonymous 35 | DB_POOL=10 36 | #MAX_ROWS= 37 | #PRE_REQUEST= 38 | SERVER_PROXY_URI=http://localhost:8080/rest/ 39 | 40 | # PostgreSQL container config 41 | # Use this to connect directly to the db running in the container 42 | SUPER_USER= 43 | SUPER_USER_PASSWORD= 44 | 45 | OPENRESTY_REPO_URI=552819999234.dkr.ecr.us-east-1.amazonaws.com/sat-api-pg/openresty 46 | OPEN_RESTY_IMAGE=552819999234.dkr.ecr.us-east-1.amazonaws.com/sat-api-pg/openresty 47 | # psql postgres://superuser:superuserpass@localhost:5432/app 48 | # @localhost:5432/app 49 | -------------------------------------------------------------------------------- /deployment/README.md: -------------------------------------------------------------------------------- 1 | ## Deploying the sat-api-pg to an AWS stack. 2 | 3 | ### Prerequisites 4 | 5 | - [aws-cli](https://aws.amazon.com/cli/) 6 | - [psql](https://www.postgresql.org/docs/9.5/libpq.html) 7 | 8 | ### Initial Deployment 9 | 10 | 1. Create an [ECR repository](https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-create.html) to house your OpenResty image. Example name: `sat-api-pg-dev/openresty` 11 | 2. Copy the `./deployment/.sample_env` to `./deployment/.env`. Update accordingly with the values relevant for your project. 12 | 3. 
Create the stack of required AWS resources: 13 | ```bash 14 | $ cd deployment 15 | $ ./createStack.sh 16 | ``` 17 | 4. Update the newly created RDS instance's security policy to allow inbound traffic from the IP address of the machine where you are executing the deployment. This will allow the deployment package to run `psql` commands from your IP address. 18 | 5. Build the deployment configuration file from your environment settings. From the `/deployment` directory run: 19 | ```bash 20 | $ ./createSubZeroConfig.sh 21 | ``` 22 | 6. Deploy the database migrations and push the latest OpenResty image to ECR. This will create the `sat-api-pg` schemas, users, tables, views and functions in your stack's RDS database. 23 | ```bash 24 | $ ./deploy.sh 25 | ``` 26 | 7. Now that our database is ready and the updated image is in ECR, bring up an instance of your service: 27 | ```bash 28 | $ ./createStack.sh 1 # The 1 indicates a single instance of your service 29 | ``` 30 | 31 | ### Migrations 32 | 33 | To create a new Sqitch migration run 34 | 35 | ```bash 36 | $ yarn subzero migrations add --no-diff --note "yournote" yourmigrationname 37 | ``` 38 | 39 | This will create the appropriate files in the `migrations` directory which you can then modify with your desired changes. 40 | -------------------------------------------------------------------------------- /deployment/cloudformation.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | AWSTemplateFormatVersion: '2010-09-09' 3 | Description: Stack for sat-api-pg. 4 | Parameters: 5 | DBName: 6 | Default: MyDatabase 7 | Description: The database name 8 | Type: String 9 | MinLength: '1' 10 | MaxLength: '64' 11 | AllowedPattern: '[a-zA-Z][a-zA-Z0-9]*' 12 | ConstraintDescription: must begin with a letter and contain only 13 | alphanumeric characters.
14 | DBUser: 15 | NoEcho: 'true' 16 | Description: The database authenticator user 17 | Type: String 18 | MinLength: '1' 19 | MaxLength: '16' 20 | AllowedPattern: '[a-zA-Z][a-zA-Z0-9]*' 21 | ConstraintDescription: must begin with a letter and contain only 22 | alphanumeric characters. 23 | DBPassword: 24 | NoEcho: 'true' 25 | Description: The database authenticator password 26 | Type: String 27 | MinLength: '8' 28 | MaxLength: '41' 29 | AllowedPattern: '[a-zA-Z0-9]*' 30 | ConstraintDescription: must contain only alphanumeric characters. 31 | DBSuperUser: 32 | NoEcho: 'true' 33 | Description: The database admin account username 34 | Type: String 35 | MinLength: '1' 36 | MaxLength: '16' 37 | AllowedPattern: '[a-zA-Z][a-zA-Z0-9]*' 38 | ConstraintDescription: must begin with a letter and contain only 39 | alphanumeric characters. 40 | DBSuperPassword: 41 | NoEcho: 'true' 42 | Description: The database admin account password 43 | Type: String 44 | MinLength: '8' 45 | MaxLength: '41' 46 | AllowedPattern: '[a-zA-Z0-9]*' 47 | ConstraintDescription: must contain only alphanumeric characters. 48 | DBAnonRole: 49 | Type: String 50 | Default: 'anonymous' 51 | DBSchema: 52 | Type: String 53 | Default: "api" 54 | DBPool: 55 | Type: String 56 | Default: "10" 57 | JwtSecret: 58 | NoEcho: 'true' 59 | Description: The JWT secret for PostgREST auth. 60 | Type: String 61 | MaxRows: 62 | Type: String 63 | Default: "" 64 | Description: "MAX_ROWS env var" 65 | PreRequest: 66 | Type: String 67 | Default: "" 68 | Description: "PRE_REQUEST env var" 69 | ListenerHostNamePattern: 70 | Type: String 71 | Description: Listen for requests on the load balancer for this domain 72 | Default: "*" 73 | OpenRestyImage: 74 | Type: String 75 | # Default: 'openresty:latest' 76 | Description: This image exists in your account's ECR repository. 
77 | Version: 78 | Type: String 79 | Default: "latest" 80 | # PostgrestImage: 81 | # Type: String 82 | # Default: 'postgrest/postgrest:v5.2.0' 83 | # Description: This image is not prepended with the ECR host because 84 | # it is a standard PostgREST image. 85 | PostgrestContainerCpu: 86 | Type: Number 87 | Default: 256 88 | Description: How much CPU to give the container. 1024 is 1 CPU 89 | PostgrestContainerMemory: 90 | Type: Number 91 | Default: 512 92 | Description: How much memory in megabytes to give the container 93 | Priority: 94 | Type: Number 95 | Default: 1 96 | Description: The priority for the routing rule added to the load balancer. 97 | This only applies if your have multiple services which are 98 | assigned to different paths on the load balancer. 99 | DesiredCapacity: 100 | Type: Number 101 | Default: '1' 102 | Description: Number of EC2 instances to launch in your ECS cluster. 103 | DesiredCount: 104 | Type: Number 105 | Default: '0' 106 | Description: The number of task instances to run on cluster. 107 | MaxSize: 108 | Type: Number 109 | Default: '4' 110 | Description: Maximum number of EC2 instances that can be launched. 111 | Path: 112 | Type: String 113 | Default: "*" 114 | Description: A path on the public load balancer that this service 115 | should be connected to. Use * to send all load balancer 116 | traffic to this service. 117 | ECSAMI: 118 | Description: AMI ID 119 | Type: AWS::EC2::Image::Id 120 | Default: ami-0bc08634af113cccb 121 | InstanceType: 122 | Description: EC2 instance type 123 | Type: String 124 | Default: t3.medium 125 | ConstraintDescription: Please choose a valid instance type. 
126 | Outputs: 127 | PGConnection: 128 | Value: !Join ['', ['postgres://', !Ref 'DBSuperUser', ':', 129 | !Ref 'DBSuperPassword', '@', 130 | !GetAtt [DB, Endpoint.Address], 131 | ':', !GetAtt [DB, Endpoint.Port], /, 132 | !Ref 'DBName']] 133 | Export: 134 | Name: !Sub '${AWS::StackName}-PGConnection' 135 | RDSHost: 136 | Value: !GetAtt [DB, Endpoint.Address] 137 | Export: 138 | Name: !Sub '${AWS::StackName}-RDSHost' 139 | RESTEndpoint: 140 | Value: !GetAtt ExternalLoadBalancer.DNSName 141 | Export: 142 | Name: !Sub '${AWS::StackName}-RESTEndpoint' 143 | Mappings: 144 | # Hard values for the subnet masks. These masks define 145 | # the range of internal IP addresses that can be assigned. 146 | # The VPC can have all IP's from 10.0.0.0 to 10.0.255.255 147 | # There are two subnets which cover the ranges: 148 | # 149 | # 10.0.0.0 - 10.0.0.255 150 | # 10.0.1.0 - 10.0.1.255 151 | # 152 | # If you need more IP addresses (perhaps you have so many 153 | # instances that you run out) then you can customize these 154 | # ranges to add more 155 | SubnetConfig: 156 | VPC: 157 | CIDR: '10.0.0.0/16' 158 | PublicOne: 159 | CIDR: '10.0.0.0/24' 160 | PublicTwo: 161 | CIDR: '10.0.1.0/24' 162 | Resources: 163 | # VPC in which containers will be networked. 164 | # It has two public subnets 165 | # We distribute the subnets across the first two available subnets 166 | # for the region, for high availability. 
167 | VPC: 168 | Type: AWS::EC2::VPC 169 | Properties: 170 | EnableDnsSupport: true 171 | EnableDnsHostnames: true 172 | CidrBlock: !FindInMap ['SubnetConfig', 'VPC', 'CIDR'] 173 | # Two public subnets, where containers can have public IP addresses 174 | PublicSubnetOne: 175 | Type: AWS::EC2::Subnet 176 | Properties: 177 | AvailabilityZone: 178 | Fn::Select: 179 | - 0 180 | - Fn::GetAZs: {Ref: 'AWS::Region'} 181 | VpcId: !Ref 'VPC' 182 | CidrBlock: !FindInMap ['SubnetConfig', 'PublicOne', 'CIDR'] 183 | MapPublicIpOnLaunch: true 184 | PublicSubnetTwo: 185 | Type: AWS::EC2::Subnet 186 | Properties: 187 | AvailabilityZone: 188 | Fn::Select: 189 | - 1 190 | - Fn::GetAZs: {Ref: 'AWS::Region'} 191 | VpcId: !Ref 'VPC' 192 | CidrBlock: !FindInMap ['SubnetConfig', 'PublicTwo', 'CIDR'] 193 | MapPublicIpOnLaunch: true 194 | DBSubnetGroup: 195 | Type: AWS::RDS::DBSubnetGroup 196 | Properties: 197 | DBSubnetGroupDescription: 'rds-subnet' 198 | SubnetIds: 199 | - !Ref 'PublicSubnetOne' 200 | - !Ref 'PublicSubnetTwo' 201 | # Setup networking resources for the public subnets. Containers 202 | # in the public subnets have public IP addresses and the routing table 203 | # sends network traffic via the internet gateway. 
204 | InternetGateway: 205 | Type: AWS::EC2::InternetGateway 206 | GatewayAttachement: 207 | Type: AWS::EC2::VPCGatewayAttachment 208 | Properties: 209 | VpcId: !Ref 'VPC' 210 | InternetGatewayId: !Ref 'InternetGateway' 211 | PublicRouteTable: 212 | Type: AWS::EC2::RouteTable 213 | Properties: 214 | VpcId: !Ref 'VPC' 215 | PublicRoute: 216 | Type: AWS::EC2::Route 217 | DependsOn: GatewayAttachement 218 | Properties: 219 | RouteTableId: !Ref 'PublicRouteTable' 220 | DestinationCidrBlock: '0.0.0.0/0' 221 | GatewayId: !Ref 'InternetGateway' 222 | PublicSubnetOneRouteTableAssociation: 223 | Type: AWS::EC2::SubnetRouteTableAssociation 224 | Properties: 225 | SubnetId: !Ref PublicSubnetOne 226 | RouteTableId: !Ref PublicRouteTable 227 | PublicSubnetTwoRouteTableAssociation: 228 | Type: AWS::EC2::SubnetRouteTableAssociation 229 | Properties: 230 | SubnetId: !Ref PublicSubnetTwo 231 | RouteTableId: !Ref PublicRouteTable 232 | # Allows access from ECS to RDS 233 | # RDS Resources 234 | DB: 235 | Type: AWS::RDS::DBInstance 236 | DependsOn: GatewayAttachement 237 | Properties: 238 | DBName: !Sub '${DBName}' 239 | DBSubnetGroupName: !Ref 'DBSubnetGroup' 240 | MasterUsername: !Ref 'DBSuperUser' 241 | MasterUserPassword: !Ref 'DBSuperPassword' 242 | AllocatedStorage: '5' 243 | StorageType: 'gp2' 244 | DBInstanceClass: db.t3.small 245 | Engine: Postgres 246 | EngineVersion: '11' 247 | PubliclyAccessible: true 248 | VPCSecurityGroups: 249 | - !Ref ECSSecurityGroup 250 | 251 | # ECS Resources 252 | ECSCluster: 253 | Type: AWS::ECS::Cluster 254 | ECSSecurityGroup: 255 | Type: AWS::EC2::SecurityGroup 256 | Properties: 257 | GroupDescription: Access to the containers 258 | VpcId: !Ref 'VPC' 259 | EcsSecurityGroupIngressFromPublicALB: 260 | Type: AWS::EC2::SecurityGroupIngress 261 | Properties: 262 | Description: Ingress from the public ALB 263 | GroupId: !Ref ECSSecurityGroup 264 | IpProtocol: -1 265 | SourceSecurityGroupId: !Ref 'ExternalLoadBalancerSG' 266 | 
EcsSecurityGroupIngressFromSelf: 267 | Type: AWS::EC2::SecurityGroupIngress 268 | Properties: 269 | GroupId: !Ref ECSSecurityGroup 270 | IpProtocol: "-1" 271 | SourceSecurityGroupId: !Ref ECSSecurityGroup 272 | ExternalLoadBalancerSG: 273 | Type: AWS::EC2::SecurityGroup 274 | Properties: 275 | GroupDescription: Access to the public facing load balancer 276 | VpcId: !Ref 'VPC' 277 | SecurityGroupIngress: 278 | # Allow access to ALB from anywhere on the internet 279 | - CidrIp: 0.0.0.0/0 280 | IpProtocol: -1 281 | ExternalLoadBalancer: 282 | Type: AWS::ElasticLoadBalancingV2::LoadBalancer 283 | Properties: 284 | Scheme: "internet-facing" 285 | Subnets: 286 | - !Ref PublicSubnetOne 287 | - !Ref PublicSubnetTwo 288 | LoadBalancerAttributes: 289 | - Key: idle_timeout.timeout_seconds 290 | Value: "50" 291 | SecurityGroups: 292 | - !Ref ExternalLoadBalancerSG 293 | PlaceholderTargetGroup: 294 | Type: AWS::ElasticLoadBalancingV2::TargetGroup 295 | Properties: 296 | HealthCheckIntervalSeconds: 30 297 | HealthCheckPath: "/rest/" 298 | HealthCheckProtocol: HTTP 299 | HealthCheckTimeoutSeconds: 10 300 | HealthyThresholdCount: 4 301 | Matcher: 302 | HttpCode: '200' 303 | Port: 80 304 | Protocol: HTTP 305 | TargetGroupAttributes: 306 | - Key: deregistration_delay.timeout_seconds 307 | Value: '30' 308 | UnhealthyThresholdCount: 3 309 | VpcId: !Ref VPC 310 | 311 | TargetGroup: 312 | Type: AWS::ElasticLoadBalancingV2::TargetGroup 313 | Properties: 314 | HealthCheckIntervalSeconds: 30 315 | HealthCheckPath: "/" 316 | HealthCheckProtocol: HTTP 317 | HealthCheckTimeoutSeconds: 10 318 | HealthyThresholdCount: 4 319 | Matcher: 320 | HttpCode: '200' 321 | Port: 80 322 | Protocol: HTTP 323 | TargetGroupAttributes: 324 | - Key: deregistration_delay.timeout_seconds 325 | Value: '30' 326 | UnhealthyThresholdCount: 3 327 | VpcId: !Ref VPC 328 | TargetType: ip 329 | 330 | ExternalListenerHttp: 331 | Type: AWS::ElasticLoadBalancingV2::Listener 332 | Properties: 333 | Protocol: HTTP 334 | Port: 
80 335 | LoadBalancerArn: !Ref ExternalLoadBalancer 336 | DefaultActions: 337 | - TargetGroupArn: !Ref TargetGroup 338 | Type: forward 339 | 340 | # Create a rule on the load balancer for routing traffic to the target group 341 | ListenerRule: 342 | Type: AWS::ElasticLoadBalancingV2::ListenerRule 343 | Properties: 344 | Actions: 345 | - TargetGroupArn: !Ref 'TargetGroup' 346 | Type: 'forward' 347 | Conditions: 348 | - Field: path-pattern 349 | Values: [!Ref 'Path'] 350 | ListenerArn: !Ref ExternalListenerHttp 351 | Priority: !Ref 'Priority' 352 | PgRestLogGroup: 353 | Type: AWS::Logs::LogGroup 354 | Properties: 355 | LogGroupName: !Join ['', ['/ecs/', !Ref 'AWS::StackName', '/postgrest']] 356 | OpenRestyLogGroup: 357 | Type: AWS::Logs::LogGroup 358 | Properties: 359 | LogGroupName: !Join ['', ['/ecs/', !Ref 'AWS::StackName', '/openresty']] 360 | 361 | # Autoscaling group. This launches the actual EC2 instances that will register 362 | # themselves as members of the cluster, and run the docker containers. 363 | ECSAutoScalingGroup: 364 | Type: AWS::AutoScaling::AutoScalingGroup 365 | Properties: 366 | VPCZoneIdentifier: 367 | - !Ref PublicSubnetOne 368 | - !Ref PublicSubnetTwo 369 | LaunchConfigurationName: !Ref 'ContainerInstances' 370 | MinSize: '1' 371 | MaxSize: !Ref 'MaxSize' 372 | DesiredCapacity: !Ref 'DesiredCapacity' 373 | CreationPolicy: 374 | ResourceSignal: 375 | Timeout: PT15M 376 | UpdatePolicy: 377 | AutoScalingReplacingUpdate: 378 | WillReplace: true 379 | ContainerInstances: 380 | Type: AWS::AutoScaling::LaunchConfiguration 381 | Properties: 382 | ImageId: !Ref 'ECSAMI' 383 | SecurityGroups: [!Ref 'ECSSecurityGroup'] 384 | InstanceType: !Ref 'InstanceType' 385 | IamInstanceProfile: !Ref 'EC2InstanceProfile' 386 | UserData: 387 | Fn::Base64: !Sub | 388 | #!/bin/bash -xe 389 | echo ECS_CLUSTER=${ECSCluster} >> /etc/ecs/ecs.config 390 | yum install -y aws-cfn-bootstrap 391 | /opt/aws/bin/cfn-signal -e $? 
--stack ${AWS::StackName} \ 392 | --resource ECSAutoScalingGroup --region ${AWS::Region} 393 | AutoscalingRole: 394 | Type: AWS::IAM::Role 395 | Properties: 396 | AssumeRolePolicyDocument: 397 | Statement: 398 | - Effect: Allow 399 | Principal: 400 | Service: [application-autoscaling.amazonaws.com] 401 | Action: ['sts:AssumeRole'] 402 | Path: / 403 | Policies: 404 | - PolicyName: service-autoscaling 405 | PolicyDocument: 406 | Statement: 407 | - Effect: Allow 408 | Action: 409 | - 'application-autoscaling:*' 410 | - 'cloudwatch:DescribeAlarms' 411 | - 'cloudwatch:PutMetricAlarm' 412 | - 'ecs:DescribeServices' 413 | - 'ecs:UpdateService' 414 | Resource: '*' 415 | EC2InstanceProfile: 416 | Type: AWS::IAM::InstanceProfile 417 | Properties: 418 | Path: / 419 | Roles: [!Ref 'EC2Role'] 420 | 421 | # Role for the EC2 hosts. This allows the ECS agent on the EC2 hosts 422 | # to communciate with the ECS control plane, as well as download the docker 423 | # images from ECR to run on your host. 424 | EC2Role: 425 | Type: AWS::IAM::Role 426 | Properties: 427 | AssumeRolePolicyDocument: 428 | Statement: 429 | - Effect: Allow 430 | Principal: 431 | Service: [ec2.amazonaws.com] 432 | Action: ['sts:AssumeRole'] 433 | Path: / 434 | Policies: 435 | - PolicyName: ecs-service 436 | PolicyDocument: 437 | Statement: 438 | - Effect: Allow 439 | Action: 440 | - 'ecs:CreateCluster' 441 | - 'ecs:DeregisterContainerInstance' 442 | - 'ecs:DiscoverPollEndpoint' 443 | - 'ecs:Poll' 444 | - 'ecs:RegisterContainerInstance' 445 | - 'ecs:StartTelemetrySession' 446 | - 'ecs:Submit*' 447 | - 'logs:CreateLogStream' 448 | - 'logs:PutLogEvents' 449 | - 'ecr:GetAuthorizationToken' 450 | - 'ecr:BatchGetImage' 451 | - 'ecr:GetDownloadUrlForLayer' 452 | Resource: '*' 453 | 454 | TaskDefinition: 455 | Type: AWS::ECS::TaskDefinition 456 | Properties: 457 | NetworkMode: awsvpc 458 | ContainerDefinitions: 459 | - Name: 'postgrest' 460 | Image: postgrest/postgrest 461 | Cpu: !Ref 'PostgrestContainerCpu' 462 | 
Memory: !Ref 'PostgrestContainerMemory' 463 | Environment: 464 | # version used only to trigger image update and oncatiner restart 465 | - Name: VERSION 466 | Value: !Ref Version 467 | - Name: PGRST_DB_URI 468 | Value: !Join 469 | - "" 470 | - 471 | - "postgres://" 472 | - !Ref DBUser 473 | - ":" 474 | - !Ref DBPassword 475 | - "@" 476 | - !GetAtt [DB, Endpoint.Address] 477 | - ":" 478 | - !GetAtt [DB, Endpoint.Port] 479 | - "/" 480 | - !Ref DBName 481 | - Name: PGRST_DB_SCHEMA 482 | Value: !Ref DBSchema 483 | - Name: PGRST_DB_ANON_ROLE 484 | Value: !Ref DBAnonRole 485 | - Name: PGRST_DB_POOL 486 | Value: !Ref DBPool 487 | - Name: PGRST_JWT_SECRET 488 | Value: !Ref JwtSecret 489 | - Name: PGRST_MAX_ROWS 490 | Value: !Ref MaxRows 491 | LogConfiguration: 492 | LogDriver: "awslogs" 493 | Options: 494 | awslogs-group: !Ref PgRestLogGroup 495 | awslogs-region: !Ref AWS::Region 496 | - Name: "openresty" 497 | Cpu: !Ref 'PostgrestContainerCpu' 498 | Memory: !Ref 'PostgrestContainerMemory' 499 | Image: !Join 500 | - "" 501 | - 502 | - !Ref OpenRestyImage 503 | - ":" 504 | - !Ref Version 505 | PortMappings: 506 | - ContainerPort: "80" 507 | Protocol: "tcp" 508 | Environment: 509 | - Name: DB_HOST 510 | Value: !GetAtt [DB, Endpoint.Address] 511 | - Name: DB_PORT 512 | Value: !GetAtt [DB, Endpoint.Port] 513 | - Name: DB_NAME 514 | Value: !Ref DBName 515 | - Name: DB_SCHEMA 516 | Value: !Ref DBSchema 517 | - Name: DB_USER 518 | Value: !Ref DBUser 519 | - Name: DB_PASS 520 | Value: !Ref DBPassword 521 | - Name: POSTGREST_HOST 522 | Value: "127.0.0.1" 523 | - Name: POSTGREST_PORT 524 | Value: "3000" 525 | - Name: JWT_SECRET 526 | Value: !Ref JwtSecret 527 | - Name: DEVELOPMENT 528 | Value: "1" 529 | LogConfiguration: 530 | LogDriver: "awslogs" 531 | Options: 532 | awslogs-group: !Ref OpenRestyLogGroup 533 | awslogs-region: !Ref AWS::Region 534 | Service: 535 | Type: AWS::ECS::Service 536 | DependsOn: ListenerRule 537 | Properties: 538 | Cluster: !Ref ECSCluster 539 | 
DeploymentConfiguration: 540 | MaximumPercent: "200" 541 | MinimumHealthyPercent: "50" 542 | DesiredCount: !Ref DesiredCount 543 | LoadBalancers: 544 | - ContainerName: "openresty" 545 | TargetGroupArn: !Ref TargetGroup 546 | ContainerPort: 80 547 | TaskDefinition: !Ref TaskDefinition 548 | NetworkConfiguration: 549 | AwsvpcConfiguration: 550 | AssignPublicIp: DISABLED 551 | SecurityGroups: 552 | - !Ref ECSSecurityGroup 553 | Subnets: 554 | - !Ref PublicSubnetOne 555 | - !Ref PublicSubnetTwo 556 | -------------------------------------------------------------------------------- /deployment/createStack.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -a # automatically export all variables 3 | source .env 4 | set +a 5 | DefaultCount=0 6 | Count=${1:-$DefaultCount} 7 | aws cloudformation deploy \ 8 | --template-file cloudformation.yaml \ 9 | --stack-name $STACK_NAME \ 10 | --tags Project=$PROJECT \ 11 | --parameter-overrides \ 12 | DBSuperUser=$SUPER_USER \ 13 | DBSuperPassword=$SUPER_USER_PASSWORD \ 14 | DBName=$DB_NAME \ 15 | DBUser=$DB_USER \ 16 | DBPassword=$DB_PASS \ 17 | JwtSecret=$JWT_SECRET \ 18 | DesiredCount=$Count \ 19 | OpenRestyImage=$OPEN_RESTY_IMAGE \ 20 | --region $REGION \ 21 | --capabilities CAPABILITY_IAM \ 22 | -------------------------------------------------------------------------------- /deployment/createSubZeroConfig.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -a # automatically export all variables 3 | source .env 4 | set +a 5 | PRODUCTION_DB_HOST=$(aws cloudformation describe-stacks --stack-name "$STACK_NAME" \ 6 | --query "Stacks[0].Outputs[?OutputKey=='RDSHost'].OutputValue" --output text) 7 | DOMAIN=$(aws cloudformation describe-stacks --stack-name "$STACK_NAME" \ 8 | --query "Stacks[0].Outputs[?OutputKey=='RESTEndpoint'].OutputValue" --output text) 9 | SUBZERO_APP_CONF=$(cat < ../subzero-app.json 25 | 
GRANT api to current_user; -- this is a workaround for RDS where the master user does not have SUPERUSER privileges
:"authenticator"; 27 | -------------------------------------------------------------------------------- /deployment/sqitch: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Determine which Docker image to run. 4 | SQITCH_IMAGE=${SQITCH_IMAGE:=sqitch/sqitch:latest} 5 | 6 | # Set up required pass-through variables. 7 | user=${USER-$(whoami)} 8 | passopt=( 9 | -e "SQITCH_ORIG_SYSUSER=$user" 10 | -e "SQITCH_ORIG_EMAIL=$user@$(hostname)" 11 | -e "TZ=$(date +%Z)" \ 12 | -e "LESS=${LESS:--R}" \ 13 | ) 14 | 15 | # Handle OS-specific options. 16 | case "$(uname -s)" in 17 | Linux*) 18 | passopt+=(-e "SQITCH_ORIG_FULLNAME=$(getent passwd $user | cut -d: -f5 | cut -d, -f1)") 19 | passopt+=(-u $(id -u ${user}):$(id -g ${user})) 20 | ;; 21 | Darwin*) 22 | passopt+=(-e "SQITCH_ORIG_FULLNAME=$(id -P $user | awk -F '[:]' '{print $8}')") 23 | ;; 24 | MINGW*|CYGWIN*) 25 | passopt+=(-e "SQITCH_ORIG_FULLNAME=$(net user $user)") 26 | ;; 27 | *) 28 | echo "Unknown OS: $(uname -s)" 29 | exit 2 30 | ;; 31 | esac 32 | 33 | # Iterate over optional Sqitch and engine variables. 34 | for var in \ 35 | SQITCH_CONFIG SQITCH_USERNAME SQITCH_PASSWORD SQITCH_FULLNAME SQITCH_EMAIL SQITCH_TARGET \ 36 | DBI_TRACE \ 37 | PGUSER PGPASSWORD PGHOST PGHOSTADDR PGPORT PGDATABASE PGSERVICE PGOPTIONS PGSSLMODE PGREQUIRESSL PGSSLCOMPRESSION PGREQUIREPEER PGKRBSRVNAME PGKRBSRVNAME PGGSSLIB PGCONNECT_TIMEOUT PGCLIENTENCODING PGTARGETSESSIONATTRS \ 38 | MYSQL_PWD MYSQL_HOST MYSQL_TCP_PORT \ 39 | TNS_ADMIN TWO_TASK ORACLE_SID \ 40 | ISC_USER ISC_PASSWORD \ 41 | VSQL_HOST VSQL_PORT VSQL_USER VSQL_PASSWORD VSQL_SSLMODE \ 42 | SNOWSQL_ACCOUNT SNOWSQL_USER SNOWSQL_PWD SNOWSQL_HOST SNOWSQL_PORT SNOWSQL_DATABASE SNOWSQL_REGION SNOWSQL_WAREHOUSE 43 | do 44 | if [ -n "${!var}" ]; then 45 | passopt+=(-e $var) 46 | fi 47 | done 48 | 49 | # Determine the name of the container home directory. 
# This is the database to which all the other components in the stack will connect and interact with
in production this section will be 9 | # commented and in the .env file you will specify the ip of your separate database instance 10 | db: 11 | image: mdillon/postgis 12 | ports: 13 | - "5432:5432" 14 | environment: 15 | # env vars specific to postgres image used on first boot 16 | - POSTGRES_USER=${SUPER_USER} 17 | - POSTGRES_PASSWORD=${SUPER_USER_PASSWORD} 18 | - POSTGRES_DB=${DB_NAME} 19 | # env vars useful for our sql scripts 20 | - SUPER_USER=${SUPER_USER} 21 | - SUPER_USER_PASSWORD=${SUPER_USER_PASSWORD} 22 | - DB_NAME=${DB_NAME} 23 | - DB_USER=${DB_USER} 24 | - DB_PASS=${DB_PASS} 25 | - DB_ANON_ROLE=${DB_ANON_ROLE} 26 | - DEVELOPMENT=${DEVELOPMENT} 27 | - JWT_SECRET=${JWT_SECRET} 28 | 29 | volumes: 30 | - "./db/src:/docker-entrypoint-initdb.d" 31 | ### DB END 32 | 33 | # PostgREST instance, is responsible for communicating with the database 34 | # and providing a REST api, (almost) every request that is sent to the database goes through it 35 | postgrest: 36 | image: postgrest/postgrest 37 | ports: 38 | - "3000:3000" 39 | links: 40 | - db:db 41 | environment: 42 | - PGRST_DB_URI=postgres://${DB_USER}:${DB_PASS}@${DB_HOST}:${DB_PORT}/${DB_NAME} 43 | - PGRST_DB_SCHEMA=${DB_SCHEMA} 44 | - PGRST_DB_ANON_ROLE=${DB_ANON_ROLE} 45 | - PGRST_DB_POOL=${DB_POOL} 46 | - PGRST_JWT_SECRET=${JWT_SECRET} 47 | - PGRST_MAX_ROWS=${MAX_ROWS} 48 | - PGRST_PRE_REQUEST=${PRE_REQUEST} 49 | - PGRST_SERVER_PROXY_URI=${SERVER_PROXY_URI} 50 | depends_on: 51 | - db 52 | 53 | # OpenResty (Nginx + Lua) instance that sits in front of PostgREST. 54 | # All the requests coming into the system are first hitting this component. 55 | # After some processing/checks and transformation, the request is forwarded 56 | # to PostgREST down the stack. 
57 | openresty: 58 | image: openresty/openresty:stretch 59 | command: ["/usr/bin/openresty", "-g", "daemon off; error_log /dev/stderr info;"] 60 | ports: 61 | - "8080:80" 62 | links: 63 | - db:db 64 | - postgrest:postgrest 65 | environment: 66 | - JWT_SECRET=${JWT_SECRET} 67 | - DEVELOPMENT=${DEVELOPMENT} 68 | - POSTGREST_HOST=${POSTGREST_HOST} 69 | - POSTGREST_PORT=${POSTGREST_PORT} 70 | - DB_HOST=${DB_HOST} 71 | - DB_PORT=${DB_PORT} 72 | - DB_NAME=${DB_NAME} 73 | - DB_SCHEMA=${DB_SCHEMA} 74 | - DB_USER=${DB_USER} 75 | - DB_PASS=${DB_PASS} 76 | volumes: 77 | - "./openresty/nginx/conf/nginx.conf:/usr/local/openresty/nginx/conf/nginx.conf" 78 | - "./openresty/nginx/conf/includes:/usr/local/openresty/nginx/conf/includes" 79 | - "./openresty/nginx/html:/usr/local/openresty/nginx/html" 80 | - "./openresty/lualib/user_code:/usr/local/openresty/lualib/user_code" 81 | depends_on: 82 | - postgrest 83 | -------------------------------------------------------------------------------- /docs/api.merge.yaml: -------------------------------------------------------------------------------- 1 | !!files_merge_append ["docs/STAC.yaml", "docs/query.fragment.yaml", "docs/fields.fragment.yaml", "docs/sort.fragment.yaml", "docs/insert.fragment.yaml"] 2 | -------------------------------------------------------------------------------- /docs/fields.fragment.yaml: -------------------------------------------------------------------------------- 1 | components: 2 | parameters: 3 | fields: 4 | name: fields 5 | in: query 6 | description: Determines the shape of the features in the response 7 | required: false 8 | schema: 9 | $ref: '#/components/schemas/fields' 10 | style: form 11 | explode: false 12 | schemas: 13 | searchBody: 14 | allOf: 15 | - $ref: '#/components/schemas/fieldsFilter' 16 | fieldsFilter: 17 | type: object 18 | description: Determines the shape of the features in the response 19 | properties: 20 | fields: 21 | $ref: '#/components/schemas/fields' 22 | fields: 23 | description: 
| 24 | The include and exclude members specify an array of 25 | property names that are either included or excluded 26 | from the result, respectively. If both include and 27 | exclude are specified, include takes precedence. 28 | Values should include the full JSON path of the property. 29 | type: object 30 | properties: 31 | include: 32 | type: array 33 | items: 34 | type: string 35 | exclude: 36 | type: array 37 | items: 38 | type: string 39 | example: 40 | include: 41 | - id 42 | - 'properties.eo:cloud_cover' 43 | exclude: 44 | - geometry 45 | - properties.datetime 46 | -------------------------------------------------------------------------------- /docs/insert.fragment.yaml: -------------------------------------------------------------------------------- 1 | openapi: 3.0.1 2 | paths: 3 | '/items': 4 | post: 5 | summary: add a new feature 6 | description: create a new feature 7 | operationId: postFeature 8 | tags: 9 | - Insert Extension 10 | security: 11 | - BearerAuth: [application] 12 | parameters: 13 | - in: header 14 | name: Authorization 15 | schema: 16 | type: string 17 | required: true 18 | description: A bearer JWT with an 'application' role 19 | default: Bearer token 20 | - in: header 21 | name: Prefer 22 | schema: 23 | type: string 24 | required: true 25 | description: Controls the response type from the insert 26 | default: return=minimal 27 | requestBody: 28 | content: 29 | application/json: 30 | schema: 31 | oneOf: 32 | - $ref: '#/components/schemas/item' 33 | - $ref: '#/components/schemas/itemCollection' 34 | responses: 35 | '201': 36 | description: Status of the create request. 37 | '400': 38 | $ref: '#/components/responses/BadRequest' 39 | '409': 40 | $ref: '#/components/responses/ConflictRequest' 41 | '5XX': 42 | $ref: '#/components/responses/InternalServerError' 43 | default: 44 | description: An error occurred. 
45 | content: 46 | application/json: 47 | schema: 48 | $ref: '#/components/schemas/exception' 49 | text/html: 50 | schema: 51 | type: string 52 | 53 | components: 54 | securitySchemes: 55 | BearerAuth: 56 | type: http 57 | scheme: bearer 58 | bearerFormat: JWT 59 | responses: 60 | NotFound: 61 | description: The specified resource was not found 62 | content: 63 | application/json: 64 | schema: 65 | $ref: '#/components/schemas/exception' 66 | BadRequest: 67 | description: The request was malformed or semantically invalid 68 | content: 69 | application/json: 70 | schema: 71 | $ref: '#/components/schemas/exception' 72 | ConflictRequest: 73 | description: The request has a conflict 74 | content: 75 | application/json: 76 | schema: 77 | $ref: '#/components/schemas/exception' 78 | InternalServerError: 79 | description: The request was syntactically and semantically valid, but an error occurred while trying to act upon it 80 | content: 81 | application/json: 82 | schema: 83 | $ref: '#/components/schemas/exception' 84 | -------------------------------------------------------------------------------- /docs/query.fragment.yaml: -------------------------------------------------------------------------------- 1 | components: 2 | parameters: 3 | query: 4 | name: query 5 | in: query 6 | description: query for properties in items. Use the JSON form of the queryFilter used in POST. 
lte: 47 |           type: number 48 |           description: Find items with a property value less than or equal to the specified value.
2 | components: 3 | parameters: 4 | sort: 5 | name: sort 6 | in: query 7 | description: Allows sorting results by the specified properties 8 | required: false 9 | schema: 10 | $ref: '#/components/schemas/sort' 11 | schemas: 12 | searchBody: 13 | allOf: 14 | - $ref: '#/components/schemas/sortFilter' 15 | sortFilter: 16 | type: object 17 | description: Sort the results 18 | properties: 19 | sort: 20 | $ref: '#/components/schemas/sort' 21 | sort: 22 | type: array 23 | description: | 24 | An array of objects containing a property name and sort direction. 25 | minItems: 1 26 | items: 27 | type: object 28 | required: 29 | - field 30 | properties: 31 | field: 32 | type: string 33 | direction: 34 | type: string 35 | default: asc 36 | enum: 37 | - asc 38 | - desc 39 | 40 | example: 41 | - field: eo:cloud_cover 42 | direction: desc 43 | -------------------------------------------------------------------------------- /generateToken.js: -------------------------------------------------------------------------------- 1 | const jsonwebtoken = require('jsonwebtoken'); 2 | const dotenv = require('dotenv'); 3 | dotenv.config(); 4 | const jwt = jsonwebtoken.sign({ role: 'application' }, process.env.JWT_SECRET); 5 | console.log(jwt); 6 | -------------------------------------------------------------------------------- /openresty/Dockerfile: -------------------------------------------------------------------------------- 1 | # example building using AWS ECR as the remote private registry 2 | # export REMOTE_REPO=.dkr.ecr.us-east-1.amazonaws.com/openresty 3 | # docker build -t openresty . 
4 | # docker tag openresty $REMOTE_REPO:latest 5 | # docker push $REMOTE_REPO:latest 6 | 7 | 8 | FROM openresty/openresty:stretch 9 | 10 | COPY nginx /usr/local/openresty/nginx 11 | COPY lualib /usr/local/openresty/lualib 12 | -------------------------------------------------------------------------------- /openresty/lualib/user_code/datetimeBuilder.lua: -------------------------------------------------------------------------------- 1 | module("datetimeBuilder", package.seeall) 2 | require "string_utils" 3 | wrapSingleQuote = string_utils.wrapSingleQuote 4 | local pg_constants = require "pg_constants" 5 | 6 | function buildDatetime(datetime) 7 | local dateString 8 | local startdate, enddate = string.match(datetime, "(.*)/(.*)") 9 | if startdate and enddate then 10 | dateString = "datetime.gt." .. startdate .. "," .. "datetime.lt." .. enddate 11 | else 12 | dateString = "datetime.eq." .. datetime 13 | end 14 | return dateString 15 | end 16 | 17 | function buildDatetimeSQL(datetime) 18 | local dateString 19 | local unknown = "::timestamp" 20 | local startdate, enddate = string.match(datetime, "(.*)/(.*)") 21 | if startdate and enddate then 22 | dateString = pg_constants.datetime .. " > " .. wrapSingleQuote(startdate) .. unknown .. 23 | " AND " .. pg_constants.datetime .. " < " .. wrapSingleQuote(enddate) .. unknown 24 | else 25 | dateString = pg_constants.datetime .. " = " .. 
-- A default property which must be specified in the includes body for search
nature of the query requires the fields to be present 35 | if query then 36 | for key, keyValue in pairs(query) do 37 | table.insert(includeTable, key) 38 | end 39 | end 40 | selectFields = table.concat(selectTable, ",") 41 | return selectFields, includeTable 42 | end 43 | -------------------------------------------------------------------------------- /openresty/lualib/user_code/extensions/queryExtension.lua: -------------------------------------------------------------------------------- 1 | module("queryExtension", package.seeall) 2 | require "string_utils" 3 | wrapSingleQuote = string_utils.wrapSingleQuote 4 | local stacOperators = {} 5 | stacOperators["eq"] = "=" 6 | stacOperators["gt"] = ">" 7 | stacOperators["lt"] = "<" 8 | stacOperators["gte"] = ">=" 9 | stacOperators["lte"] = "<=" 10 | stacOperators["neq"] = "!=" 11 | stacOperators["in"] = "in" 12 | 13 | function buildQueryString(query) 14 | local logicalAndTable = {} 15 | for key, keyValue in pairs(query) do 16 | for operator, operatorValue in pairs(keyValue) do 17 | local propertiesAccessor 18 | local collectionPropertiesAccessor 19 | local castType = "" 20 | local sqlValue 21 | if type(keyValue[operator]) == "number" then 22 | castType = "::numeric" 23 | sqlValue = keyValue[operator] 24 | propertiesAccessor = "properties->" 25 | collectionPropertiesAccessor = "collectionproperties->" 26 | elseif type(keyValue[operator]) == "string" then 27 | sqlValue = wrapSingleQuote(keyValue[operator]) 28 | propertiesAccessor = "properties->>" 29 | collectionPropertiesAccessor = "collectionproperties->>" 30 | end 31 | if (operator == "in") then 32 | local invalues = "(" 33 | for _, initem in ipairs(keyValue[operator]) do 34 | if type(initem) == "number" then 35 | castType = "::numeric" 36 | invalues = invalues .. initem .. "," 37 | propertiesAccessor = "properties->" 38 | collectionPropertiesAccessor = "collectionproperties->" 39 | elseif type(initem) == "string" then 40 | invalues = invalues .. 
wrapSingleQuote(initem) .. "," 41 | propertiesAccessor = "properties->>" 42 | collectionPropertiesAccessor = "collectionproperties->>" 43 | end 44 | end 45 | if string.sub(invalues, -1) == "," then 46 | invalues = string.sub(invalues, 1, string.len(invalues) - 1) 47 | end 48 | sqlValue = invalues .. ")" 49 | end 50 | -- local logicalCoalesce = "COALESCE(" .. propertiesAccessor .. 51 | -- wrapSingleQuote(key) .. "," .. collectionPropertiesAccessor .. 52 | -- wrapSingleQuote(key) .. ")" .. castType .. " " .. stacOperators[operator] 53 | -- .. " " .. sqlValue 54 | local andClause = "(" .. propertiesAccessor .. 55 | wrapSingleQuote(key) .. ")" .. castType .. " " .. 56 | stacOperators[operator] .. " " .. sqlValue 57 | table.insert(logicalAndTable, andClause) 58 | end 59 | end 60 | local logicalAndString = table.concat(logicalAndTable, " AND ") 61 | return logicalAndString 62 | end 63 | -------------------------------------------------------------------------------- /openresty/lualib/user_code/extensions/sortExtension.lua: -------------------------------------------------------------------------------- 1 | module("sortExtension", package.seeall) 2 | require "string_utils" 3 | local pg_constants = require "pg_constants" 4 | wrapSingleQuote = string_utils.wrapSingleQuote 5 | 6 | function setPropertiesPrefix(field) 7 | local prefix, key = string.match(field, "(.*)%.(.*)") 8 | local pgField = "" 9 | if key then 10 | pgField = "properties->" .. "'" .. key .. 
"'" 11 | else 12 | pgField = field 13 | end 14 | return pgField 15 | end 16 | 17 | function buildSortString(sort) 18 | -- Defaut sort by datetime 19 | order = 'datetime.desc' 20 | return order 21 | end 22 | 23 | function buildSortSQL(sort) 24 | local order = "" 25 | if sort then 26 | local orderTable = {} 27 | -- This rule.direction is a temporary fix until tokenized paging is 28 | -- implemented 29 | direction = "asc" 30 | for _, rule in ipairs(sort) do 31 | local pgField 32 | if rule.field == "properties.datetime" then 33 | pgField = pg_constants.datetime 34 | elseif rule.field == "properties.eo:cloud_cover" then 35 | pgField = "(" .. setPropertiesPrefix(rule.field) .. ")" .. "::numeric" 36 | else 37 | pgField = setPropertiesPrefix(rule.field) 38 | end 39 | local orderValue = pgField .. " " .. rule.direction 40 | direction = rule.direction 41 | table.insert(orderTable, orderValue) 42 | end 43 | order = table.concat(orderTable, ",") 44 | order = order .. "," .. pg_constants.tiebreak .. " " .. direction 45 | else 46 | -- Defaut sort by datetime 47 | order = pg_constants.datetime .. " " .. "desc" .. "," .. 48 | pg_constants.tiebreak .. " " .. "desc" 49 | end 50 | -- if string.sub(order, -1) == "," then 51 | -- -- order = string.sub(order, 1, string.len(order) - 1) 52 | -- print ("test") 53 | -- order = "wat" 54 | -- -- order = order .. "," .. pg_constants.id .. " " .. "desc" 55 | -- end 56 | local orderby = "ORDER BY " .. 
order 57 | return orderby 58 | end 59 | -------------------------------------------------------------------------------- /openresty/lualib/user_code/filters.lua: -------------------------------------------------------------------------------- 1 | module("filters", package.seeall) 2 | require "extensions.sortExtension" 3 | require "datetimeBuilder" 4 | local defaultFields = require "defaultFields" 5 | local limit_constants = require "limit_constants" 6 | 7 | function processDatetimeFilter(andQuery, datetime) 8 | local updatedAndQuery 9 | if datetime then 10 | local dateString = datetimeBuilder.buildDatetime(datetime) 11 | if andQuery then 12 | updatedAndQuery = string.sub(andQuery, 1,-2) .. "," .. dateString .. ")" 13 | else 14 | updatedAndQuery = "(" .. dateString .. ")" 15 | end 16 | else 17 | updatedAndQuery = andQuery 18 | end 19 | return updatedAndQuery 20 | end 21 | 22 | function processListFilter(andQuery, list, key) 23 | local updatedAndQuery 24 | local listString 25 | if list then 26 | if type(list) == "table" then 27 | listString = table.concat(list, ",") 28 | else 29 | listString = list 30 | end 31 | 32 | local listQuery = key .. ".in.(" .. listString .. ")" 33 | if andQuery then 34 | updatedAndQuery = string.sub(andQuery, 1,-2) .. "," .. listQuery .. ")" 35 | else 36 | updatedAndQuery = "(" .. listQuery .. 
")" 37 | end 38 | else 39 | updatedAndQuery = andQuery 40 | end 41 | return updatedAndQuery 42 | end 43 | 44 | function createFilterArgs(andQuery, sort, next, limit) 45 | local defaultSelect = table.concat(defaultFields.items, ",") 46 | local filterArgs = {} 47 | filterArgs["select"] = defaultSelect 48 | if andQuery then 49 | filterArgs["and"] = andQuery 50 | end 51 | if next and limit then 52 | filterArgs["offset"] = next 53 | filterArgs["limit"] = limit 54 | else 55 | filterArgs["offset"] = limit_constants.offset 56 | filterArgs["limit"] = limit_constants.limit 57 | end 58 | -- If sort is null returns default sorting order 59 | local order = sortExtension.buildSortString(sort) 60 | filterArgs["order"] = order 61 | return filterArgs 62 | end 63 | 64 | function buildFilters(existingAndQuery, args) 65 | local andQuery = processDatetimeFilter(existingAndQuery, args.datetime) 66 | andQuery = filters.processListFilter(andQuery, args.ids, "id") 67 | andQuery = filters.processListFilter(andQuery, args.collections, "collection") 68 | local filterArgs = filters.createFilterArgs(andQuery, args.sort, args.next, args.limit) 69 | return filterArgs 70 | end 71 | -------------------------------------------------------------------------------- /openresty/lualib/user_code/hooks.lua: -------------------------------------------------------------------------------- 1 | require "satapi" 2 | local path_constants = require "path_constants" 3 | local apiPath = path_constants.apiPath 4 | local searchPath = path_constants.searchPath 5 | local itemsPath = path_constants.itemsPath 6 | local collectionsPath = path_constants.collectionsPath 7 | local conformancePath = path_constants.conformancePath 8 | local stacPath = path_constants.stacPath 9 | local ngx_re = require "ngx.re" 10 | 11 | local function on_init() 12 | -- print "on_init called" 13 | end 14 | 15 | local function on_rest_request() 16 | local method = ngx.var.request_method 17 | ngx.ctx.originalMethod = method 18 | 
satapi.handleRequest() 19 | end 20 | 21 | local function before_rest_response() 22 | local uri = string.gsub(ngx.var.request_uri, "?.*", "") 23 | local uriComponents = ngx_re.split(uri, '/') 24 | local collections = uriComponents[2] 25 | local collectionId = uriComponents[3] 26 | local items = uriComponents[4] 27 | local itemId = uriComponents[5] 28 | 29 | -- Don't wrap in a feature collection 30 | if ((collections == 'collections' and items == nil) or itemId or uri == apiPath or uri == stacPath or uri == (stacPath .. '/')) then 31 | else 32 | if uri == conformancePath then 33 | utils.set_body_postprocess_mode(utils.postprocess_modes.ALL) 34 | utils.set_body_postprocess_fn(satapi.returnConformance) 35 | else 36 | -- If items are posted they should be created 37 | -- Handle the case when a GET /items request with intersects redirects 38 | -- to a POST request to the /search endpoint 39 | if ngx.ctx.originalMethod == "POST" then 40 | if uri ~= itemsPath then 41 | utils.set_body_postprocess_mode(utils.postprocess_modes.ALL) 42 | utils.set_body_postprocess_fn(satapi.wrapFeatureCollection) 43 | end 44 | else 45 | utils.set_body_postprocess_mode(utils.postprocess_modes.ALL) 46 | utils.set_body_postprocess_fn(satapi.wrapFeatureCollection) 47 | end 48 | end 49 | end 50 | end 51 | 52 | return { 53 | on_init = on_init, 54 | on_rest_request = on_rest_request, 55 | before_rest_response = before_rest_response, 56 | } 57 | -------------------------------------------------------------------------------- /openresty/lualib/user_code/init_phase.lua: -------------------------------------------------------------------------------- 1 | cjson = require('cjson') 2 | utils = require('utils') 3 | 4 | hooks = require("hooks") 5 | 6 | if type(hooks.on_init) == 'function' then 7 | hooks.on_init() 8 | end 9 | 10 | -------------------------------------------------------------------------------- /openresty/lualib/user_code/internal_rest_body_filter_phase.lua: 
-------------------------------------------------------------------------------- 1 | -- call body postprocess hook function 2 | -- to trigger this code, one would have these lines in one of 3 | -- the hooks (on_rest_request, before_rest_response) 4 | --[[ 5 | utils.set_body_postprocess_mode(utils.postprocess_modes.ALL) 6 | utils.set_body_postprocess_fn(function(body) 7 | local b = cjson.decode(body) 8 | b.custom_key = 'custom_value' 9 | return cjson.encode(b) 10 | end) 11 | --]] 12 | local mode = utils.get_body_postprocess_mode() 13 | local fn = utils.get_body_postprocess_fn() 14 | if type(fn) == 'function' then 15 | if mode == utils.postprocess_modes.CHUNKS then 16 | ngx.arg[1], ngx.arg[2] = fn(ngx.arg[1], ngx.arg[2]) 17 | end 18 | 19 | if mode == utils.postprocess_modes.ALL then 20 | local response_body = utils.buffer_response_body() 21 | if response_body then 22 | ngx.arg[1] = fn(response_body) 23 | end 24 | end 25 | end 26 | -------------------------------------------------------------------------------- /openresty/lualib/user_code/internal_rest_header_filter_phase.lua: -------------------------------------------------------------------------------- 1 | local path_constants = require "path_constants" 2 | local searchPath = path_constants.searchPath 3 | local itemsPath = path_constants.itemsPath 4 | -- call hook function if present 5 | if type(hooks.before_rest_response) == 'function' then 6 | if ngx.status == 200 then 7 | local uri = string.gsub(ngx.var.request_uri, "?.*", "") 8 | if (uri == searchPath or uri == itemsPath) then 9 | ngx.header.content_type = "application/geo+json" 10 | end 11 | local headers = ngx.resp.get_headers() 12 | if headers["Func-Range"] then 13 | ngx.header.content_range = headers["Func-Range"] 14 | ngx.header["Func-Range"] = nil 15 | end 16 | end 17 | hooks.before_rest_response() 18 | end 19 | -------------------------------------------------------------------------------- /openresty/lualib/user_code/internal_rest_rewrite_phase.lua: 
-------------------------------------------------------------------------------- 1 | -- support /endpoint/:id url style 2 | local m, err = ngx.re.match(ngx.var.uri, "^/([a-z_]+)/([0-9]+)") 3 | if m then 4 | ngx.req.set_uri('/' .. m[1]) 5 | local args = ngx.req.get_uri_args() 6 | args.id = 'eq.' .. m[2] 7 | ngx.req.set_uri_args(args) 8 | ngx.req.set_header('Accept', 'application/vnd.pgrst.object+json') 9 | end 10 | 11 | -- call hook function if present 12 | if type(hooks.on_rest_request) == 'function' then 13 | hooks.on_rest_request() 14 | end 15 | -------------------------------------------------------------------------------- /openresty/lualib/user_code/limit_constants.lua: -------------------------------------------------------------------------------- 1 | module("limit_constants", package.seeall) 2 | local limit_constants = {} 3 | limit_constants.offset = 0 4 | limit_constants.limit = 50 5 | return limit_constants 6 | -------------------------------------------------------------------------------- /openresty/lualib/user_code/path_constants.lua: -------------------------------------------------------------------------------- 1 | module("path_constants", package.seeall) 2 | local path_constants = {} 3 | path_constants.apiPath = "/" 4 | path_constants.searchPath = "/stac/search" 5 | path_constants.itemsPath = "/items" 6 | path_constants.collectionsPath = "/collections" 7 | path_constants.conformancePath = "/conformance" 8 | path_constants.stacPath = "/stac" 9 | path_constants.pg_searchPath = "/rpc/search" 10 | path_constants.pg_searchNoGeomPath = "/rpc/searchnogeom" 11 | path_constants.pg_root = "root" 12 | path_constants.pg_stac = "stac" 13 | path_constants.pg_rootcollections = "rootcollections" 14 | return path_constants 15 | -------------------------------------------------------------------------------- /openresty/lualib/user_code/pg_constants.lua: -------------------------------------------------------------------------------- 1 | module("pg_constants", 
package.seeall)
local pg_constants = {}
-- Column names used when composing raw SQL fragments for the search RPC.
pg_constants.datetime = "datetime"
pg_constants.id = "id"
pg_constants.collection = "collection"
pg_constants.tiebreak = "tiebreak"

return pg_constants
-------------------------------------------------------------------------------- /openresty/lualib/user_code/satapi.lua: --------------------------------------------------------------------------------
module("satapi", package.seeall)
local defaultFields = require "defaultFields"
local filters = require "filters"
local search = require "search"
local ngx_re = require "ngx.re"
local path_constants = require "path_constants"
local apiPath = path_constants.apiPath
local searchPath = path_constants.searchPath
local itemsPath = path_constants.itemsPath
local collectionsPath = path_constants.collectionsPath
local conformancePath = path_constants.conformancePath
-- BUG FIX: stacPath/pg_stac were never localized, leaving `stacPath` nil in
-- handleRequest so the /stac branch could never match.
local stacPath = path_constants.stacPath
local pg_stac = path_constants.pg_stac
local pg_searchPath = path_constants.pg_searchPath
local pg_searchNoGeomPath = path_constants.pg_searchNoGeomPath
local pg_root = path_constants.pg_root
local pg_rootcollections = path_constants.pg_rootcollections

-- Choose the id list for a search: a non-empty ids array from the request
-- wins, otherwise a single path itemId; nil when neither is usable.
-- (The original tested table.getn(ids) with no comparison - 0 is truthy in
-- Lua, so an empty array wrongly suppressed the itemId fallback.)
local function pickIds(requestIds, itemId)
  if requestIds and #requestIds > 0 then
    return requestIds
  elseif itemId then
    return { itemId }
  end
  return nil
end

-- Route a STAC request to the appropriate PostgREST endpoint.
-- bodyJson: decoded POST body (or nil); args: GET query args (or nil);
-- collectionId/itemId: components parsed from WFS-style URLs (or nil).
function setUri(bodyJson, args, collectionId, itemId)
  -- The search function is needed for these operations
  if bodyJson and (bodyJson.bbox or bodyJson.intersects or bodyJson.fields or bodyJson.query or bodyJson.sort) then
    local ids = pickIds(bodyJson.ids, itemId)
    local collections
    if bodyJson.collections and #bodyJson.collections > 0 then
      collections = bodyJson.collections
    end
    local searchBody, searchArgs = search.buildSearch(bodyJson, collectionId, ids, collections)
    ngx.req.set_body_data(cjson.encode(searchBody))
    ngx.req.set_uri_args(searchArgs)
    ngx.req.set_uri(pg_searchPath)
    ngx.req.set_method(ngx.HTTP_POST)
  -- The search function is needed for spatial operations
  elseif args and (args.bbox or args.intersects) then
    local ids = pickIds(args.ids, itemId)
    local collections
    if args.collections and #args.collections > 0 then
      collections = args.collections
    end
    local searchBody, searchArgs = search.buildSearch(args, collectionId, ids, collections)
    ngx.req.set_body_data(cjson.encode(searchBody))
    ngx.req.set_uri_args(searchArgs)
    ngx.req.set_uri(pg_searchPath)
    ngx.req.set_method(ngx.HTTP_POST)
  -- If not we can pass all the traffic down to the raw PostgREST items endpoint.
  else
    local andQuery
    if collectionId and collectionId ~= '' then
      andQuery = "(collection.eq." .. collectionId .. ")"
    end
    if itemId and itemId ~= '' then
      andQuery = "(id.eq." .. itemId .. ")"
    end
    -- Use the POST body as the args table
    if args == nil and bodyJson then
      args = bodyJson
    end
    local filterArgs = filters.buildFilters(andQuery, args)
    -- BUG FIX: the original encoded an undefined global (filterBody), which
    -- sent a literal "null" request body along with the rewritten GET.
    -- Clear the body instead.
    ngx.req.set_body_data("")
    ngx.req.set_uri_args(filterArgs)
    ngx.req.set_uri(itemsPath)
    ngx.req.set_method(ngx.HTTP_GET)
  end
end

-- Entry point called from the rewrite phase: inspects method and uri and
-- dispatches to the matching PostgREST view or stored procedure.
function handleRequest()
  -- Change cjson encoding behavior to support empty arrays.
  cjson.encode_empty_table_as_object(false)
  local method = ngx.req.get_method()
  ngx.req.read_body()
  local body = ngx.req.get_body_data()
  local uri = string.gsub(ngx.var.request_uri, "?.*", "")
  -- Trim trailing slash
  if string.len(uri) > 1 and string.sub(uri, -1) == "/" then
    uri = string.sub(uri, 1, string.len(uri) - 1)
  end
  if method == 'POST' then
    ngx.req.set_header("Accept", "application/json")
    if uri == searchPath then
      if not body then
        body = "{}"
      end
      local bodyJson = cjson.decode(body)
      setUri(bodyJson)
    end
  elseif method == 'GET' then
    local collections = string.find(uri, collectionsPath)
    local args = ngx.req.get_uri_args()
    if collections then
      handleWFS(args, uri)
    else
      if uri == apiPath then
        ngx.req.set_header("Accept", "application/vnd.pgrst.object+json")
        ngx.req.set_uri(pg_root)
      elseif uri == itemsPath then
        ngx.req.set_header("Accept", "application/json")
        setUri(nil, args)
      -- This uses the root path for conformance to have a valid response
      elseif uri == conformancePath then
        ngx.req.set_uri(pg_root)
      elseif uri == stacPath then
        ngx.req.set_header("Accept", "application/vnd.pgrst.object+json")
        -- BUG FIX: was set_uri(stacPath), which rewrote /stac onto itself
        -- rather than onto the PostgREST "stac" view.
        ngx.req.set_uri(pg_stac)
      end
    end
  end
end

-- Handle WFS-style /collections[/:collectionId[/items[/:itemId]]] URLs.
function handleWFS(args, uri)
  local uriComponents = ngx_re.split(uri, '/')
  local collections = uriComponents[2]
  local collectionId = uriComponents[3]
  local items = uriComponents[4]
  local itemId = uriComponents[5]

  if collectionId then
    if items and items ~= '' then
      if itemId and itemId ~= '' then
        -- Return object rather than array
        ngx.req.set_header("Accept", "application/vnd.pgrst.object+json")
      else
        ngx.req.set_header("Accept", "application/json")
      end
      setUri(nil, args, collectionId, itemId)
    else
      local idQuery =
"eq." .. collectionId 138 | local defaultCollectionSelect = table.concat(defaultFields.collections, ",") 139 | local uriArgs = {} 140 | uriArgs["id"] = idQuery 141 | uriArgs["select"] = defaultCollectionSelect 142 | local headers = ngx.req.get_headers() 143 | -- Return object rather than array 144 | ngx.req.set_header("Accept", "application/vnd.pgrst.object+json") 145 | ngx.req.set_uri_args(uriArgs) 146 | ngx.req.set_uri(collectionsPath) 147 | end 148 | else 149 | -- Handle trailing slashes 150 | ngx.req.set_header("Accept", "application/vnd.pgrst.object+json") 151 | ngx.req.set_uri(pg_rootcollections) 152 | end 153 | end 154 | 155 | function wrapFeatureCollection(body) 156 | local features = cjson.decode(body) 157 | local itemCollection = { 158 | type="FeatureCollection", 159 | features=features 160 | } 161 | return cjson.encode(itemCollection) 162 | end 163 | 164 | function returnConformance() 165 | local conformanceTable = { conformsTo={ 166 | "http://www.opengis.net/spec/ogcapi-features-1/1.0/conf/core", 167 | "http://www.opengis.net/spec/ogcapi-features-1/1.0/conf/html", 168 | "http://www.opengis.net/spec/ogcapi-features-1/1.0/conf/geojson" 169 | }} 170 | return cjson.encode(conformanceTable) 171 | end 172 | 173 | -------------------------------------------------------------------------------- /openresty/lualib/user_code/search.lua: -------------------------------------------------------------------------------- 1 | module("search", package.seeall) 2 | require "extensions.fieldsExtension" 3 | require "extensions.queryExtension" 4 | require "extensions.sortExtension" 5 | require "datetimeBuilder" 6 | require "wfsBuilder" 7 | local defaultFields = require "defaultFields" 8 | local limit_constants = require "limit_constants" 9 | wrapSingleQuote = string_utils.wrapSingleQuote 10 | 11 | function processSearchQuery(query, datetime, collectionId, ids, collections) 12 | local andComponents = {} 13 | if query and not ids then 14 | andComponents[#andComponents + 1] = 
queryExtension.buildQueryString(query)
  end
  if datetime then
    andComponents[#andComponents + 1] = datetimeBuilder.buildDatetimeSQL(datetime)
  end
  if collectionId then
    andComponents[#andComponents + 1] = wfsBuilder.buildQuery(collectionId, "collection")
  end
  if ids then
    andComponents[#andComponents + 1] = wfsBuilder.buildInQuery(ids, "id")
  end
  if collections then
    andComponents[#andComponents + 1] = wfsBuilder.buildInQuery(collections, "collection")
  end
  local andQuery
  if #andComponents ~= 0 then
    andQuery = table.concat(andComponents, " AND ")
  end
  if andQuery then
    andQuery = " AND " .. andQuery
  end
  -- BUG FIX: removed leftover debug print() of the SQL fragment, which
  -- spammed the nginx error log on every search.
  return andQuery
end

-- Build the request body and uri args for the PostgREST search RPC.
-- next/limit page the results; andQuery is a raw SQL fragment produced by
-- processSearchQuery; sort is the SQL produced by sortExtension.
function createSearch(fields, bbox, intersects, next, limit, andQuery, sort)
  local body = {}
  local searchArgs = {}
  local defaultSelect = table.concat(defaultFields.items, ",")
  if next and limit then
    body["next"] = next
    body["lim"] = limit
  else
    body["next"] = limit_constants.offset
    body["lim"] = limit_constants.limit
  end
  if fields then
    -- BUG FIX: the original passed an undefined global `query` as a second
    -- argument (always nil); only the fields table is available here.
    local selectFields, includeTable = fieldsExtension.buildFieldsObject(fields)
    body["include"] = includeTable
    searchArgs["select"] = selectFields
  else
    searchArgs["select"] = defaultSelect
  end
  if bbox then
    if type(bbox) == 'string' then
      -- GET requests pass bbox as a comma separated string; wrap it as a
      -- Postgres array literal. (Was leaked as a global modifiedBbox.)
      body["bbox"] = "{" .. bbox .. "}"
    else
      body["bbox"] = bbox
    end
  end
  if intersects then
    if type(intersects) == 'string' then
      -- BUG FIX: removed leftover debug print() of the raw GeoJSON string.
      local intersectsTable = cjson.decode(intersects)
      body["intersects"] = intersectsTable
    else
      body["intersects"] = intersects
    end
  end
  if andQuery then
    body["andquery"] = andQuery
  end
  body["sort"] = sort
  return body, searchArgs
end

-- Top-level builder: combine query/datetime/ids/collections filters and the
-- fields/sort extensions into the body and args for the search RPC.
function buildSearch(json, collectionId, ids, collections)
  local andQuery = processSearchQuery(json.query, json.datetime, collectionId, ids, collections)
  local sort = sortExtension.buildSortSQL(json.sort)
  local searchBody, searchArgs = createSearch(json.fields, json.bbox, json.intersects, json.next, json.limit, andQuery, sort)
  return searchBody, searchArgs
end
-------------------------------------------------------------------------------- /openresty/lualib/user_code/string_utils.lua: --------------------------------------------------------------------------------
module("string_utils", package.seeall)

-- Wrap a value in single quotes for embedding in a SQL fragment.
-- NOTE(review): the value is not escaped; callers must not pass untrusted
-- input containing single quotes (SQL injection risk) - confirm upstream
-- validation.
function wrapSingleQuote(value)
  return "'" .. value ..
"'" 5 | end 6 | -------------------------------------------------------------------------------- /openresty/lualib/user_code/utils.lua: -------------------------------------------------------------------------------- 1 | -- response body postprocess mode 2 | local NONE = 0 3 | local CHUNKS = 1 4 | local ALL = 2 5 | 6 | local function set_body_postprocess_mode(mode) 7 | ngx.ctx.body_postprocess_mode = mode 8 | end 9 | 10 | local function get_body_postprocess_mode() 11 | return ngx.ctx.body_postprocess_mode 12 | end 13 | 14 | local function get_body_postprocess_fn() 15 | return ngx.ctx.body_postprocess_fn 16 | end 17 | 18 | local function set_body_postprocess_fn(fn) 19 | ngx.ctx.body_postprocess_fn = fn 20 | end 21 | 22 | local function buffer_response_body() 23 | local chunk, eof = ngx.arg[1], ngx.arg[2] 24 | local buffered = ngx.ctx.buffered_respose_body 25 | if not buffered then 26 | buffered = {} 27 | ngx.ctx.buffered_respose_body = buffered 28 | end 29 | if chunk ~= "" then 30 | buffered[#buffered + 1] = chunk 31 | ngx.arg[1] = nil 32 | end 33 | if eof then 34 | local response = table.concat(buffered) 35 | ngx.ctx.buffered_respose_body = nil 36 | --ngx.arg[1] = response 37 | ngx.arg[1] = nil 38 | return response 39 | end 40 | end 41 | 42 | return { 43 | postprocess_modes = { 44 | NONE = NONE, 45 | CHUNKS = CHUNKS, 46 | ALL = ALL 47 | }, 48 | set_body_postprocess_mode = set_body_postprocess_mode, 49 | get_body_postprocess_mode = get_body_postprocess_mode, 50 | buffer_response_body = buffer_response_body, 51 | get_body_postprocess_fn = get_body_postprocess_fn, 52 | set_body_postprocess_fn = set_body_postprocess_fn 53 | } -------------------------------------------------------------------------------- /openresty/lualib/user_code/wfsBuilder.lua: -------------------------------------------------------------------------------- 1 | module("wfsBuilder", package.seeall) 2 | require "string_utils" 3 | wrapSingleQuote = string_utils.wrapSingleQuote 4 | local pg_constants = 
require "pg_constants" 5 | 6 | function buildQuery(id, wfsType) 7 | local wfsQuery 8 | if id and wfsType then 9 | wfsQuery = pg_constants[wfsType] .. " = " .. wrapSingleQuote(id) 10 | end 11 | return wfsQuery 12 | end 13 | 14 | function buildInQuery(ids, wfsType) 15 | local wfsQuery 16 | if ids and wfsType then 17 | wrappedIds = {} 18 | for index, value in ipairs(ids) do 19 | table.insert(wrappedIds, wrapSingleQuote(value)) 20 | end 21 | wfsQuery = pg_constants[wfsType] .. " IN " .. "(" .. table.concat(wrappedIds, ",") .. ")" 22 | end 23 | return wfsQuery 24 | end 25 | -------------------------------------------------------------------------------- /openresty/nginx/conf/includes/globals/env_vars.conf: -------------------------------------------------------------------------------- 1 | # a list of env vars that nginx will make avaliable for configuration files and Lua code 2 | 3 | env POSTGREST_HOST; 4 | env POSTGREST_PORT; 5 | env JWT_SECRET; 6 | 7 | env DB_SCHEMA; 8 | env DB_HOST; 9 | env DB_PORT; 10 | env DB_NAME; 11 | env DB_USER; 12 | env DB_PASS; 13 | 14 | env DEVELOPMENT; 15 | -------------------------------------------------------------------------------- /openresty/nginx/conf/includes/http/init_lua.conf: -------------------------------------------------------------------------------- 1 | # set search paths for pure Lua external libraries (';;' is the default path): 2 | lua_package_path '${prefix}../lualib/user_code/?.lua;;'; 3 | init_by_lua_file '../lualib/user_code/init_phase.lua'; 4 | -------------------------------------------------------------------------------- /openresty/nginx/conf/includes/http/server/gzip.conf: -------------------------------------------------------------------------------- 1 | # enable gzip compression 2 | gzip on; 3 | gzip_disable "msie6"; 4 | gzip_vary on; 5 | gzip_proxied any; 6 | gzip_comp_level 6; 7 | gzip_buffers 16 8k; 8 | gzip_http_version 1.1; 9 | gzip_types text/plain text/css application/json 
application/vnd.pgrst.object+json application/x-javascript text/xml application/xml application/xml+rss text/javascript; 10 | -------------------------------------------------------------------------------- /openresty/nginx/conf/includes/http/server/locations.conf: -------------------------------------------------------------------------------- 1 | include includes/http/server/locations/*.conf; 2 | include includes/root_location.conf; -------------------------------------------------------------------------------- /openresty/nginx/conf/includes/http/server/locations/internal_rest.conf: -------------------------------------------------------------------------------- 1 | # main location that will handle rest requests requests 2 | location /internal/rest/ { 3 | internal; 4 | include includes/http/server/locations/internal_rest/*.conf; 5 | default_type application/json; 6 | set_by_lua_block $postgrest_host { return os.getenv('POSTGREST_HOST') or "0" } 7 | set_by_lua_block $postgrest_port { return os.getenv('POSTGREST_PORT') or "0" } 8 | 9 | proxy_set_header Accept-Encoding ""; #force postgrest not to gzip the output 10 | proxy_set_header Connection ""; #optimise communication with upstream (keep alive) 11 | proxy_http_version 1.1; 12 | rewrite /internal/rest(.+) $1 break; 13 | proxy_pass http://$postgrest_host:$postgrest_port; # Reverse proxy to your PostgREST 14 | 15 | # Rewrite the Content-Location header to match our location 16 | proxy_hide_header Content-Location; 17 | more_set_headers 'Content-Location: $rest_prefix$upstream_http_content_location'; 18 | 19 | # Debug Info 20 | if ($development = "1") { 21 | more_set_headers 'Request-Time: $request_time'; 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /openresty/nginx/conf/includes/http/server/locations/internal_rest/lua.conf: -------------------------------------------------------------------------------- 1 | set_by_lua_block $rest_prefix { return 
ngx.var.rest_prefix or "/rest" } 2 | rewrite_by_lua_file '../lualib/user_code/internal_rest_rewrite_phase.lua'; 3 | header_filter_by_lua_file '../lualib/user_code/internal_rest_header_filter_phase.lua'; 4 | body_filter_by_lua_file '../lualib/user_code/internal_rest_body_filter_phase.lua'; -------------------------------------------------------------------------------- /openresty/nginx/conf/includes/http/server/locations/internal_rest/security.conf: -------------------------------------------------------------------------------- 1 | # don't send the nginx version number in error pages and Server header 2 | server_tokens off; 3 | 4 | # config to don't allow the browser to render the page inside an frame or iframe 5 | # and avoid clickjacking http://en.wikipedia.org/wiki/Clickjacking 6 | # if you need to allow [i]frames, you can use SAMEORIGIN or even set an uri with ALLOW-FROM uri 7 | # https://developer.mozilla.org/en-US/docs/HTTP/X-Frame-Options 8 | more_set_headers "X-Frame-Options: SAMEORIGIN"; 9 | 10 | # when serving user-supplied content, include a X-Content-Type-Options: nosniff header along with the Content-Type: header, 11 | # to disable content-type sniffing on some browsers. 12 | # https://www.owasp.org/index.php/List_of_useful_HTTP_headers 13 | # currently suppoorted in IE > 8 http://blogs.msdn.com/b/ie/archive/2008/09/02/ie8-security-part-vi-beta-2-update.aspx 14 | # http://msdn.microsoft.com/en-us/library/ie/gg622941(v=vs.85).aspx 15 | # 'soon' on Firefox https://bugzilla.mozilla.org/show_bug.cgi?id=471020 16 | more_set_headers "X-Content-Type-Options: nosniff"; 17 | 18 | # This header enables the Cross-site scripting (XSS) filter built into most recent web browsers. 19 | # It's usually enabled by default anyway, so the role of this header is to re-enable the filter for 20 | # this particular website if it was disabled by the user. 
21 | # https://www.owasp.org/index.php/List_of_useful_HTTP_headers 22 | more_set_headers "X-XSS-Protection: 1; mode=block"; 23 | 24 | # with Content Security Policy (CSP) enabled(and a browser that supports it(http://caniuse.com/#feat=contentsecuritypolicy), 25 | # you can tell the browser that it can only download content from the domains you explicitly allow 26 | # http://www.html5rocks.com/en/tutorials/security/content-security-policy/ 27 | # https://www.owasp.org/index.php/Content_Security_Policy 28 | # I need to change our application code so we can increase security by disabling 'unsafe-inline' 'unsafe-eval' 29 | # directives for css and js(if you have inline css or js, you will need to keep it too). 30 | # more: http://www.html5rocks.com/en/tutorials/security/content-security-policy/#inline-code-considered-harmful 31 | # more_set_headers "Content-Security-Policy: default-src 'self'; script-src 'self' 'unsafe-inline' 'unsafe-eval' https://ssl.google-analytics.com https://assets.zendesk.com https://connect.facebook.net; img-src 'self' https://ssl.google-analytics.com https://s-static.ak.facebook.com https://assets.zendesk.com; style-src 'self' 'unsafe-inline' https://fonts.googleapis.com https://assets.zendesk.com; font-src 'self' https://themes.googleusercontent.com; frame-src https://assets.zendesk.com https://www.facebook.com https://s-static.ak.facebook.com https://tautt.zendesk.com; object-src 'none'"; -------------------------------------------------------------------------------- /openresty/nginx/conf/includes/http/server/resolver.conf: -------------------------------------------------------------------------------- 1 | resolver 127.0.0.11 ipv6=off; 2 | -------------------------------------------------------------------------------- /openresty/nginx/conf/includes/root_location.conf: -------------------------------------------------------------------------------- 1 | # this is the last location in the chain that points to html directory 2 | # this is 
where the files of your frontend application would go (html/javascript/css) 3 | 4 | # location / { 5 | # root html; 6 | # index index.html index.htm; 7 | # } 8 | # 9 | location / { 10 | if ($request_method = 'OPTIONS') { 11 | add_header 'Access-Control-Allow-Origin' '*'; 12 | add_header 'Access-Control-Allow-Methods' 'GET, POST, PATCH, DELETE, OPTIONS'; 13 | add_header 'Access-Control-Allow-Headers' $http_access_control_request_headers; 14 | add_header 'Access-Control-Allow-Credentials' true; 15 | add_header 'Access-Control-Max-Age' 1728000; 16 | add_header 'Content-Type' 'text/plain charset=UTF-8'; 17 | add_header 'Content-Length' 0; 18 | return 204; 19 | } 20 | 21 | if ($request_method = 'POST') { 22 | set_by_lua_block $dummy { 23 | ngx.header['Access-Control-Allow-Methods'] = 'GET, POST, PATCH, DELETE, OPTIONS'; 24 | ngx.header['Access-Control-Allow-Credentials'] = 'true'; 25 | return true; 26 | } 27 | } 28 | 29 | if ($request_method = 'DELETE') { 30 | add_header 'Access-Control-Max-Age' 1728000; 31 | add_header 'Content-Type' 'text/plain charset=UTF-8'; 32 | add_header 'Content-Length' 0; 33 | return 405; 34 | } 35 | 36 | if ($request_method = 'PATCH') { 37 | add_header 'Access-Control-Max-Age' 1728000; 38 | add_header 'Content-Type' 'text/plain charset=UTF-8'; 39 | add_header 'Content-Length' 0; 40 | return 405; 41 | } 42 | 43 | include includes/http/server/locations/rest/*.conf; 44 | set $rest_prefix "/"; 45 | rewrite ^/(.*)$ /internal/rest/$1; 46 | 47 | } 48 | -------------------------------------------------------------------------------- /openresty/nginx/conf/nginx.conf: -------------------------------------------------------------------------------- 1 | include includes/globals/*.conf; 2 | worker_processes 1; 3 | events { 4 | worker_connections 1024; 5 | } 6 | 7 | http { 8 | include includes/http/*.conf; 9 | 10 | include mime.types; 11 | # a shorter log format for development 12 | log_format development '[$time_local] "$request" $status $body_bytes_sent 
"$request_time ms"'; 13 | 14 | server { 15 | listen 80 default_server; 16 | server_name _; 17 | charset utf-8; 18 | uninitialized_variable_warn off; 19 | 20 | #expose external env vars as internal nginx variables 21 | set_by_lua_block $development { return os.getenv('DEVELOPMENT') or "0" } 22 | 23 | #depending the env (production/development) switch between log formats 24 | #for production this section can be removed 25 | set $log_development 0; 26 | set $log_production 0; 27 | if ($development = "1") { 28 | set $log_development 1; 29 | } 30 | if ($development = "0") { 31 | set $log_production 1; 32 | } 33 | 34 | access_log logs/access.log combined if=$log_production; 35 | access_log logs/access.log development if=$log_development; 36 | 37 | 38 | include includes/http/server/*.conf; 39 | } 40 | 41 | } 42 | -------------------------------------------------------------------------------- /openresty/nginx/html/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Welcome to PostgREST Starter Kit! 5 | 6 | 12 | 20 | 21 | 22 |

PostgREST Starter Kit!

23 | 24 |

Your API is up and running.

25 | 26 |

API Endpoints

27 |
    28 |
  • rest/
  • 29 |
30 | 31 | 32 |

Explore

33 | curl rest/todos?select=id,todo 34 | 35 |

Support and Documentation

36 | 43 | 44 | Developed by subZero 45 | 46 | 47 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "sat-api-pg", 3 | "version": "0.0.1", 4 | "dependencies": {}, 5 | "devDependencies": { 6 | "babel-core": "^6.24.0", 7 | "babel-preset-latest": "^6.24.1", 8 | "dotenv": "4.0.0", 9 | "eslint": "^6.4.0", 10 | "eslint-plugin-mocha": "^6.1.1", 11 | "jsonwebtoken": "^8.5.1", 12 | "mocha": "^6.0.2", 13 | "should": "^11.2.0", 14 | "standard": "^14.3.1", 15 | "subzero-cli": "^0.1.38", 16 | "supertest": "^3.0.0", 17 | "yaml-files": "^1.1.0" 18 | }, 19 | "scripts": { 20 | "test_rest": "mocha --no-timeouts --require babel-core/register ./tests/rest/", 21 | "test_db": "node tests/bin/test_db.js", 22 | "test": "npm run test_db && npm run test_rest", 23 | "generate-docs": "yaml-files docs/api.merge.yaml docs/api.yaml" 24 | }, 25 | "author": { 26 | "name": "Sean Harkins", 27 | "url": "https://github.com/sharkinsspatial", 28 | "email": "sean@developmentseed.org" 29 | }, 30 | "license": "MIT" 31 | } 32 | -------------------------------------------------------------------------------- /tests/bin/test_db.js: -------------------------------------------------------------------------------- 1 | const spawn = require('child_process').spawn; 2 | require('dotenv').config(); 3 | const PG_MAJOR_VERSION = process.env.PG_VERSION.replace(/\..*/,''); 4 | spawn('docker', [ 5 | 'run', 6 | '-i', 7 | '-t', 8 | '--rm', 9 | '--name', 'pgtap', 10 | '--net', `${process.env.COMPOSE_PROJECT_NAME}_default`, 11 | '--link', `${process.env.COMPOSE_PROJECT_NAME}_db_1:db`, 12 | '-v', `${process.cwd()}/tests/db/:/test`, 13 | '-e', `HOST=${process.env.DB_HOST}`, 14 | '-e', `DATABASE=${process.env.DB_NAME}`, 15 | '-e', `USER=${process.env.SUPER_USER}`, 16 | '-e', `PASSWORD=${process.env.SUPER_USER_PASSWORD}`, `subzerocloud/pgtap:pg${PG_MAJOR_VERSION}`, 17 | ], 18 | { stdio: 
'inherit' }); -------------------------------------------------------------------------------- /tests/db/README.md: -------------------------------------------------------------------------------- 1 | To run the tests in this directory do the following 2 | - change to the root of the project folder 3 | - bring the system up using docker-compose 4 | - run the command below 5 | 6 | ```shell 7 | ( \ 8 | source .env && \ 9 | docker run -i -t --rm --name pgtap \ 10 | --network ${COMPOSE_PROJECT_NAME}_default \ 11 | --link ${COMPOSE_PROJECT_NAME}_db_1:db \ 12 | -v $(pwd)/tests/db/:/test \ 13 | -e HOST=$DB_HOST \ 14 | -e DATABASE=$DB_NAME \ 15 | -e USER=$SUPER_USER \ 16 | -e PASSWORD=$SUPER_USER_PASSWORD \ 17 | lren/pgtap:0.96.0-2 \ 18 | ) 19 | ``` 20 | -------------------------------------------------------------------------------- /tests/db/simple.sql: -------------------------------------------------------------------------------- 1 | begin; 2 | select * from plan(3); 3 | 4 | select has_schema('information_schema'); 5 | 6 | select has_view('information_schema', 'routines', 'has routines information_schema.routines view'); 7 | 8 | select has_column('information_schema', 'routines', 'specific_name', 'has information_schema.routines.specific_name column'); 9 | 10 | select * from finish(); 11 | rollback; 12 | -------------------------------------------------------------------------------- /tests/db/structure.sql: -------------------------------------------------------------------------------- 1 | begin; 2 | select * from plan(2); 3 | 4 | select views_are( 5 | 'api', 6 | array['collectionitems', 'items', 'collections', 'root'], 7 | 'views present' 8 | ); 9 | 10 | select functions_are( 11 | 'api', 12 | array['login', 'signup', 'refresh_token', 'me', 'search', 'searchnogeom'], 'functions present' 13 | ); 14 | 15 | select * from finish(); 16 | rollback; 17 | -------------------------------------------------------------------------------- /tests/rest/.eslintrc: 
-------------------------------------------------------------------------------- 1 | { 2 | "plugins": [ 3 | "mocha" 4 | ], 5 | "parserOptions": { 6 | "ecmaVersion": 2017, 7 | "sourceType": "module" 8 | }, 9 | "env": { 10 | "es6": true 11 | }, 12 | "extends": "standard", 13 | "rules": { 14 | "semi": [2, "always"] 15 | }, 16 | "globals": { 17 | "describe": "readonly", 18 | "it": "readonly", 19 | "before": "readonly", 20 | "after": "readonly", 21 | "beforeEach": "readonly", 22 | "afterEach": "readonly" 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /tests/rest/bbox.js: -------------------------------------------------------------------------------- 1 | import { restService, resetdb } from './common'; 2 | import should from 'should'; // eslint-disable-line no-unused-vars 3 | import { itemsPath, searchPath, wfsItemsPath } from './constants'; 4 | 5 | describe('bbox filter', function () { 6 | before(function (done) { resetdb(); done(); }); 7 | after(function (done) { resetdb(); done(); }); 8 | 9 | it('Handles bbox filter for search endpoint with POST', function (done) { 10 | restService() 11 | .post(searchPath) 12 | .send({ 13 | bbox: [-114.18578, 30.64594, -111.68488, 32.81955] 14 | }) 15 | .expect('Content-Type', /json/) 16 | .expect(200, done) 17 | .expect(r => { 18 | r.body.features.length.should.equal(1); 19 | }); 20 | }); 21 | 22 | it('Global bbox selects all the items', function (done) { 23 | restService() 24 | .post(searchPath) 25 | .send({ 26 | bbox: [-180, -90, 180, 90] 27 | }) 28 | .expect('Content-Type', /json/) 29 | .expect(200, done) 30 | .expect(r => { 31 | r.body.features.length.should.be.above(1); 32 | }); 33 | }); 34 | 35 | it('Handles bbox filter for items endpoint with GET and query parameter', 36 | function (done) { 37 | restService() 38 | .get(itemsPath) 39 | .query({ 40 | bbox: '-180,-90,180,90' 41 | }) 42 | .expect('Content-Type', /json/) 43 | .expect(200, done) 44 | .expect(r => { 45 | 
r.body.features.length.should.be.above(1); 46 | }); 47 | }); 48 | 49 | it('Handles bbox filter for wfs items endpoint with GET and query parameter', 50 | function (done) { 51 | restService() 52 | .get(wfsItemsPath) 53 | .query({ 54 | bbox: '-180,-90,180,90' 55 | }) 56 | .expect('Content-Type', /json/) 57 | .expect(200, done) 58 | .expect(r => { 59 | r.body.features.length.should.be.above(1); 60 | }); 61 | }); 62 | }); 63 | -------------------------------------------------------------------------------- /tests/rest/collections.js: -------------------------------------------------------------------------------- 1 | import should from 'should'; // eslint-disable-line no-unused-vars 2 | import { restService, resetdb } from './common'; 3 | import { collectionsPath } from './constants'; 4 | import landsat8l2Collection from './landsat8l2Collection.json'; 5 | const proxy = process.env.SERVER_PROXY_URI; 6 | 7 | describe('collections', function () { 8 | beforeEach(function (done) { resetdb(); done(); }); 9 | afterEach(function (done) { resetdb(); done(); }); 10 | 11 | it('Initial insert of a collection returns 201', function (done) { 12 | restService() 13 | .post(collectionsPath) 14 | .set('Prefer', 'return=minimal') 15 | .set('Content-Type', 'application/json') 16 | .withRole('application') 17 | .send(landsat8l2Collection) 18 | .expect(201, done); 19 | }); 20 | 21 | it('Insert a collection without a valid JWT or role returns 401', function (done) { 22 | restService() 23 | .post(collectionsPath) 24 | .set('Prefer', 'return=minimal') 25 | .set('Content-Type', 'application/json') 26 | .send(landsat8l2Collection) 27 | .expect(401, done); 28 | }); 29 | 30 | it('Inserting a collection with a duplicate id returns 409', function (done) { 31 | restService() 32 | .post(collectionsPath) 33 | .set('Prefer', 'return=minimal') 34 | .set('Content-Type', 'application/json') 35 | .withRole('application') 36 | .send(landsat8l2Collection) 37 | .end(() => { 38 | restService() 39 | 
.post(collectionsPath) 40 | .set('Prefer', 'return=minimal') 41 | .set('Content-Type', 'application/json') 42 | .withRole('application') 43 | .send(landsat8l2Collection) 44 | .expect(409, done); 45 | }); 46 | }); 47 | 48 | it('Adds self and root links based on apiUrl value', function (done) { 49 | restService() 50 | .get(collectionsPath) 51 | .expect('Content-Type', /json/) 52 | .expect(200, done) 53 | .expect(r => { 54 | r.body.collections[0].links.length.should.equal(2); 55 | r.body.collections[0].links.should.containDeep([{ 56 | href: `${proxy}collections/landsat-8-l1`, 57 | rel: 'self', 58 | type: 'application/json', 59 | title: null 60 | }, 61 | { 62 | href: `${proxy}collections/landsat-8-l1`, 63 | rel: 'root', 64 | type: 'application/json', 65 | title: null 66 | }]); 67 | }); 68 | }); 69 | 70 | it('Merges derived_from link if included in inserted collection', function (done) { 71 | restService() 72 | .post(collectionsPath) 73 | .set('Prefer', 'return=minimal') 74 | .set('Content-Type', 'application/json') 75 | .withRole('application') 76 | .send(landsat8l2Collection) 77 | .end(() => { 78 | restService() 79 | .get(collectionsPath) 80 | .expect('Content-Type', /json/) 81 | .expect(200, done) 82 | .expect(r => { 83 | r.body.collections[1].links.length.should.equal(4); 84 | r.body.collections[1].links.should.containDeep([{ 85 | href: `${proxy}collections/landsat-8-l2`, 86 | rel: 'self', 87 | type: 'application/json', 88 | title: null 89 | }, 90 | { 91 | href: `${proxy}collections/landsat-8-l2`, 92 | rel: 'root', 93 | type: 'application/json', 94 | title: null 95 | }, 96 | { 97 | href: 'derived', 98 | rel: 'derived_from', 99 | type: null, 100 | title: null 101 | }]); 102 | }); 103 | }); 104 | }); 105 | }); 106 | -------------------------------------------------------------------------------- /tests/rest/collections_filter.js: -------------------------------------------------------------------------------- 1 | import { restService, resetdb } from './common'; 2 | 
import should from 'should'; // eslint-disable-line no-unused-vars 3 | import { searchPath, itemsPath, wfsItemsPath } from './constants'; 4 | 5 | describe('collections filter', function () { 6 | before(function (done) { resetdb(); done(); }); 7 | after(function (done) { resetdb(); done(); }); 8 | 9 | it('Collections filter for search endpoint', function (done) { 10 | restService() 11 | .post(searchPath) 12 | .send({ 13 | collections: ['landsat-8-l1'] 14 | }) 15 | .expect('Content-Type', /json/) 16 | .expect(200, done) 17 | .expect(r => { 18 | r.body.features.length.should.be.above(1); 19 | }); 20 | }); 21 | 22 | it('Collections filter as query parameter for items GET', function (done) { 23 | restService() 24 | .get(itemsPath) 25 | .query({ 26 | collections: 'landsat-8-l1' 27 | }) 28 | .expect('Content-Type', /json/) 29 | .expect(200, done) 30 | .expect(r => { 31 | r.body.features.length.should.be.above(1); 32 | }); 33 | }); 34 | 35 | it('Collections can be passed as query parameter in GET for wfs items', function (done) { 36 | restService() 37 | .get(wfsItemsPath) 38 | .query({ 39 | collections: 'landsat-8-l1' 40 | }) 41 | .expect('Content-Type', /json/) 42 | .expect(200, done) 43 | .expect(r => { 44 | r.body.features.length.should.be.above(1); 45 | }); 46 | }); 47 | 48 | it('Collections filter works correctly with filters using stored procedure', function (done) { 49 | restService() 50 | .post(searchPath) 51 | .send({ 52 | collections: ['nocollection'], 53 | bbox: [-180, -90, 180, 90] 54 | }) 55 | .expect('Content-Type', /json/) 56 | .expect(200, done) 57 | .expect(r => { 58 | r.body.features.length.should.equal(0); 59 | }); 60 | }); 61 | }); 62 | -------------------------------------------------------------------------------- /tests/rest/common.js: -------------------------------------------------------------------------------- 1 | import jsonwebtoken from 'jsonwebtoken'; 2 | import request from 'supertest'; 3 | import { config } from 'dotenv'; 4 | import { 
spawnSync } from 'child_process'; 5 | // var execSync = require('child_process').execSync; 6 | 7 | // .env file vars added to process.env 8 | config(); 9 | 10 | const COMPOSE_PROJECT_NAME = process.env.COMPOSE_PROJECT_NAME; 11 | // const POSTGRES_USER = process.env.POSTGRES_USER; 12 | // const POSTGRES_PASSWORD = process.env.POSTGRES_PASSWORD; 13 | const SUPER_USER = process.env.SUPER_USER; 14 | const SUPER_USER_PASSWORD = process.env.SUPER_USER_PASSWORD; 15 | 16 | // const DB_HOST = process.env.DB_HOST; 17 | const DB_NAME = process.env.DB_NAME; 18 | const PG = `${COMPOSE_PROJECT_NAME}_db_1`; 19 | 20 | const psqlVersion = spawnSync('psql', ['--version']); 21 | const havePsql = (psqlVersion.stdout && psqlVersion.stdout.toString('utf8').trim().length > 0); 22 | 23 | export function restService () { 24 | return request(process.env.SERVER_PROXY_URI); 25 | } 26 | 27 | export function resetdb () { 28 | let pg; 29 | if (havePsql) { 30 | var env = Object.create(process.env); 31 | env.PGPASSWORD = SUPER_USER_PASSWORD; 32 | pg = spawnSync('psql', ['-h', 'localhost', '-U', SUPER_USER, DB_NAME, '-f', process.env.PWD + '/db/src/sample_data/reset.sql'], { env: env }); 33 | } else { 34 | pg = spawnSync('docker', ['exec', PG, 'psql', '-U', SUPER_USER, DB_NAME, '-f', 'docker-entrypoint-initdb.d/sample_data/reset.sql']); 35 | } 36 | if (pg.status !== 0) { 37 | throw new Error(`Could not reset database in rest tests. 
Error = ${pg.stderr.toString()}`); 38 | } 39 | } 40 | 41 | request.Test.prototype.withRole = function (role) { 42 | if (typeof role !== 'string') { 43 | throw new TypeError('The role must be given as a string'); 44 | } 45 | 46 | const payload = { 47 | user_id: 1, 48 | role, 49 | // Pretend that the JWT was issued 30 seconds ago in the past 50 | iat: Math.floor(Date.now() / 1000) - 30 51 | }; 52 | 53 | const jwt = jsonwebtoken.sign(payload, process.env.JWT_SECRET); 54 | 55 | return this.set('Authorization', `Bearer ${jwt}`); 56 | }; 57 | -------------------------------------------------------------------------------- /tests/rest/constants.js: -------------------------------------------------------------------------------- 1 | export const searchPath = 'stac/search'; 2 | export const itemsPath = 'items'; 3 | export const collectionsPath = 'collections'; 4 | export const wfsItemsPath = 'collections/landsat-8-l1/items'; 5 | -------------------------------------------------------------------------------- /tests/rest/datetime.js: -------------------------------------------------------------------------------- 1 | import { restService, resetdb } from './common'; 2 | import should from 'should'; // eslint-disable-line no-unused-vars 3 | import { searchPath, itemsPath, wfsItemsPath } from './constants'; 4 | 5 | describe('datetime filter', function () { 6 | before(function (done) { resetdb(); done(); }); 7 | after(function (done) { resetdb(); done(); }); 8 | 9 | it('Handles date range queries', function (done) { 10 | restService() 11 | .post(searchPath) 12 | .send({ 13 | datetime: '2019-04-01T12:00/2019-08-21T14:02' 14 | }) 15 | .expect('Content-Type', /json/) 16 | .expect(200, done) 17 | .expect(r => { 18 | r.body.features.length.should.equal(1); 19 | }); 20 | }); 21 | 22 | it('Handles dates not in range', function (done) { 23 | restService() 24 | .post(searchPath) 25 | .send({ 26 | datetime: '2018-04-01T12:00/2018-08-21T14:02' 27 | }) 28 | .expect('Content-Type', /json/) 
29 | .expect(200, done) 30 | .expect(r => { 31 | r.body.features.length.should.equal(0); 32 | }); 33 | }); 34 | it('Datetime can be passed as query parameter in GET', function (done) { 35 | restService() 36 | .get(itemsPath) 37 | .query({ 38 | datetime: '2019-04-01T12:00/2019-08-21T14:02' 39 | }) 40 | .expect('Content-Type', /json/) 41 | .expect(200, done) 42 | .expect(r => { 43 | r.body.features.length.should.equal(1); 44 | }); 45 | }); 46 | 47 | it('Datetime can be passed as query parameter in GET for wfs items', function (done) { 48 | restService() 49 | .get(wfsItemsPath) 50 | .query({ 51 | datetime: '2019-04-01T12:00/2019-08-21T14:02' 52 | }) 53 | .expect('Content-Type', /json/) 54 | .expect(200, done) 55 | .expect(r => { 56 | r.body.features.length.should.equal(1); 57 | }); 58 | }); 59 | }); 60 | -------------------------------------------------------------------------------- /tests/rest/fields.js: -------------------------------------------------------------------------------- 1 | import should from 'should'; // eslint-disable-line no-unused-vars 2 | import { restService, resetdb } from './common'; 3 | import { searchPath } from './constants'; 4 | 5 | describe('fields extension', function () { 6 | before(function (done) { resetdb(); done(); }); 7 | after(function (done) { resetdb(); done(); }); 8 | 9 | it('includes default fields when include and exclude are null', function (done) { 10 | restService() 11 | .post(searchPath) 12 | .send({ 13 | fields: {} 14 | }) 15 | .expect('Content-Type', /json/) 16 | .expect(200, done) 17 | .expect(r => { 18 | r.body.features[0].should.have.property('id'); 19 | r.body.features[0].should.have.property('type'); 20 | r.body.features[0].should.have.property('geometry'); 21 | r.body.features[0].should.have.property('properties'); 22 | r.body.features[0].should.have.property('assets'); 23 | r.body.features[0].properties.should.have.property('datetime'); 24 | }); 25 | }); 26 | 27 | it('includes default fields when include and 
exclude are empty', function (done) { 28 | restService() 29 | .post(searchPath) 30 | .send({ 31 | fields: { 32 | include: [], 33 | exclude: [] 34 | } 35 | }) 36 | .expect('Content-Type', /json/) 37 | .expect(200, done) 38 | .expect(r => { 39 | r.body.features[0].should.have.property('id'); 40 | r.body.features[0].should.have.property('type'); 41 | r.body.features[0].should.have.property('geometry'); 42 | r.body.features[0].should.have.property('properties'); 43 | r.body.features[0].should.have.property('assets'); 44 | r.body.features[0].properties.should.have.property('datetime'); 45 | }); 46 | }); 47 | 48 | it('if only include is specified, properties are added to the defaults', 49 | function (done) { 50 | restService() 51 | .post(searchPath) 52 | .send({ 53 | fields: { 54 | include: [ 55 | 'properties.landsat:row', 56 | 'properties.eo:cloud_cover' 57 | ] 58 | } 59 | }) 60 | .expect('Content-Type', /json/) 61 | .expect(200, done) 62 | .expect(r => { 63 | r.body.features[0].properties.should.have.property('datetime'); 64 | r.body.features[0].properties.should.have.property('landsat:row'); 65 | r.body.features[0].properties.should.have.property('eo:cloud_cover'); 66 | }); 67 | }); 68 | 69 | it('if only exclude is specified excluded fields are subtracted from' + 70 | ' the defaults. 
May result in an invalid item', 71 | function (done) { 72 | restService() 73 | .post(searchPath) 74 | .send({ 75 | fields: { 76 | exclude: [ 77 | 'assets' 78 | ] 79 | } 80 | }) 81 | .expect('Content-Type', /json/) 82 | .expect(200, done) 83 | .expect(r => { 84 | r.body.features[0].should.not.have.property('assets'); 85 | r.body.features[0].properties.should.have.property('datetime'); 86 | }); 87 | }); 88 | 89 | it('if the same field is specified in include and exclude, the include wins', 90 | function (done) { 91 | restService() 92 | .post(searchPath) 93 | .send({ 94 | fields: { 95 | include: [ 96 | 'properties.landsat:row' 97 | ], 98 | exclude: [ 99 | 'properties.landsat:row' 100 | ] 101 | } 102 | }) 103 | .expect('Content-Type', /json/) 104 | .expect(200, done) 105 | .expect(r => { 106 | r.body.features[0].properties.should.have.property('landsat:row'); 107 | }); 108 | }); 109 | }); 110 | -------------------------------------------------------------------------------- /tests/rest/ids_filter.js: -------------------------------------------------------------------------------- 1 | import { restService, resetdb } from './common'; 2 | import should from 'should'; // eslint-disable-line no-unused-vars 3 | import { searchPath, itemsPath, wfsItemsPath } from './constants'; 4 | 5 | describe('ids filter', function () { 6 | before(function (done) { resetdb(); done(); }); 7 | after(function (done) { resetdb(); done(); }); 8 | 9 | it('Ids filter for search endpoint', function (done) { 10 | restService() 11 | .post(searchPath) 12 | .send({ 13 | ids: ['LC80370382019170', 'LC81392162019261'] 14 | }) 15 | .expect('Content-Type', /json/) 16 | .expect(200, done) 17 | .expect(r => { 18 | r.body.features.length.should.equal(2); 19 | }); 20 | }); 21 | 22 | it('Ids filter as query parameter for items GET', function (done) { 23 | restService() 24 | .get(itemsPath) 25 | .query({ 26 | ids: 'LC80370382019170,LC81392162019261' 27 | }) 28 | .expect('Content-Type', /json/) 29 | .expect(200, 
done) 30 | .expect(r => { 31 | r.body.features.length.should.equal(2); 32 | }); 33 | }); 34 | 35 | it('Ids can be passed as query parameter in GET for wfs items', function (done) { 36 | restService() 37 | .get(wfsItemsPath) 38 | .query({ 39 | ids: 'LC80370382019170,LC81392162019261' 40 | }) 41 | .expect('Content-Type', /json/) 42 | .expect(200, done) 43 | .expect(r => { 44 | r.body.features.length.should.equal(2); 45 | }); 46 | }); 47 | 48 | it('Ids filter works with the fields filter', function (done) { 49 | restService() 50 | .post(searchPath) 51 | .send({ 52 | ids: ['LC80370382019170', 'LC81392162019261'], 53 | fields: { 54 | exclude: ['assets'] 55 | } 56 | }) 57 | .expect('Content-Type', /json/) 58 | .expect(200, done) 59 | .expect(r => { 60 | r.body.features.length.should.equal(2); 61 | should.not.exist(r.body.features[0].assets); 62 | }); 63 | }); 64 | 65 | it('Ids filter ignores further query parameter', function (done) { 66 | restService() 67 | .post(searchPath) 68 | .send({ 69 | ids: ['LC80370382019170', 'LC81392162019261'], 70 | fields: { 71 | exclude: ['assets'] 72 | }, 73 | query: { 74 | 'eo:cloud_cover': { 75 | gt: 10 76 | } 77 | } 78 | }) 79 | .expect('Content-Type', /json/) 80 | .expect(200, done) 81 | .expect(r => { 82 | r.body.features.length.should.equal(2); 83 | should.not.exist(r.body.features[0].assets); 84 | }); 85 | }); 86 | }); 87 | -------------------------------------------------------------------------------- /tests/rest/intersects.js: -------------------------------------------------------------------------------- 1 | import { restService, resetdb } from './common'; 2 | import should from 'should'; // eslint-disable-line no-unused-vars 3 | import { searchPath, itemsPath } from './constants'; 4 | import intersectPolygon from './intersects.json'; 5 | import intersectsPoint from './intersectsPoint.json'; 6 | 7 | describe('intersects filter', function () { 8 | before(function (done) { resetdb(); done(); }); 9 | after(function (done) { 
resetdb(); done(); }); 10 | 11 | it('Handles intersects filter for search endpoint with POST', function (done) { 12 | restService() 13 | .post(searchPath) 14 | .send({ 15 | intersects: intersectPolygon 16 | }) 17 | .expect('Content-Type', /json/) 18 | .expect(200, done) 19 | .expect(r => { 20 | r.body.features.length.should.equal(1); 21 | }); 22 | }); 23 | 24 | it('Handles intersects filter for items endpoint with GET and query parameter', 25 | function (done) { 26 | restService() 27 | .get(itemsPath) 28 | .query({ 29 | intersects: JSON.stringify(intersectPolygon) 30 | }) 31 | .expect('Content-Type', /json/) 32 | .expect(200, done) 33 | .expect(r => { 34 | r.body.features.length.should.equal(1); 35 | }); 36 | }); 37 | 38 | it('Handles intersects with point geometry', function (done) { 39 | restService() 40 | .post(searchPath) 41 | .send({ 42 | intersects: intersectsPoint 43 | }) 44 | .expect('Content-Type', /json/) 45 | .expect(200, done) 46 | .expect(r => { 47 | r.body.features.length.should.equal(1); 48 | }); 49 | }); 50 | }); 51 | -------------------------------------------------------------------------------- /tests/rest/intersects.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "Polygon", 3 | "coordinates": [ 4 | [ 5 | [ 6 | -113.851318359375, 7 | 31.89621446335144 8 | ], 9 | [ 10 | -113.08227539062499, 11 | 31.372399104880525 12 | ], 13 | [ 14 | -112.664794921875, 15 | 31.512995857454676 16 | ], 17 | [ 18 | -112.379150390625, 19 | 32.222095840502334 20 | ], 21 | [ 22 | -113.258056640625, 23 | 32.18491105051798 24 | ], 25 | [ 26 | -113.851318359375, 27 | 31.89621446335144 28 | ] 29 | ] 30 | ] 31 | } 32 | -------------------------------------------------------------------------------- /tests/rest/intersectsPoint.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "Point", 3 | "coordinates": [ 4 | -105.5621337890625, 5 | 30.424992973925598 6 | ] 7 | } 8 
| -------------------------------------------------------------------------------- /tests/rest/items.js: -------------------------------------------------------------------------------- 1 | import { restService, resetdb } from './common'; 2 | import should from 'should'; // eslint-disable-line no-unused-vars 3 | import landsatItem from './landsatItem.json'; 4 | import landsatItems from './landsatItems.json'; 5 | import { itemsPath } from './constants'; 6 | 7 | const proxy = process.env.SERVER_PROXY_URI; 8 | describe('items', function () { 9 | beforeEach(function (done) { resetdb(); done(); }); 10 | afterEach(function (done) { resetdb(); done(); }); 11 | it('Initial insert of an item returns 201', function (done) { 12 | restService() 13 | .post(itemsPath) 14 | .set('Prefer', 'return=minimal') 15 | .set('Content-Type', 'application/json') 16 | .withRole('application') 17 | .send(landsatItem) 18 | .expect(201, done); 19 | }); 20 | 21 | it('Insert an item without a valid JWT or role returns 401', function (done) { 22 | restService() 23 | .post(itemsPath) 24 | .set('Prefer', 'return=minimal') 25 | .set('Content-Type', 'application/json') 26 | .send(landsatItem) 27 | .expect(401, done); 28 | }); 29 | 30 | it('Inserting an item with a duplicate id returns 409', function (done) { 31 | restService() 32 | .post(itemsPath) 33 | .set('Prefer', 'return=minimal') 34 | .set('Content-Type', 'application/json') 35 | .withRole('application') 36 | .send(landsatItem) 37 | .end(() => { 38 | restService() 39 | .post(itemsPath) 40 | .set('Prefer', 'return=minimal') 41 | .set('Content-Type', 'application/json') 42 | .withRole('application') 43 | .send(landsatItem) 44 | .expect(409, done); 45 | }); 46 | }); 47 | 48 | it('Adds self and parent links based on apiUrl value', function (done) { 49 | restService() 50 | .get(itemsPath) 51 | .expect('Content-Type', /json/) 52 | .expect(200, done) 53 | .expect(r => { 54 | r.body.features[0].links.length.should.equal(2); 55 | 
r.body.features[0].links.should.containDeep([{ 56 | rel: 'self', 57 | href: `${proxy}collections/landsat-8-l1/LC80320392019263`, 58 | type: 'application/geo+json', 59 | title: null 60 | }, 61 | { 62 | rel: 'parent', 63 | href: `${proxy}collections/landsat-8-l1`, 64 | type: 'application/json', 65 | title: null 66 | }]); 67 | }); 68 | }); 69 | 70 | it('Merges derived_from link if included in inserted item', function (done) { 71 | restService() 72 | .post(itemsPath) 73 | .set('Prefer', 'return=minimal') 74 | .set('Content-Type', 'application/json') 75 | .withRole('application') 76 | .send(landsatItem) 77 | .end(() => { 78 | restService() 79 | .get(itemsPath) 80 | .expect('Content-Type', /json/) 81 | .expect(200, done) 82 | .expect(r => { 83 | r.body.features[2].links.length.should.equal(4); 84 | r.body.features[2].links.should.containDeep([ 85 | { 86 | rel: 'self', 87 | href: `${proxy}collections/landsat-8-l1/LC81152062019205`, 88 | type: 'application/geo+json', 89 | title: null 90 | }, 91 | { 92 | rel: 'parent', 93 | href: `${proxy}collections/landsat-8-l1`, 94 | type: 'application/json', 95 | title: null 96 | }, 97 | { 98 | rel: 'derived_from', 99 | href: 'derived', 100 | title: null, 101 | type: null 102 | }]); 103 | }); 104 | }); 105 | }); 106 | 107 | it('Insert an array of items', function (done) { 108 | restService() 109 | .post(itemsPath) 110 | .set('Prefer', 'return=minimal') 111 | .set('Content-Type', 'application/json') 112 | .withRole('application') 113 | .send(landsatItems) 114 | .end(() => { 115 | restService() 116 | .get(itemsPath) 117 | .set('Content-Type', 'application/json') 118 | .expect(200, done) 119 | .expect(r => { 120 | r.body.features.length.should.equal(5); 121 | }); 122 | }); 123 | }); 124 | 125 | it('Delete is restricted for items', function (done) { 126 | restService() 127 | .delete(itemsPath) 128 | .set('Prefer', 'return=minimal') 129 | .set('Content-Type', 'application/json') 130 | .withRole('application') 131 | .expect(405, done); 132 | 
}); 133 | 134 | it('Patch is restricted for items', function (done) { 135 | restService() 136 | .patch(itemsPath) 137 | .set('Prefer', 'return=minimal') 138 | .set('Content-Type', 'application/json') 139 | .withRole('application') 140 | .expect(405, done); 141 | }); 142 | }); 143 | -------------------------------------------------------------------------------- /tests/rest/landsat8l2Collection.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "landsat-8-l2", 3 | "description": "Landat 8 imagery radiometrically calibrated and orthorectified using gound points and Digital Elevation Model (DEM) data to correct relief displacement.", 4 | "license": "MIT", 5 | "extent": { 6 | "bbox": [-180.0, -90.0, 180.0, 90.0], 7 | "interval": [["2009-01-01T00:00:00Z", null]] 8 | }, 9 | "links": [{ 10 | "rel": "derived_from", 11 | "href": "derived", 12 | "type": null, 13 | "title": null 14 | }, 15 | { 16 | "rel": "derived_from", 17 | "href": "derived", 18 | "type": null, 19 | "title": null 20 | } 21 | ], 22 | "properties": { 23 | "eo:gsd": 15, 24 | "eo:platform": "landsat-8", 25 | "eo:instrument": "OLI_TIRS", 26 | "eo:off_nadir": 0, 27 | "eo:bands": [ 28 | { 29 | "name": "B1", 30 | "common_name": "coastal", 31 | "gsd": 30, 32 | "center_wavelength": 0.44, 33 | "full_width_half_max": 0.02 34 | }, 35 | { 36 | "name": "B2", 37 | "common_name": "blue", 38 | "gsd": 30, 39 | "center_wavelength": 0.48, 40 | "full_width_half_max": 0.06 41 | }, 42 | { 43 | "name": "B3", 44 | "common_name": "green", 45 | "gsd": 30, 46 | "center_wavelength": 0.56, 47 | "full_width_half_max": 0.06 48 | }, 49 | { 50 | "name": "B4", 51 | "common_name": "red", 52 | "gsd": 30, 53 | "center_wavelength": 0.65, 54 | "full_width_half_max": 0.04 55 | }, 56 | { 57 | "name": "B5", 58 | "common_name": "nir", 59 | "gsd": 30, 60 | "center_wavelength": 0.86, 61 | "full_width_half_max": 0.03 62 | }, 63 | { 64 | "name": "B6", 65 | "common_name": "swir16", 66 | "gsd": 30, 67 | 
"center_wavelength": 1.6, 68 | "full_width_half_max": 0.08 69 | }, 70 | { 71 | "name": "B7", 72 | "common_name": "swir22", 73 | "gsd": 30, 74 | "center_wavelength": 2.2, 75 | "full_width_half_max": 0.2 76 | }, 77 | { 78 | "name": "B8", 79 | "common_name": "pan", 80 | "gsd": 15, 81 | "center_wavelength": 0.59, 82 | "full_width_half_max": 0.18 83 | }, 84 | { 85 | "name": "B9", 86 | "common_name": "cirrus", 87 | "gsd": 30, 88 | "center_wavelength": 1.37, 89 | "full_width_half_max": 0.02 90 | }, 91 | { 92 | "name": "B10", 93 | "common_name": "lwir11", 94 | "gsd": 100, 95 | "center_wavelength": 10.9, 96 | "full_width_half_max": 0.8 97 | }, 98 | { 99 | "name": "B11", 100 | "common_name": "lwir12", 101 | "gsd": 100, 102 | "center_wavelength": 12, 103 | "full_width_half_max": 1 104 | } 105 | ] 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /tests/rest/landsatItem.json: -------------------------------------------------------------------------------- 1 | { 2 | "stac_version": "0", 3 | "links": [{ 4 | "rel": "derived_from", 5 | "href": "derived", 6 | "type": null, 7 | "title": null 8 | }, 9 | { 10 | "rel": "derived_from", 11 | "href": "derived", 12 | "type": null, 13 | "title": null 14 | }], 15 | "geometry": { 16 | "coordinates": [ 17 | [ 18 | [ 19 | -79.15036257344, 20 | 31.073356917267 21 | ], 22 | [ 23 | -81.131111855504, 24 | 30.674004943359 25 | ], 26 | [ 27 | -81.613077436032, 28 | 32.409029367536 29 | ], 30 | [ 31 | -79.629383540244, 32 | 32.801841359618 33 | ], 34 | [ 35 | -79.15036257344, 36 | 31.073356917267 37 | ] 38 | ] 39 | ], 40 | "type": "Polygon" 41 | }, 42 | "datetime": "2019-07-24T03:13:30.564234+00:00", 43 | "id": "LC81152062019205", 44 | "bbox": [ 45 | -81.61417, 46 | 30.67334, 47 | -79.14719, 48 | 32.80377 49 | ], 50 | "collection": "landsat-8-l1", 51 | "properties": { 52 | "landsat:revision": "00", 53 | "landsat:tier": "RT", 54 | "eo:cloud_cover": -1, 55 | "eo:column": "115", 56 | "eo:off_nadir": 0, 
57 | "landsat:processing_level": "L1GT", 58 | "landsat:product_id": "LC08_L1GT_115206_20190724_20190724_01_RT", 59 | "eo:sun_azimuth": -36.57719268, 60 | "landsat:scene_id": "LC81152062019205LGN00", 61 | "eo:sun_elevation": -29.09100653, 62 | "datetime": "2019-07-24T03:13:30.564234+00:00", 63 | "eo:platform": "landsat-8", 64 | "eo:row": "206", 65 | "eo:instrument": "OLI_TIRS" 66 | }, 67 | "assets": { 68 | "B11": { 69 | "eo:bands": [ 70 | 10 71 | ], 72 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_B11.TIF", 73 | "title": "Band 11 (lwir)", 74 | "type": "image/x.geotiff" 75 | }, 76 | "thumbnail": { 77 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_thumb_large.jpg", 78 | "title": "Thumbnail image", 79 | "type": "image/jpeg" 80 | }, 81 | "B1": { 82 | "eo:bands": [ 83 | 0 84 | ], 85 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_B1.TIF", 86 | "title": "Band 1 (coastal)", 87 | "type": "image/x.geotiff" 88 | }, 89 | "B4": { 90 | "eo:bands": [ 91 | 3 92 | ], 93 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_B4.TIF", 94 | "title": "Band 4 (red)", 95 | "type": "image/x.geotiff" 96 | }, 97 | "index": { 98 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_MTL.txt", 99 | "title": "HTML index page", 100 | "type": "text/html" 101 | }, 102 | "B2": { 103 | "eo:bands": [ 104 | 1 105 | ], 106 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_B2.TIF", 107 | "title": "Band 2 (blue)", 108 | "type": "image/x.geotiff" 109 | 
}, 110 | "MTL": { 111 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_MTL.txt", 112 | "title": "original metadata file", 113 | "type": "text/plain" 114 | }, 115 | "B5": { 116 | "eo:bands": [ 117 | 4 118 | ], 119 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_B5.TIF", 120 | "title": "Band 5 (nir)", 121 | "type": "image/x.geotiff" 122 | }, 123 | "B3": { 124 | "eo:bands": [ 125 | 2 126 | ], 127 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_B3.TIF", 128 | "title": "Band 3 (green)", 129 | "type": "image/x.geotiff" 130 | }, 131 | "B10": { 132 | "eo:bands": [ 133 | 9 134 | ], 135 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_B10.TIF", 136 | "title": "Band 10 (lwir)", 137 | "type": "image/x.geotiff" 138 | }, 139 | "B6": { 140 | "eo:bands": [ 141 | 5 142 | ], 143 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_B6.TIF", 144 | "title": "Band 6 (swir16)", 145 | "type": "image/x.geotiff" 146 | }, 147 | "ANG": { 148 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_ANG.txt", 149 | "title": "Angle coefficients file", 150 | "type": "text/plain" 151 | }, 152 | "B9": { 153 | "eo:bands": [ 154 | 8 155 | ], 156 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_B9.TIF", 157 | "title": "Band 9 (cirrus)", 158 | "type": "image/x.geotiff" 159 | }, 160 | "B7": { 161 | "eo:bands": [ 162 | 6 163 | ], 164 | "href": 
"https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_B7.TIF", 165 | "title": "Band 7 (swir22)", 166 | "type": "image/x.geotiff" 167 | }, 168 | "B8": { 169 | "eo:bands": [ 170 | 7 171 | ], 172 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_B8.TIF", 173 | "title": "Band 8 (pan)", 174 | "type": "image/x.geotiff" 175 | } 176 | }, 177 | "type": "Feature" 178 | } 179 | -------------------------------------------------------------------------------- /tests/rest/landsatItems.json: -------------------------------------------------------------------------------- 1 | [{ 2 | "stac_version": "0", 3 | "links": [{ 4 | "rel": "derived_from", 5 | "href": "derived", 6 | "type": null, 7 | "title": null 8 | }], 9 | "geometry": { 10 | "coordinates": [ 11 | [ 12 | [ 13 | -79.15036257344, 14 | 31.073356917267 15 | ], 16 | [ 17 | -81.131111855504, 18 | 30.674004943359 19 | ], 20 | [ 21 | -81.613077436032, 22 | 32.409029367536 23 | ], 24 | [ 25 | -79.629383540244, 26 | 32.801841359618 27 | ], 28 | [ 29 | -79.15036257344, 30 | 31.073356917267 31 | ] 32 | ] 33 | ], 34 | "type": "Polygon" 35 | }, 36 | "datetime": "2019-07-24T03:13:30.564234+00:00", 37 | "id": "LC81152062019205", 38 | "bbox": [ 39 | -81.61417, 40 | 30.67334, 41 | -79.14719, 42 | 32.80377 43 | ], 44 | "collection": "landsat-8-l1", 45 | "properties": { 46 | "landsat:revision": "00", 47 | "landsat:tier": "RT", 48 | "eo:cloud_cover": -1, 49 | "eo:column": "115", 50 | "eo:off_nadir": 0, 51 | "landsat:processing_level": "L1GT", 52 | "landsat:product_id": "LC08_L1GT_115206_20190724_20190724_01_RT", 53 | "eo:sun_azimuth": -36.57719268, 54 | "landsat:scene_id": "LC81152062019205LGN00", 55 | "eo:sun_elevation": -29.09100653, 56 | "datetime": "2019-07-24T03:13:30.564234+00:00", 57 | "eo:platform": "landsat-8", 58 | "eo:row": "206", 59 | "eo:instrument": 
"OLI_TIRS" 60 | }, 61 | "assets": { 62 | "B11": { 63 | "eo:bands": [ 64 | 10 65 | ], 66 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_B11.TIF", 67 | "title": "Band 11 (lwir)", 68 | "type": "image/x.geotiff" 69 | }, 70 | "thumbnail": { 71 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_thumb_large.jpg", 72 | "title": "Thumbnail image", 73 | "type": "image/jpeg" 74 | }, 75 | "B1": { 76 | "eo:bands": [ 77 | 0 78 | ], 79 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_B1.TIF", 80 | "title": "Band 1 (coastal)", 81 | "type": "image/x.geotiff" 82 | }, 83 | "B4": { 84 | "eo:bands": [ 85 | 3 86 | ], 87 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_B4.TIF", 88 | "title": "Band 4 (red)", 89 | "type": "image/x.geotiff" 90 | }, 91 | "index": { 92 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_MTL.txt", 93 | "title": "HTML index page", 94 | "type": "text/html" 95 | }, 96 | "B2": { 97 | "eo:bands": [ 98 | 1 99 | ], 100 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_B2.TIF", 101 | "title": "Band 2 (blue)", 102 | "type": "image/x.geotiff" 103 | }, 104 | "MTL": { 105 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_MTL.txt", 106 | "title": "original metadata file", 107 | "type": "text/plain" 108 | }, 109 | "B5": { 110 | "eo:bands": [ 111 | 4 112 | ], 113 | "href": 
"https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_B5.TIF", 114 | "title": "Band 5 (nir)", 115 | "type": "image/x.geotiff" 116 | }, 117 | "B3": { 118 | "eo:bands": [ 119 | 2 120 | ], 121 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_B3.TIF", 122 | "title": "Band 3 (green)", 123 | "type": "image/x.geotiff" 124 | }, 125 | "B10": { 126 | "eo:bands": [ 127 | 9 128 | ], 129 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_B10.TIF", 130 | "title": "Band 10 (lwir)", 131 | "type": "image/x.geotiff" 132 | }, 133 | "B6": { 134 | "eo:bands": [ 135 | 5 136 | ], 137 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_B6.TIF", 138 | "title": "Band 6 (swir16)", 139 | "type": "image/x.geotiff" 140 | }, 141 | "ANG": { 142 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_ANG.txt", 143 | "title": "Angle coefficients file", 144 | "type": "text/plain" 145 | }, 146 | "B9": { 147 | "eo:bands": [ 148 | 8 149 | ], 150 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_B9.TIF", 151 | "title": "Band 9 (cirrus)", 152 | "type": "image/x.geotiff" 153 | }, 154 | "B7": { 155 | "eo:bands": [ 156 | 6 157 | ], 158 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_B7.TIF", 159 | "title": "Band 7 (swir22)", 160 | "type": "image/x.geotiff" 161 | }, 162 | "B8": { 163 | "eo:bands": [ 164 | 7 165 | ], 166 | "href": 
"https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_B8.TIF", 167 | "title": "Band 8 (pan)", 168 | "type": "image/x.geotiff" 169 | } 170 | }, 171 | "type": "Feature" 172 | }, 173 | { 174 | "stac_version": "0", 175 | "links": [{ 176 | "rel": "derived_from", 177 | "href": "derived", 178 | "type": null, 179 | "title": null 180 | }], 181 | "geometry": { 182 | "coordinates": [ 183 | [ 184 | [ 185 | -79.15036257344, 186 | 31.073356917267 187 | ], 188 | [ 189 | -81.131111855504, 190 | 30.674004943359 191 | ], 192 | [ 193 | -81.613077436032, 194 | 32.409029367536 195 | ], 196 | [ 197 | -79.629383540244, 198 | 32.801841359618 199 | ], 200 | [ 201 | -79.15036257344, 202 | 31.073356917267 203 | ] 204 | ] 205 | ], 206 | "type": "Polygon" 207 | }, 208 | "datetime": "2019-07-24T03:13:30.564234+00:00", 209 | "id": "copiedItem", 210 | "bbox": [ 211 | -81.61417, 212 | 30.67334, 213 | -79.14719, 214 | 32.80377 215 | ], 216 | "collection": "landsat-8-l1", 217 | "properties": { 218 | "landsat:revision": "00", 219 | "landsat:tier": "RT", 220 | "eo:cloud_cover": -1, 221 | "eo:column": "115", 222 | "eo:off_nadir": 0, 223 | "landsat:processing_level": "L1GT", 224 | "landsat:product_id": "LC08_L1GT_115206_20190724_20190724_01_RT", 225 | "eo:sun_azimuth": -36.57719268, 226 | "landsat:scene_id": "LC81152062019205LGN00", 227 | "eo:sun_elevation": -29.09100653, 228 | "datetime": "2019-07-24T03:13:30.564234+00:00", 229 | "eo:platform": "landsat-8", 230 | "eo:row": "206", 231 | "eo:instrument": "OLI_TIRS" 232 | }, 233 | "assets": { 234 | "B11": { 235 | "eo:bands": [ 236 | 10 237 | ], 238 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_B11.TIF", 239 | "title": "Band 11 (lwir)", 240 | "type": "image/x.geotiff" 241 | }, 242 | "thumbnail": { 243 | "href": 
"https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_thumb_large.jpg", 244 | "title": "Thumbnail image", 245 | "type": "image/jpeg" 246 | }, 247 | "B1": { 248 | "eo:bands": [ 249 | 0 250 | ], 251 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_B1.TIF", 252 | "title": "Band 1 (coastal)", 253 | "type": "image/x.geotiff" 254 | }, 255 | "B4": { 256 | "eo:bands": [ 257 | 3 258 | ], 259 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_B4.TIF", 260 | "title": "Band 4 (red)", 261 | "type": "image/x.geotiff" 262 | }, 263 | "index": { 264 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_MTL.txt", 265 | "title": "HTML index page", 266 | "type": "text/html" 267 | }, 268 | "B2": { 269 | "eo:bands": [ 270 | 1 271 | ], 272 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_B2.TIF", 273 | "title": "Band 2 (blue)", 274 | "type": "image/x.geotiff" 275 | }, 276 | "MTL": { 277 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_MTL.txt", 278 | "title": "original metadata file", 279 | "type": "text/plain" 280 | }, 281 | "B5": { 282 | "eo:bands": [ 283 | 4 284 | ], 285 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_B5.TIF", 286 | "title": "Band 5 (nir)", 287 | "type": "image/x.geotiff" 288 | }, 289 | "B3": { 290 | "eo:bands": [ 291 | 2 292 | ], 293 | "href": 
"https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_B3.TIF", 294 | "title": "Band 3 (green)", 295 | "type": "image/x.geotiff" 296 | }, 297 | "B10": { 298 | "eo:bands": [ 299 | 9 300 | ], 301 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_B10.TIF", 302 | "title": "Band 10 (lwir)", 303 | "type": "image/x.geotiff" 304 | }, 305 | "B6": { 306 | "eo:bands": [ 307 | 5 308 | ], 309 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_B6.TIF", 310 | "title": "Band 6 (swir16)", 311 | "type": "image/x.geotiff" 312 | }, 313 | "ANG": { 314 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_ANG.txt", 315 | "title": "Angle coefficients file", 316 | "type": "text/plain" 317 | }, 318 | "B9": { 319 | "eo:bands": [ 320 | 8 321 | ], 322 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_B9.TIF", 323 | "title": "Band 9 (cirrus)", 324 | "type": "image/x.geotiff" 325 | }, 326 | "B7": { 327 | "eo:bands": [ 328 | 6 329 | ], 330 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_B7.TIF", 331 | "title": "Band 7 (swir22)", 332 | "type": "image/x.geotiff" 333 | }, 334 | "B8": { 335 | "eo:bands": [ 336 | 7 337 | ], 338 | "href": "https://landsat-pds.s3.amazonaws.com/c1/L8/115/206/LC08_L1GT_115206_20190724_20190724_01_RT/LC08_L1GT_115206_20190724_20190724_01_RT_B8.TIF", 339 | "title": "Band 8 (pan)", 340 | "type": "image/x.geotiff" 341 | } 342 | }, 343 | "type": "Feature" 344 | }] 345 | 
import { restService, resetdb } from './common';
import should from 'should'; // eslint-disable-line no-unused-vars
import { searchPath, itemsPath } from './constants';

// Paging behaviour of the API: `next` picks the page offset and `limit`
// caps the number of returned features; `Prefer: count=exact` additionally
// requests exact totals, surfaced through the Content-Range header.
describe('next and limit filters', function () {
  before(function (done) { resetdb(); done(); });
  after(function (done) { resetdb(); done(); });

  it('Limits response search POST', function (done) {
    restService()
      .post(searchPath)
      .send({ next: 1, limit: 2 })
      .expect('Content-Type', /json/)
      .expect(200, done)
      .expect((res) => {
        res.body.features.length.should.equal(2);
      });
  });

  it('Limits response for items GET', function (done) {
    restService()
      .get(itemsPath)
      .query({ next: 0, limit: 2 })
      .expect('Content-Type', /json/)
      .expect(200, done)
      .expect((res) => {
        res.body.features.length.should.equal(2);
      });
  });

  it('Prefer header returns current range and totals for item GET',
    function (done) {
      restService()
        .get(itemsPath)
        .query({ next: 0, limit: 2 })
        .set('Prefer', 'count=exact')
        .expect('Content-Type', /json/)
        // Should be a 206 Partial Content
        .expect(206, done)
        .expect((res) => {
          // Content-Range is "<first>-<last>/<total>"; check the range part.
          const range = res.headers['content-range'].split('/')[0];
          range.should.equal('0-1');
        });
    });

  it('Prefer header returns current range and totals for search POST',
    function (done) {
      restService()
        .post(searchPath)
        .send({
          query: {
            'eo:cloud_cover': {
              lt: 100
            }
          },
          next: 0,
          limit: 2
        })
        .set('Prefer', 'count=exact')
        .expect('Content-Type', /json/)
        .expect(200, done)
        .expect((res) => {
          const range = res.headers['content-range'].split('/')[0];
          range.should.equal('0-1');
        });
    });
});
import { restService, resetdb } from './common';
import should from 'should'; // eslint-disable-line no-unused-vars
import { searchPath } from './constants';

// Tests for the STAC `query` extension on POST /search: comparison
// operators (eq/lt/gte/lte/in) applied to collection-level and item-level
// properties, alone and combined with datetime filtering.
// NOTE(fix): the `in` operator tests previously shared verbatim duplicate
// titles ('in operator with strings' x2, 'in operator with numbers' x2),
// which made mocha failure reports ambiguous; they are retitled uniquely.
describe('query extension', function () {
  before(function (done) { resetdb(); done(); });
  after(function (done) { resetdb(); done(); });

  it('Uses collection properties and item properties for query', function (done) {
    restService()
      .post(searchPath)
      .send({
        query: {
          'eo:gsd': {
            eq: 15
          }
        }
      })
      .expect('Content-Type', /json/)
      .expect(200, done)
      .expect(r => {
        r.body.features.length.should.be.above(1);
        // eo:gsd matched at the collection level must not leak into the
        // item's own properties.
        r.body.features[0].properties.should.not.have.property('eo:gsd');
      });
  });

  it('Handles queries with no bbox or intersects', function (done) {
    restService()
      .post(searchPath)
      .send({
        query: {
          'eo:cloud_cover': {
            eq: 26
          }
        }
      })
      .expect('Content-Type', /json/)
      .expect(200, done)
      .expect(r => {
        r.body.features.length.should.equal(1);
      });
  });

  it('gte lte operators', function (done) {
    restService()
      .post(searchPath)
      .send({
        query: {
          'eo:cloud_cover': {
            gte: 26,
            lte: 40
          }
        }
      })
      .expect('Content-Type', /json/)
      .expect(200, done)
      .expect(r => {
        r.body.features.length.should.equal(1);
      });
  });

  it('in operator with multiple strings', function (done) {
    restService()
      .post(searchPath)
      .send({
        query: {
          'landsat:column': {
            in: ['032', '037']
          }
        }
      })
      .expect('Content-Type', /json/)
      .expect(200, done)
      .expect(r => {
        r.body.features.length.should.equal(2);
      });
  });

  it('in operator with a single string', function (done) {
    restService()
      .post(searchPath)
      .send({
        query: {
          'landsat:processing_level': {
            in: ['L1TP']
          }
        }
      })
      .expect('Content-Type', /json/)
      .expect(200, done)
      .expect(r => {
        r.body.features.length.should.equal(2);
      });
  });

  it('in operator with a single number', function (done) {
    restService()
      .post(searchPath)
      .send({
        query: {
          'eo:cloud_cover': {
            in: [0]
          }
        }
      })
      .expect('Content-Type', /json/)
      .expect(200, done)
      .expect(r => {
        r.body.features.length.should.equal(1);
      });
  });

  it('in operator with multiple numbers', function (done) {
    restService()
      .post(searchPath)
      .send({
        query: {
          'eo:epsg': {
            in: [32610, 32613]
          }
        }
      })
      .expect('Content-Type', /json/)
      .expect(200, done)
      .expect(r => {
        r.body.features.length.should.equal(2);
      });
  });

  it('Json field queries with numbers', function (done) {
    restService()
      .post(searchPath)
      .send({
        query: {
          'eo:cloud_cover': {
            lt: 100
          }
        }
      })
      .expect('Content-Type', /json/)
      .expect(200, done)
      .expect(r => {
        r.body.features.length.should.equal(3);
      });
  });

  it('Json field queries with strings', function (done) {
    restService()
      .post(searchPath)
      .send({
        query: {
          'landsat:processing_level': {
            eq: 'L1TP'
          }
        }
      })
      .expect('Content-Type', /json/)
      .expect(200, done)
      .expect(r => {
        r.body.features.length.should.equal(2);
      });
  });

  it('Handles multiple query properties', function (done) {
    restService()
      .post(searchPath)
      .send({
        query: {
          'eo:cloud_cover': {
            lt: 6
          },
          'eo:sun_azimuth': {
            lt: 50
          },
          'landsat:row': {
            eq: '216'
          }
        }
      })
      .expect('Content-Type', /json/)
      .expect(200, done)
      .expect(r => {
        r.body.features.length.should.equal(1);
      });
  });

  it('Handles false in multiple query properties', function (done) {
    restService()
      .post(searchPath)
      .send({
        query: {
          'eo:cloud_cover': {
            lt: 6
          },
          'eo:sun_azimuth': {
            lt: 50
          },
          'landsat:row': {
            eq: '0'
          }
        }
      })
      .expect('Content-Type', /json/)
      .expect(200, done)
      .expect(r => {
        // landsat:row '0' matches nothing, so the AND of all clauses is empty.
        r.body.features.length.should.equal(0);
      });
  });

  it('Includes datetime as part of posted query clause', function (done) {
    restService()
      .post(searchPath)
      .send({
        query: {
          'eo:cloud_cover': {
            lt: 6
          },
          'eo:sun_azimuth': {
            lt: 50
          },
          'landsat:row': {
            eq: '0'
          }
        },
        datetime: '2019-04-01T12:00/2019-08-21T14:02'
      })
      .expect('Content-Type', /json/)
      .expect(200, done)
      .expect(r => {
        r.body.features.length.should.equal(0);
      });
  });
});
/* eslint no-unused-expressions: 0 */
import { restService, resetdb } from './common';
import should from 'should'; // eslint-disable-line no-unused-vars
const proxy = process.env.SERVER_PROXY_URI;

// The root STAC endpoint must expose catalog metadata plus links to the
// collections, conformance, and self resources, built from the configured
// proxy URI.
describe('root endpoint', function () {
  before(function (done) { resetdb(); done(); });
  after(function (done) { resetdb(); done(); });

  it('Returns the correct object structure', function (done) {
    const expectedLinks = [
      {
        href: `${proxy}collections`,
        rel: 'data',
        type: 'application/json',
        title: null
      },
      {
        href: `${proxy}conformance`,
        rel: 'conformance',
        type: 'application/json',
        title: null
      },
      {
        // self link is the proxy URI without its trailing slash
        href: proxy.slice(0, -1),
        rel: 'self',
        type: 'application/json',
        title: null
      }
    ];

    restService()
      .get('')
      .expect('Content-Type', /json/)
      .expect(200, done)
      .expect((res) => {
        res.body.id.should.exist;
        res.body.title.should.exist;
        res.body.description.should.exist;
        res.body.stac_version.should.exist;
        res.body.links.should.containDeep(expectedLinks);
      });
  });
});
import { restService, resetdb } from './common';
import should from 'should'; // eslint-disable-line no-unused-vars
import { searchPath, itemsPath } from './constants';

// Tests for the STAC `sort` extension: default datetime ordering plus
// explicit asc/desc sorting on nested item properties.
// NOTE(fix): the 'Search sorts desc by nested property' test was duplicated
// verbatim (identical title, payload, and assertions); the redundant copy
// has been removed.
describe('sort extension', function () {
  before(function (done) { resetdb(); done(); });
  after(function (done) { resetdb(); done(); });

  it('Default sort by datetime for search POST', function (done) {
    restService()
      .post(searchPath)
      .expect('Content-Type', /json/)
      .expect(200, done)
      .expect(r => {
        // With no sort specified, results come back newest first.
        const firstDate = Date.parse(r.body.features[0].properties.datetime);
        const secondDate = Date.parse(r.body.features[1].properties.datetime);
        const thirdDate = Date.parse(r.body.features[2].properties.datetime);
        firstDate.should.be.above(secondDate);
        secondDate.should.be.above(thirdDate);
      });
  });

  it('Default sort by datetime for items GET', function (done) {
    restService()
      .get(itemsPath)
      .expect('Content-Type', /json/)
      .expect(200, done)
      .expect(r => {
        const firstDate = Date.parse(r.body.features[0].properties.datetime);
        const secondDate = Date.parse(r.body.features[1].properties.datetime);
        const thirdDate = Date.parse(r.body.features[2].properties.datetime);
        firstDate.should.be.above(secondDate);
        secondDate.should.be.above(thirdDate);
      });
  });

  it('Search sorts desc by nested property', function (done) {
    restService()
      .post(searchPath)
      .send({
        sort: [{
          field: 'properties.eo:cloud_cover',
          direction: 'desc'
        }]
      })
      .expect('Content-Type', /json/)
      .expect(200, done)
      .expect(r => {
        const firstcc = r.body.features[0].properties['eo:cloud_cover'];
        const secondcc = r.body.features[1].properties['eo:cloud_cover'];
        const thirdcc = r.body.features[2].properties['eo:cloud_cover'];
        firstcc.should.be.above(secondcc);
        secondcc.should.be.above(thirdcc);
      });
  });

  it('Search sorts asc by nested property', function (done) {
    restService()
      .post(searchPath)
      .send({
        sort: [{
          field: 'properties.eo:cloud_cover',
          direction: 'asc'
        }]
      })
      .expect('Content-Type', /json/)
      .expect(200, done)
      .expect(r => {
        const firstcc = r.body.features[0].properties['eo:cloud_cover'];
        const secondcc = r.body.features[1].properties['eo:cloud_cover'];
        const thirdcc = r.body.features[2].properties['eo:cloud_cover'];
        firstcc.should.be.below(secondcc);
        secondcc.should.be.below(thirdcc);
      });
  });

  it('Search sorts desc by nested numeric property', function (done) {
    restService()
      .post(searchPath)
      .send({
        sort: [{
          field: 'properties.eo:sun_azimuth',
          direction: 'desc'
        }]
      })
      .expect('Content-Type', /json/)
      .expect(200, done)
      .expect(r => {
        const firstaz = r.body.features[0].properties['eo:sun_azimuth'];
        const secondaz = r.body.features[1].properties['eo:sun_azimuth'];
        const thirdaz = r.body.features[2].properties['eo:sun_azimuth'];
        firstaz.should.be.above(secondaz);
        secondaz.should.be.above(thirdaz);
      });
  });
});
import { restService, resetdb } from './common';
import should from 'should'; // eslint-disable-line no-unused-vars
import { collectionsPath } from './constants';

// Tests for the WFS3-style endpoints: collection lookup by id, listing a
// collection's items, scoping searches by collection id, and fetching a
// single item by id.
// NOTE(fix): corrected the typo "items form a collection" -> "items from a
// collection" in the second test's title.
describe('wfs endpoints', function () {
  before(function (done) { resetdb(); done(); });
  after(function (done) { resetdb(); done(); });

  it('Returns specific collection as object', function (done) {
    restService()
      .get(`${collectionsPath}/landsat-8-l1`)
      .expect('Content-Type', /json/)
      .expect(200, done)
      .expect(r => {
        r.body.id.should.equal('landsat-8-l1');
      });
  });

  it('Returns items from a collection as a feature collection', function (done) {
    restService()
      .get(`${collectionsPath}/landsat-8-l1/items`)
      .expect('Content-Type', /json/)
      .expect(200, done)
      .expect(r => {
        // Every returned feature must belong to the requested collection.
        r.body.features.forEach((feature) => {
          feature.collection.should.equal('landsat-8-l1');
        });
      });
  });

  it('Uses collection id in search function and query', function (done) {
    restService()
      .get(`${collectionsPath}/nocollection/items`)
      .query({
        bbox: '-180,-90,180,90'
      })
      .expect('Content-Type', /json/)
      .expect(200, done)
      .expect(r => {
        // Unknown collection: the world-spanning bbox must still match nothing.
        r.body.features.length.should.equal(0);
      });
  });

  it('Returns the specified item id', function (done) {
    restService()
      .get(`${collectionsPath}/landsat-8-l1/items/LC80320392019263`)
      .expect('Content-Type', /json/)
      .expect(200, done)
      .expect(r => {
        r.body.id.should.equal('LC80320392019263');
      });
  });
});