> /var/lib/postgresql/data/postgresql.conf
22 | fi
--------------------------------------------------------------------------------
/db/src/init.sql:
--------------------------------------------------------------------------------
1 | -- some setting to make the output less verbose
2 | \set QUIET on
3 | \set ON_ERROR_STOP on
4 | set client_min_messages to warning;
5 |
6 | -- load some variables from the env
7 | \setenv base_dir :DIR
8 | \set base_dir `if [ $base_dir != ":"DIR ]; then echo $base_dir; else echo "/docker-entrypoint-initdb.d"; fi`
9 | \set anonymous `echo $DB_ANON_ROLE`
10 | \set authenticator `echo $DB_USER`
11 | \set authenticator_pass `echo $DB_PASS`
12 | \set jwt_secret `echo $JWT_SECRET`
13 | \set quoted_jwt_secret '\'' :jwt_secret '\''
14 |
15 |
16 | \echo # Loading database definition
17 | begin;
18 | create extension if not exists pgcrypto;
19 |
20 | \echo # Loading dependencies
21 |
22 | -- functions for storing different settings in a table
23 | \ir libs/settings.sql
24 |
25 | -- functions for reading different http request properties exposed by PostgREST
26 | \ir libs/request.sql
27 |
28 | -- functions for sending messages to RabbitMQ entities
29 | \ir libs/rabbitmq.sql
30 |
31 | -- functions for JWT token generation in the database context
32 | \ir libs/pgjwt.sql
33 |
34 | -- save app settings (they are stored in the settings.secrets table)
35 | select settings.set('jwt_secret', :quoted_jwt_secret);
36 | select settings.set('jwt_lifetime', '3600');
37 |
38 |
39 | \echo # Loading application definitions
40 |
41 | -- private schema where all tables will be defined
42 | -- you can use other names besides "data" or even spread the tables
43 | -- between different schemas. The schema name "data" is just a convention
44 | \ir data/schema.sql
45 |
46 | -- entities inside this schema (which should be only views and stored procedures) will be
47 | -- exposed as API endpoints. Access to them however is still governed by the
48 | -- privileges defined for the current PostgreSQL role making the requests
49 | \ir api/schema.sql
50 |
51 |
52 | \echo # Loading roles and privilege settings
53 | \ir authorization/roles.sql
54 | \ir authorization/privileges.sql
55 |
56 | \echo # Loading sample data
57 | \ir sample_data/data.sql
58 |
59 |
60 | commit;
61 | \echo # ==========================================
62 |
--------------------------------------------------------------------------------
/db/src/libs/pgjwt.sql:
--------------------------------------------------------------------------------
1 | -- adapted from https://github.com/michelp/pgjwt
2 | -- license follows
3 |
4 | -- The MIT License (MIT)
5 |
6 | -- Copyright (c) 2016 Michel Pelletier
7 |
8 | -- Permission is hereby granted, free of charge, to any person obtaining a copy
9 | -- of this software and associated documentation files (the "Software"), to deal
10 | -- in the Software without restriction, including without limitation the rights
11 | -- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 | -- copies of the Software, and to permit persons to whom the Software is
13 | -- furnished to do so, subject to the following conditions:
14 |
15 | -- The above copyright notice and this permission notice shall be included in all
16 | -- copies or substantial portions of the Software.
17 |
18 | -- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 | -- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 | -- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 | -- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 | -- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 | -- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 | -- SOFTWARE.
25 |
26 |
27 | create extension if not exists pgcrypto;
28 | drop schema if exists pgjwt cascade;
29 | create schema pgjwt;
30 | set search_path to pgjwt, public;
31 |
32 | CREATE OR REPLACE FUNCTION url_encode(data bytea) RETURNS text LANGUAGE sql AS $$
33 | SELECT translate(encode(data, 'base64'), E'+/=\n', '-_');
34 | $$;
35 |
36 |
37 | CREATE OR REPLACE FUNCTION url_decode(data text) RETURNS bytea LANGUAGE sql AS $$
38 | WITH t AS (SELECT translate(data, '-_', '+/')),
39 | rem AS (SELECT length((SELECT * FROM t)) % 4) -- compute padding size
40 | SELECT decode(
41 | (SELECT * FROM t) ||
42 | CASE WHEN (SELECT * FROM rem) > 0
43 | THEN repeat('=', (4 - (SELECT * FROM rem)))
44 | ELSE '' END,
45 | 'base64');
46 | $$;
47 |
48 |
49 | CREATE OR REPLACE FUNCTION algorithm_sign(signables text, secret text, algorithm text)
50 | RETURNS text LANGUAGE sql AS $$
51 | WITH
52 | alg AS (
53 | SELECT CASE
54 | WHEN algorithm = 'HS256' THEN 'sha256'
55 | WHEN algorithm = 'HS384' THEN 'sha384'
56 | WHEN algorithm = 'HS512' THEN 'sha512'
57 | ELSE '' END) -- hmac throws error
58 | SELECT pgjwt.url_encode(public.hmac(signables, secret, (select * FROM alg)));
59 | $$;
60 |
61 |
62 | CREATE OR REPLACE FUNCTION sign(payload json, secret text, algorithm text DEFAULT 'HS256')
63 | RETURNS text LANGUAGE sql AS $$
64 | WITH
65 | header AS (
66 | SELECT pgjwt.url_encode(convert_to('{"alg":"' || algorithm || '","typ":"JWT"}', 'utf8'))
67 | ),
68 | payload AS (
69 | SELECT pgjwt.url_encode(convert_to(payload::text, 'utf8'))
70 | ),
71 | signables AS (
72 | SELECT (SELECT * FROM header) || '.' || (SELECT * FROM payload)
73 | )
74 | SELECT
75 | (SELECT * FROM signables)
76 | || '.' ||
77 | pgjwt.algorithm_sign((SELECT * FROM signables), secret, algorithm);
78 | $$;
79 |
80 |
81 | CREATE OR REPLACE FUNCTION verify(token text, secret text, algorithm text DEFAULT 'HS256')
82 | RETURNS table(header json, payload json, valid boolean) LANGUAGE sql AS $$
83 | SELECT
84 | convert_from(pgjwt.url_decode(r[1]), 'utf8')::json AS header,
85 | convert_from(pgjwt.url_decode(r[2]), 'utf8')::json AS payload,
86 | r[3] = pgjwt.algorithm_sign(r[1] || '.' || r[2], secret, algorithm) AS valid
87 | FROM regexp_split_to_array(token, '\.') r;
88 | $$;
89 |
90 |
91 | SET search_path TO public;
92 |
--------------------------------------------------------------------------------
/db/src/libs/rabbitmq.sql:
--------------------------------------------------------------------------------
drop schema if exists rabbitmq cascade;
create schema rabbitmq;
grant usage on schema rabbitmq to public;

-- Queue a message for the pg-amqp-bridge by emitting a NOTIFY on "channel".
-- The bridge splits the payload on the first '|' into the AMQP routing key
-- and the message body (see BRIDGE_CHANNELS in docker-compose.yml).
create or replace function rabbitmq.send_message(
  channel text,
  routing_key text,
  message text) returns void as $$

  select pg_notify(
    channel,
    routing_key || '|' || message
  );
-- pg_notify has a side effect (it queues a notification), so this function
-- must be volatile; declaring it stable lets the planner cache or skip calls.
$$ volatile language sql;
15 |
-- Trigger function that publishes row changes as JSON via rabbitmq.send_message.
-- An optional single trigger argument is a jsonb config with either an
-- "include" or an "exclude" list of column names controlling the payload;
-- NOTIFY payloads have an 8000 byte hard limit, so large columns must be
-- excluded by the trigger definition.
create or replace function rabbitmq.on_row_change() returns trigger as $$
  declare
    routing_key text;
    row jsonb;
    config jsonb;
    excluded_columns text[];
    col text;
  begin
    -- explicit || concatenation (the original relied on implicit adjacent
    -- string-literal concatenation across newlines, which is easy to misread)
    routing_key := 'row_change' ||
      '.table-' || TG_TABLE_NAME::text ||
      '.event-' || TG_OP::text;
    -- for DELETE send the old row; for INSERT/UPDATE send the new one
    if (TG_OP = 'DELETE') then
        row := row_to_json(old)::jsonb;
    elsif (TG_OP = 'UPDATE') then
        row := row_to_json(new)::jsonb;
    elsif (TG_OP = 'INSERT') then
        row := row_to_json(new)::jsonb;
    end if;

    -- decide what row columns to send based on the config parameter
    if ( TG_NARGS = 1 ) then
        config := TG_ARGV[0];
        if (config ? 'include') then
            -- excluded columns = all row columns minus the "include" list
            -- (set difference between the two arrays)
            excluded_columns := array(
                select unnest(
                    array(select jsonb_object_keys(row))
                )
                except
                select unnest(
                    array(select jsonb_array_elements_text(config->'include'))
                )
            );
        end if;

        if (config ? 'exclude') then
            excluded_columns := array(select jsonb_array_elements_text(config->'exclude'));
        end if;

        if (current_setting('server_version_num')::int >= 100000) then
            -- PostgreSQL 10+ can subtract a whole text[] of keys in one step
            row := row - excluded_columns;
        else
            FOREACH col IN ARRAY excluded_columns
            LOOP
              row := row - col;
            END LOOP;
        end if;
    end if;

    perform rabbitmq.send_message('events', routing_key, row::text);
    return null;
  end;
-- emits a NOTIFY (side effect), so it must not be declared stable
$$ volatile language plpgsql;
73 |
74 |
--------------------------------------------------------------------------------
/db/src/libs/request.sql:
--------------------------------------------------------------------------------
drop schema if exists request cascade;
create schema request;
grant usage on schema request to public;

-- read a per-request setting (GUC); the "true" argument makes
-- current_setting return null instead of raising when the key is unset
create or replace function request.env_var(v text) returns text as $$
    select current_setting(v, true);
$$ stable language sql;

-- JWT claims are exposed by PostgREST as request.jwt.claim.* settings
create or replace function request.jwt_claim(c text) returns text as $$
    select request.env_var('request.jwt.claim.' || c);
$$ stable language sql;

create or replace function request.cookie(c text) returns text as $$
    select request.env_var('request.cookie.' || c);
$$ stable language sql;

create or replace function request.header(h text) returns text as $$
    select request.env_var('request.header.' || h);
$$ stable language sql;

-- current user id from the JWT; 0 for anonymous requests.
-- nullif/coalesce performs a single claim lookup (the case form
-- evaluated request.jwt_claim('user_id') twice per call)
create or replace function request.user_id() returns int as $$
    select coalesce(nullif(request.jwt_claim('user_id'), '')::int, 0);
$$ stable language sql;

create or replace function request.user_role() returns text as $$
    select request.jwt_claim('role')::text;
$$ stable language sql;
32 |
--------------------------------------------------------------------------------
/db/src/libs/settings.sql:
--------------------------------------------------------------------------------
drop schema if exists settings cascade;
create schema settings;

-- key/value store for application secrets and settings;
-- access is expected to go through the security definer functions below
create table settings.secrets (
    key text primary key,
    value text not null
);


create or replace function settings.get(text) returns text as $$
    select value from settings.secrets where key = $1
-- pin search_path: security definer functions resolve unqualified names at
-- call time and are otherwise exposed to search-path hijacking
$$ security definer stable set search_path = settings, pg_temp language sql;

-- upsert a setting value by key
create or replace function settings.set(text, text) returns void as $$
    insert into settings.secrets (key, value)
    values ($1, $2)
    on conflict (key) do update
    set value = $2;
$$ security definer set search_path = settings, pg_temp language sql;
20 |
--------------------------------------------------------------------------------
/db/src/sample_data/README.md:
--------------------------------------------------------------------------------
1 | Sample data in this directory was generated using `datafiller` utility
2 | you can install it like so
3 |
4 | ```shell
5 | wget -q https://raw.githubusercontent.com/memsql/datafiller/master/datafiller -O /usr/local/bin/datafiller \
6 | && chmod +x /usr/local/bin/datafiller
7 | ```
8 |
9 | after it's installed, you can generate sample data using a command like the one below
10 |
11 | ```shell
12 | ( \
13 | cd db/src/data && \
14 | cat users.sql items.sql subitems.sql | datafiller --size=2 > ../sample_data/data.sql \
15 | )
16 | ```
--------------------------------------------------------------------------------
/db/src/sample_data/data.sql:
--------------------------------------------------------------------------------
1 |
2 | -- This file is generated by the DataFiller free software.
3 | -- This software comes without any warranty whatsoever.
4 | -- Use at your own risk. Beware, this script may destroy your data!
5 | -- License is GPLv3, see http://www.gnu.org/copyleft/gpl.html
6 | -- Get latest version from http://www.coelho.net/datafiller.html
7 |
8 | -- Data generated by: /usr/local/bin/datafiller
9 | -- Version 2.0.1-dev (r832 on 2015-11-01)
10 | -- For postgresql on 2017-05-03T12:34:39.879063 (UTC)
11 | --
12 | -- fill table data.user (2)
13 | \echo # filling table data.user (2)
14 | COPY data.user (id,name,email,"password") FROM STDIN (FREEZE ON);
15 | 1 alice alice@email.com pass
16 | 2 bob bob@email.com pass
17 | \.
18 | --
19 | -- fill table data.todo (6)
20 | \echo # filling table data.todo (6)
21 | COPY data.todo (id,todo,private,owner_id) FROM STDIN (FREEZE ON);
22 | 1 item_1 FALSE 1
23 | 2 item_2 TRUE 1
24 | 3 item_3 FALSE 1
25 | 4 item_4 TRUE 2
26 | 5 item_5 TRUE 2
27 | 6 item_6 FALSE 2
28 | \.
29 | --
30 | -- restart sequences
31 | ALTER SEQUENCE data.user_id_seq RESTART WITH 3;
32 | ALTER SEQUENCE data.todo_id_seq RESTART WITH 7;
33 | --
34 | -- analyze modified tables
35 | ANALYZE data.user;
36 | ANALYZE data.todo;
37 |
--------------------------------------------------------------------------------
/db/src/sample_data/reset.sql:
--------------------------------------------------------------------------------
1 | BEGIN;
2 | \set QUIET on
3 | \set ON_ERROR_STOP on
4 | set client_min_messages to warning;
5 | truncate data.todo restart identity cascade;
6 | truncate data.user restart identity cascade;
7 | \ir data.sql
8 | COMMIT;
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 | services:
3 |
4 | ### DB START
5 | # This is the database to which all the other components in the stack will connect and interact with
6 | # (but mostly it's PostgREST that is going to be responsible for the bulk of the db traffic)
7 | # Having the database in a container is very convenient in development but in production you will
8 | # use a separate database instance, like Amazon RDS, i.e. in production this section will be
9 | # commented and in the .env file you will specify the ip of your separate database instance
10 | db:
11 | image: postgres:${PG_VERSION}
12 | ports:
13 | - "5432:5432"
14 | environment:
15 | # env vars specific to postgres image used on first boot
16 | - POSTGRES_USER=${SUPER_USER}
17 | - POSTGRES_PASSWORD=${SUPER_USER_PASSWORD}
18 | - POSTGRES_DB=${DB_NAME}
19 | # env vars useful for our sql scripts
20 | - SUPER_USER=${SUPER_USER}
21 | - SUPER_USER_PASSWORD=${SUPER_USER_PASSWORD}
22 | - DB_NAME=${DB_NAME}
23 | - DB_USER=${DB_USER}
24 | - DB_PASS=${DB_PASS}
25 | - DB_ANON_ROLE=${DB_ANON_ROLE}
26 | - DEVELOPMENT=${DEVELOPMENT}
27 | - JWT_SECRET=${JWT_SECRET}
28 |
29 | volumes:
30 | - "./db/src:/docker-entrypoint-initdb.d"
31 | ### DB END
32 |
33 | # PostgREST instance, is responsible for communicating with the database
34 | # and providing a REST api, (almost) every request that is sent to the database goes through it
35 | postgrest:
36 | image: postgrest/postgrest
37 | ports:
38 | - "3000:3000"
39 | links:
40 | - db:db
41 | environment:
42 | - PGRST_DB_URI=postgres://${DB_USER}:${DB_PASS}@${DB_HOST}:${DB_PORT}/${DB_NAME}
43 | - PGRST_DB_SCHEMA=${DB_SCHEMA}
44 | - PGRST_DB_ANON_ROLE=${DB_ANON_ROLE}
45 | - PGRST_DB_POOL=${DB_POOL}
46 | - PGRST_JWT_SECRET=${JWT_SECRET}
47 | - PGRST_MAX_ROWS=${MAX_ROWS}
48 | - PGRST_PRE_REQUEST=${PRE_REQUEST}
49 | - PGRST_SERVER_PROXY_URI=${SERVER_PROXY_URI}
50 | depends_on:
51 | - db
52 |
53 | # OpenResty (Nginx + Lua) instance that sits in front of PostgREST.
54 | # All the requests coming into the system are first hitting this component.
55 | # After some processing/checks and transformation, the request is forwarded
56 | # to PostgREST down the stack.
57 | openresty:
58 | image: openresty/openresty:stretch
59 | command: ["/usr/bin/openresty", "-g", "daemon off; error_log /dev/stderr info;"]
60 | ports:
61 | - "8080:80"
62 | links:
63 | - db:db
64 | - postgrest:postgrest
65 | environment:
66 | - JWT_SECRET=${JWT_SECRET}
67 | - DEVELOPMENT=${DEVELOPMENT}
68 | - POSTGREST_HOST=${POSTGREST_HOST}
69 | - POSTGREST_PORT=${POSTGREST_PORT}
70 | - DB_HOST=${DB_HOST}
71 | - DB_PORT=${DB_PORT}
72 | - DB_NAME=${DB_NAME}
73 | - DB_SCHEMA=${DB_SCHEMA}
74 | - DB_USER=${DB_USER}
75 | - DB_PASS=${DB_PASS}
76 | volumes:
77 | - "./openresty/nginx:/usr/local/openresty/nginx/conf"
78 | - "./openresty/html:/usr/local/openresty/nginx/html"
79 | - "./openresty/lua:/usr/local/openresty/lualib/user_code"
80 | depends_on:
81 | - postgrest
82 |
83 | # pg-amqp-bridge instance is responsible for forwarding NOTIFY events in PostgreSQL
84 | # to RabbitMQ based on the BRIDGE_CHANNELS configuration
85 | pg_amqp_bridge:
86 | image: subzerocloud/pg-amqp-bridge
87 | links:
88 | - db
89 | - rabbitmq
90 | environment:
91 | - RUST_LOG=info # output forwarded messages
92 | - POSTGRESQL_URI=postgres://${DB_USER}:${DB_PASS}@${DB_HOST}:${DB_PORT}/${DB_NAME}
93 | - AMQP_URI=amqp://${RABBITMQ_DEFAULT_USER}:${RABBITMQ_DEFAULT_PASS}@rabbitmq//
94 | - BRIDGE_CHANNELS=events:amq.topic
95 | depends_on:
96 | - db
97 |
98 | # RabbitMQ instance can be used to consolidate events that originated in your database/application.
99 | # You can connect here with different consumers and take actions based on those events (like sending signup emails)
100 | rabbitmq:
101 | image: rabbitmq:3-management
102 | ports:
103 | - "5671:5671"
104 | - "5672:5672"
105 | - "15672:15672"
106 | environment:
107 | - RABBITMQ_DEFAULT_USER=${RABBITMQ_DEFAULT_USER}
108 | - RABBITMQ_DEFAULT_PASS=${RABBITMQ_DEFAULT_PASS}
109 |
110 |
--------------------------------------------------------------------------------
/openresty/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM openresty/openresty:stretch
2 |
3 | COPY nginx /usr/local/openresty/nginx/conf
4 | COPY html /usr/local/openresty/nginx
5 | COPY lua /usr/local/openresty/lualib/user_code
6 |
--------------------------------------------------------------------------------
/openresty/html/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | Welcome to PostgREST Starter Kit!
5 |
6 |
12 |
20 |
21 |
22 | PostgREST Starter Kit!
23 |
24 | Your API is up and running.
25 |
26 | API Endpoints
27 |
30 |
31 |
32 | Explore
33 | curl rest/todos?select=id,todo
34 |
35 | Support and Documentation
36 |
43 |
44 | Developed by subZero
45 |
46 |
47 |
--------------------------------------------------------------------------------
/openresty/lua/hooks.lua:
--------------------------------------------------------------------------------
1 | local utils = require 'utils'
2 | local cjson = require 'cjson'
3 |
4 | local function on_init()
5 | -- print "on_init called"
6 | end
7 |
8 | local function on_rest_request()
9 | -- print "on_rest_request called"
10 | end
11 |
12 | local function before_rest_response()
13 | -- print "before_rest_response called"
14 | -- postprocess response
15 | -- utils.set_body_postprocess_mode(utils.postprocess_modes.ALL)
16 | -- utils.set_body_postprocess_fn(function(body)
17 | -- local b = cjson.decode(body)
18 | -- b.custom_key = 'custom_value'
19 | -- return cjson.encode(b)
20 | -- end)
21 | end
22 |
23 | return {
24 | on_init = on_init,
25 | on_rest_request = on_rest_request,
26 | before_rest_response = before_rest_response,
27 | }
--------------------------------------------------------------------------------
/openresty/lua/init_phase.lua:
--------------------------------------------------------------------------------
1 | cjson = require('cjson')
2 | utils = require('utils')
3 |
4 | hooks = require("hooks")
5 |
6 | if type(hooks.on_init) == 'function' then
7 | hooks.on_init()
8 | end
9 |
10 |
--------------------------------------------------------------------------------
/openresty/lua/internal_rest_body_filter_phase.lua:
--------------------------------------------------------------------------------
1 | -- call body postprocess hook function
2 | -- to trigger this code, one would have these lines in one of
3 | -- the hooks (on_rest_request, before_rest_response)
4 | --[[
5 | utils.set_body_postprocess_mode(utils.postprocess_modes.ALL)
6 | utils.set_body_postprocess_fn(function(body)
7 | local b = cjson.decode(body)
8 | b.custom_key = 'custom_value'
9 | return cjson.encode(b)
10 | end)
11 | --]]
12 | local mode = utils.get_body_postprocess_mode()
13 | local fn = utils.get_body_postprocess_fn()
14 | if type(fn) == 'function' then
15 | if mode == utils.postprocess_modes.CHUNKS then
16 | ngx.arg[1], ngx.arg[2] = fn(ngx.arg[1], ngx.arg[2])
17 | end
18 |
19 | if mode == utils.postprocess_modes.ALL then
20 | local response_body = utils.buffer_response_body()
21 | if response_body then
22 | ngx.arg[1] = fn(response_body)
23 | end
24 | end
25 | end
26 |
--------------------------------------------------------------------------------
/openresty/lua/internal_rest_header_filter_phase.lua:
--------------------------------------------------------------------------------
1 | -- call hook function if present
2 | if type(hooks.before_rest_response) == 'function' then
3 | hooks.before_rest_response()
4 | end
--------------------------------------------------------------------------------
/openresty/lua/internal_rest_rewrite_phase.lua:
--------------------------------------------------------------------------------
1 | -- support /endpoint/:id url style
2 | local m, err = ngx.re.match(ngx.var.uri, "^/([a-z_]+)/([0-9]+)")
3 | if m then
4 | ngx.req.set_uri('/' .. m[1])
5 | local args = ngx.req.get_uri_args()
6 | args.id = 'eq.' .. m[2]
7 | ngx.req.set_uri_args(args)
8 | ngx.req.set_header('Accept', 'application/vnd.pgrst.object+json')
9 | end
10 |
11 | -- call hook function if present
12 | if type(hooks.on_rest_request) == 'function' then
13 | hooks.on_rest_request()
14 | end
15 |
--------------------------------------------------------------------------------
/openresty/lua/utils.lua:
--------------------------------------------------------------------------------
-- response body postprocess modes
local NONE = 0
local CHUNKS = 1
local ALL = 2

-- store/retrieve the postprocess mode for the current request (per-request ngx.ctx)
local function set_body_postprocess_mode(mode)
    ngx.ctx.body_postprocess_mode = mode
end

local function get_body_postprocess_mode()
    return ngx.ctx.body_postprocess_mode
end

-- store/retrieve the postprocess callback for the current request
local function set_body_postprocess_fn(fn)
    ngx.ctx.body_postprocess_fn = fn
end

local function get_body_postprocess_fn()
    return ngx.ctx.body_postprocess_fn
end

-- Accumulate response chunks in ngx.ctx across body_filter invocations,
-- suppressing the buffered output; once the last chunk (eof) arrives,
-- return the complete body as one string.
local function buffer_response_body()
    local chunk, eof = ngx.arg[1], ngx.arg[2]
    local buffered = ngx.ctx.buffered_response_body or {}
    ngx.ctx.buffered_response_body = buffered
    if chunk ~= "" then
        buffered[#buffered + 1] = chunk
        ngx.arg[1] = nil
    end
    if eof then
        ngx.ctx.buffered_response_body = nil
        ngx.arg[1] = nil
        return table.concat(buffered)
    end
end

return {
    postprocess_modes = {
        NONE = NONE,
        CHUNKS = CHUNKS,
        ALL = ALL
    },
    set_body_postprocess_mode = set_body_postprocess_mode,
    get_body_postprocess_mode = get_body_postprocess_mode,
    buffer_response_body = buffer_response_body,
    get_body_postprocess_fn = get_body_postprocess_fn,
    set_body_postprocess_fn = set_body_postprocess_fn
}
--------------------------------------------------------------------------------
/openresty/nginx/cors.conf:
--------------------------------------------------------------------------------
1 | # set the needed cors headers
2 | if ($request_method = 'OPTIONS') {
3 | add_header 'Access-Control-Allow-Origin' $http_origin;
4 | add_header 'Access-Control-Allow-Methods' 'GET, POST, PATCH, DELETE, OPTIONS';
5 | add_header 'Access-Control-Allow-Headers' $http_access_control_request_headers;
6 | add_header 'Access-Control-Allow-Credentials' true;
7 | add_header 'Access-Control-Max-Age' 1728000;
8 | add_header 'Content-Type' 'text/plain charset=UTF-8';
9 | add_header 'Content-Length' 0;
10 | return 204;
11 | }
12 | if ($request_method = 'POST') {
13 | set_by_lua_block $dummy {
14 | ngx.header['Access-Control-Allow-Origin'] = ngx.var.http_origin;
15 | ngx.header['Access-Control-Allow-Methods'] = 'GET, POST, PATCH, DELETE, OPTIONS';
16 | ngx.header['Access-Control-Allow-Credentials'] = 'true';
17 | return true
18 | }
19 | }
--------------------------------------------------------------------------------
/openresty/nginx/nginx.conf:
--------------------------------------------------------------------------------
1 | # a list of env vars that nginx will make available for configuration files and Lua code
2 | env POSTGREST_HOST;
3 | env POSTGREST_PORT;
4 | env JWT_SECRET;
5 | env DB_SCHEMA;
6 | env DB_HOST;
7 | env DB_PORT;
8 | env DB_NAME;
9 | env DB_USER;
10 | env DB_PASS;
11 | env DEVELOPMENT;
12 |
13 | worker_processes 1;
14 | events {
15 | worker_connections 1024;
16 | }
17 |
18 | http {
19 |
20 | # set search paths for pure Lua external libraries (';;' is the default path):
21 | lua_package_path '${prefix}../lualib/user_code/?.lua;;';
22 | init_by_lua_file '../lualib/user_code/init_phase.lua';
23 |
24 | # a shorter log format for development
25 | log_format development '[$time_local] "$request" $status $body_bytes_sent "$request_time ms"';
26 |
27 | resolver 127.0.0.11 ipv6=off;
28 |
29 | server {
30 | listen 80 default_server;
31 | server_name _;
32 | charset utf-8;
33 | # uninitialized_variable_warn off;
34 |
35 |
36 | # depending on the env (production/development) switch between log formats
37 | set_by_lua_block $development { return os.getenv('DEVELOPMENT') or "0" }
38 | set $log_development 0;
39 | set $log_production 0;
40 | if ($development = "1") {
41 | set $log_development 1;
42 | }
43 | if ($development = "0") {
44 | set $log_production 1;
45 | }
46 | access_log logs/access.log combined if=$log_production;
47 | access_log logs/access.log development if=$log_development;
48 |
49 | # enable gzip compression
50 | gzip on;
51 | gzip_disable "msie6";
52 | gzip_vary on;
53 | gzip_proxied any;
54 | gzip_comp_level 6;
55 | gzip_buffers 16 8k;
56 | gzip_http_version 1.1;
57 | gzip_types text/plain text/css application/json application/vnd.pgrst.object+json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
58 |
59 | include rest.conf;
60 |
61 | # this is the last location in the chain that points to html directory
62 | # this is where the files of your frontend application would go (html/javascript/css)
63 | location / {
64 | root html;
65 | index index.html index.htm;
66 | }
67 | }
68 | }
69 |
--------------------------------------------------------------------------------
/openresty/nginx/rest.conf:
--------------------------------------------------------------------------------
1 | # variable used by lua code, needs to match line below
2 | set $rest_prefix "/rest";
3 |
4 | # user facing location for the rest api
5 | location /rest {
6 | include cors.conf;
7 |
8 | # rewrite for the main internal location
9 | rewrite ^/rest/(.*)$ /internal/rest/$1;
10 | }
11 |
12 | # main internal location that will forward requests to PostgREST
13 | location /internal/rest/ {
14 | # this location is only available through internal redirects
15 | internal;
16 | default_type application/json;
17 |
18 | rewrite_by_lua_file '../lualib/user_code/internal_rest_rewrite_phase.lua';
19 | header_filter_by_lua_file '../lualib/user_code/internal_rest_header_filter_phase.lua';
20 | body_filter_by_lua_file '../lualib/user_code/internal_rest_body_filter_phase.lua';
21 |
22 | proxy_set_header Accept-Encoding ""; #force postgrest not to gzip the output
23 | proxy_set_header Connection ""; #optimise communication with upstream (keep alive)
24 | proxy_set_header Host $host;
25 | proxy_http_version 1.1;
26 | set_by_lua_block $postgrest_host { return os.getenv('POSTGREST_HOST') or "0" }
27 | set_by_lua_block $postgrest_port { return os.getenv('POSTGREST_PORT') or "0" }
28 | proxy_pass http://$postgrest_host:$postgrest_port; # Reverse proxy to your PostgREST
29 |
30 | # Rewrite the Content-Location header to match our location
31 | proxy_hide_header Content-Location;
32 | set_by_lua_block $rest_prefix { return ngx.var.rest_prefix or "/rest" } # this needs to match user facing location path
33 | more_set_headers 'Content-Location: $rest_prefix$upstream_http_content_location';
34 |
35 | # Debug Info
36 | if ($development = "1") {
37 | more_set_headers 'Request-Time: $request_time';
38 | }
39 |
40 | # this needs to be at the end
41 | rewrite /internal/rest(.+) $1 break;
42 | }
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "starter-kit",
3 | "dependencies": {},
4 | "devDependencies": {
5 | "babel-core": "^6.24.0",
6 | "babel-preset-latest": "^6.24.1",
7 | "dotenv": "4.0.0",
8 | "jsonwebtoken": "^8.3.0",
9 | "mocha": "^6.2.0",
10 | "should": "^11.2.0",
11 | "supertest": "^3.0.0"
12 | },
13 | "scripts": {
14 | "test_rest": "mocha --no-timeouts --require babel-core/register ./tests/rest/",
15 | "test_db": "node tests/bin/test_db.js",
16 | "test": "npm run test_db && npm run test_rest"
17 | },
18 | "author": "ruslan.talpa@subzero.cloud",
19 | "license": "ISC"
20 | }
21 |
--------------------------------------------------------------------------------
/tests/bin/test_db.js:
--------------------------------------------------------------------------------
1 | const spawn = require('child_process').spawn;
2 | require('dotenv').config();
3 | const PG_MAJOR_VERSION = process.env.PG_VERSION.replace(/\..*/,'');
4 | spawn('docker', [
5 | 'run',
6 | '-i',
7 | '-t',
8 | '--rm',
9 | '--name', 'pgtap',
10 | '--net', `${process.env.COMPOSE_PROJECT_NAME}_default`,
11 | '--link', `${process.env.COMPOSE_PROJECT_NAME}_db_1:db`,
12 | '-v', `${process.cwd()}/tests/db/:/test`,
13 | '-e', `HOST=${process.env.DB_HOST}`,
14 | '-e', `DATABASE=${process.env.DB_NAME}`,
15 | '-e', `USER=${process.env.SUPER_USER}`,
16 | '-e', `PASSWORD=${process.env.SUPER_USER_PASSWORD}`, `subzerocloud/pgtap:pg${PG_MAJOR_VERSION}`,
17 | ],
18 | { stdio: 'inherit' });
--------------------------------------------------------------------------------
/tests/db/README.md:
--------------------------------------------------------------------------------
1 | To run the tests in this directory do the following
2 | - change to the root of the project folder
3 | - bring the system up using docker-compose
4 | - run the command below
5 |
6 | ```shell
7 | ( \
8 | source .env && \
9 | docker run -i -t --rm --name pgtap \
10 | --network ${COMPOSE_PROJECT_NAME}_default \
11 | --link ${COMPOSE_PROJECT_NAME}_db_1:db \
12 | -v $(pwd)/tests/db/:/test \
13 | -e HOST=$DB_HOST \
14 | -e DATABASE=$DB_NAME \
15 | -e USER=$SUPER_USER \
16 | -e PASSWORD=$SUPER_USER_PASSWORD \
17 | lren/pgtap:0.96.0-2 \
18 | )
19 | ```
20 |
--------------------------------------------------------------------------------
/tests/db/rls.sql:
--------------------------------------------------------------------------------
1 | begin;
2 | select * from no_plan();
3 |
4 | SELECT schema_privs_are(
5 | 'api', 'webuser', ARRAY['USAGE'],
6 | 'authenticated users should have usage privilege of the api schema'
7 | );
8 |
9 |
10 | -- switch to an anonymous application user
11 | set local role anonymous;
12 | set request.jwt.claim.role = 'anonymous';
13 |
14 | select set_eq(
15 | 'select id from api.todos',
16 | array[ 1, 3, 6 ],
17 | 'only public todos are visible to anonymous users'
18 | );
19 |
20 |
21 | -- switch to a specific application user
22 | set local role webuser;
23 | set request.jwt.claim.role = 'webuser';
24 | set request.jwt.claim.user_id = '1'; --alice
25 |
26 | select set_eq(
27 | 'select id from api.todos where mine = true',
28 | array[ 1, 2, 3 ],
29 | 'can see all his todos'
30 | );
31 |
32 | select set_eq(
33 | 'select id from api.todos',
34 | array[ 1, 2, 3, 6 ],
35 | 'can see his todos and public ones'
36 | );
37 |
38 |
39 |
40 | select * from finish();
41 | rollback;
42 |
--------------------------------------------------------------------------------
/tests/db/simple.sql:
--------------------------------------------------------------------------------
-- Sanity checks: confirm pgTAP itself is wired up correctly by asserting
-- against catalog objects that exist in every PostgreSQL database.
begin;
select * from no_plan();

-- information_schema is guaranteed to exist in any database
select has_schema('information_schema');

select has_view('information_schema', 'routines', 'has routines information_schema.routines view');

select has_column('information_schema', 'routines', 'specific_name', 'has information_schema.routines.specific_name column');

select * from finish();
rollback;
12 |
--------------------------------------------------------------------------------
/tests/db/structure.sql:
--------------------------------------------------------------------------------
-- Schema-structure tests: assert the api schema exposes exactly the
-- expected views and functions.
begin;
select * from no_plan();

-- check_test wraps an inner assertion and verifies both its outcome and
-- its description; the 4th argument must match the description passed to
-- the inner assertion. The original said 'tables present' for a check on
-- VIEWS — fixed to 'views present' in both places so the test still passes
-- but no longer mislabels what it checks.
select * from check_test(
views_are('api', array['todos'], 'views present' ),
true,
'all views are present in api schema',
'views present',
''
);

select * from check_test(
functions_are('api', array['login', 'signup', 'refresh_token', 'me'], 'functions present' ),
true,
'all functions are present in api schema',
'functions present',
''
);

select * from finish();
rollback;
22 |
--------------------------------------------------------------------------------
/tests/rest/auth.js:
--------------------------------------------------------------------------------
1 | import { rest_service, resetdb } from './common'
2 | import { should } from 'should'
3 |
// Integration tests for the auth RPC endpoints exposed through PostgREST.
// Removed committed-out console.log debug lines; kept request/assert order
// (body assertions registered before the terminal .expect(status, done)).
describe('auth', function () {
  // reset the sample data so every run starts from a known state
  before(function (done) { resetdb(); done() })
  after(function (done) { resetdb(); done() })

  it('login', function (done) {
    rest_service()
      .post('/rpc/login')
      // ask PostgREST for a single json object instead of a one-row array
      .set('Accept', 'application/vnd.pgrst.object+json')
      .send({
        email: 'alice@email.com',
        password: 'pass'
      })
      .expect('Content-Type', /json/)
      .expect(r => {
        r.body.me.email.should.equal('alice@email.com')
      })
      .expect(200, done)
  })

  it('me', function (done) {
    rest_service()
      .post('/rpc/me')
      .set('Accept', 'application/vnd.pgrst.object+json')
      .withRole('webuser')
      .send({})
      .expect('Content-Type', /json/)
      .expect(r => {
        r.body.email.should.equal('alice@email.com')
      })
      .expect(200, done)
  })

  it('refresh_token', function (done) {
    rest_service()
      .post('/rpc/refresh_token')
      .set('Accept', 'application/vnd.pgrst.object+json')
      .withRole('webuser')
      .send({})
      .expect('Content-Type', /json/)
      .expect(r => {
        // the endpoint returns a JWT string; just check it is non-empty
        r.body.length.should.be.above(0)
      })
      .expect(200, done)
  })

  it('signup', function (done) {
    rest_service()
      .post('/rpc/signup')
      .set('Accept', 'application/vnd.pgrst.object+json')
      .send({
        name: 'John Doe',
        email: 'john@email.com',
        password: 'pass'
      })
      .expect('Content-Type', /json/)
      .expect(r => {
        r.body.me.email.should.equal('john@email.com')
      })
      .expect(200, done)
  })
})
66 |
--------------------------------------------------------------------------------
/tests/rest/common.js:
--------------------------------------------------------------------------------
1 | import jsonwebtoken from 'jsonwebtoken'
2 | import request from 'supertest'
3 | import { config } from 'dotenv'
4 | import { spawnSync } from 'child_process'
5 | // var execSync = require('child_process').execSync;
6 |
7 | config()// .env file vars added to process.env
8 |
// Connection settings shared by the test helpers below, sourced from .env.
// NOTE(review): POSTGRES_USER, POSTGRES_PASSWORD and DB_HOST were read from
// the environment but never used anywhere in this module, so they were
// removed; restore them if a future helper needs direct db credentials.
const COMPOSE_PROJECT_NAME = process.env.COMPOSE_PROJECT_NAME
const SUPER_USER = process.env.SUPER_USER
const SUPER_USER_PASSWORD = process.env.SUPER_USER_PASSWORD

const DB_NAME = process.env.DB_NAME
// name of the db container created by docker-compose
const PG = `${COMPOSE_PROJECT_NAME}_db_1`

// detect a local psql client on the PATH; resetdb() falls back to
// `docker exec` when none is available
const psql_version = spawnSync('psql', ['--version'])
const have_psql = (psql_version.stdout && psql_version.stdout.toString('utf8').trim().length > 0)
21 |
// Build a supertest client pointed at the public REST endpoint
// (the reverse proxy sitting in front of PostgREST).
export function rest_service () {
  const baseUri = process.env.SERVER_PROXY_URI
  return request(baseUri)
}
25 |
// Reset the database to the sample-data baseline by replaying reset.sql.
// Uses a local psql client when available, otherwise executes psql inside
// the db container. Throws on failure so tests never run against dirty state.
export function resetdb () {
  let pg
  if (have_psql) {
    // build an own-property copy of the env instead of Object.create():
    // child_process only reliably forwards own enumerable properties
    const env = Object.assign({}, process.env, { PGPASSWORD: SUPER_USER_PASSWORD })
    // process.cwd() is the project root when tests are launched from it
    // (more portable than the shell-provided $PWD, which npm may not set)
    pg = spawnSync('psql', ['-h', 'localhost', '-U', SUPER_USER, DB_NAME, '-f', process.cwd() + '/db/src/sample_data/reset.sql'], { env: env })
  } else {
    // NOTE(review): path is relative to the container workdir — confirm it
    // resolves to /docker-entrypoint-initdb.d/sample_data/reset.sql
    pg = spawnSync('docker', ['exec', PG, 'psql', '-U', SUPER_USER, DB_NAME, '-f', 'docker-entrypoint-initdb.d/sample_data/reset.sql'])
  }
  if (pg.status !== 0) {
    throw new Error(`Could not reset database in rest tests. Error = ${pg.stderr.toString()}`)
  }
}
39 |
// Attach a signed Authorization header to a supertest request so it is
// executed as the given database role.
request.Test.prototype.withRole = function (role) {
  if (typeof role !== 'string') {
    throw new TypeError(`The role must be given as a string`)
  }

  // back-date iat by 30s so clock skew between the test host and the
  // server cannot make the token look like it was issued in the future
  const issuedAt = Math.floor(Date.now() / 1000) - 30
  const claims = {
    user_id: 1,
    role,
    iat: issuedAt
  }

  const token = jsonwebtoken.sign(claims, process.env.JWT_SECRET)
  return this.set('Authorization', `Bearer ${token}`)
}
56 |
--------------------------------------------------------------------------------
/tests/rest/read.js:
--------------------------------------------------------------------------------
1 | import { rest_service, resetdb } from './common'
2 | import should from 'should'
3 |
// Read-path tests for the todos endpoint.
// BUG FIX: in supertest, passing a callback to .expect(status, done) ends
// the request immediately, so any .expect(fn) chained AFTER it is never
// evaluated. The original chained the body assertions after
// .expect(200, done), silently skipping them; they are now registered
// first, matching the correct order used in auth.js.
describe('read', function () {
  // reset the sample data so every run starts from a known state
  before(function (done) { resetdb(); done() })
  after(function (done) { resetdb(); done() })

  it('basic', function (done) {
    rest_service()
      .get('/todos?select=id,todo')
      .expect('Content-Type', /json/)
      .expect(r => {
        r.body.length.should.equal(3)
        r.body[0].id.should.equal(1)
      })
      .expect(200, done)
  })

  it('by primary key', function (done) {
    rest_service()
      .get('/todos/1?select=id,todo')
      .expect(r => {
        r.body.id.should.equal(1)
        r.body.todo.should.equal('item_1')
      })
      .expect(200, done)
  })
})
29 |
--------------------------------------------------------------------------------
/tests/rest/simple.js:
--------------------------------------------------------------------------------
1 | import { rest_service } from './common'
2 |
// Smoke test: the API root should answer 200 with a JSON document.
describe('root endpoint', function () {
  it('returns json', function (done) {
    const api = rest_service()
    api.get('/')
      .expect('Content-Type', /json/)
      .expect(200, done)
  })
})
11 |
--------------------------------------------------------------------------------