├── .babelrc ├── .env ├── .gitattributes ├── .gitignore ├── LICENSE.txt ├── README.md ├── db └── src │ ├── api │ ├── login.sql │ ├── me.sql │ ├── refresh_token.sql │ ├── schema.sql │ ├── signup.sql │ └── todos.sql │ ├── authorization │ ├── privileges.sql │ └── roles.sql │ ├── data │ ├── schema.sql │ ├── todo.sql │ └── user.sql │ ├── init.sh │ ├── init.sql │ ├── libs │ ├── pgjwt.sql │ ├── rabbitmq.sql │ ├── request.sql │ └── settings.sql │ └── sample_data │ ├── README.md │ ├── data.sql │ └── reset.sql ├── docker-compose.yml ├── openresty ├── Dockerfile ├── html │ └── index.html ├── lua │ ├── hooks.lua │ ├── init_phase.lua │ ├── internal_rest_body_filter_phase.lua │ ├── internal_rest_header_filter_phase.lua │ ├── internal_rest_rewrite_phase.lua │ └── utils.lua └── nginx │ ├── cors.conf │ ├── nginx.conf │ └── rest.conf ├── package.json └── tests ├── bin └── test_db.js ├── db ├── README.md ├── rls.sql ├── simple.sql └── structure.sql └── rest ├── auth.js ├── common.js ├── read.js └── simple.js /.babelrc: -------------------------------------------------------------------------------- 1 | { 2 | "presets": ["latest"] 3 | } -------------------------------------------------------------------------------- /.env: -------------------------------------------------------------------------------- 1 | # Docker specific configs 2 | # use only letters and numbers for the project name 3 | COMPOSE_PROJECT_NAME=postgreststarterkit 4 | 5 | 6 | # Global configs 7 | DEVELOPMENT=1 8 | JWT_SECRET=reallyreallyreallyreallyverysafe 9 | 10 | # DB connection details (used by all containers) 11 | # set PG_VERSION to match your production db major version 12 | PG_VERSION=11.2 13 | DB_HOST=db 14 | DB_PORT=5432 15 | DB_NAME=app 16 | DB_SCHEMA=api 17 | DB_USER=authenticator 18 | DB_PASS=authenticatorpass 19 | 20 | # OpenResty 21 | POSTGREST_HOST=postgrest 22 | POSTGREST_PORT=3000 23 | 24 | # PostgREST 25 | DB_ANON_ROLE=anonymous 26 | DB_POOL=10 27 | #MAX_ROWS= 28 | #PRE_REQUEST= 29 | 
SERVER_PROXY_URI=http://localhost:8080/rest 30 | 31 | # PostgreSQL container config 32 | # Use this to connect directly to the db running in the container 33 | SUPER_USER=superuser 34 | SUPER_USER_PASSWORD=superuserpass 35 | 36 | # RabbitMQ 37 | RABBITMQ_DEFAULT_USER=admin 38 | RABBITMQ_DEFAULT_PASS=adminpass 39 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # Set the default behavior, in case people don't have core.autocrlf set. 2 | * text=auto 3 | # convert all bash scripts to run with linux line endings by default. 4 | *.sh text eol=lf 5 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | package-lock.json -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | The MIT License 2 | 3 | Copyright (c) 2017-present Ruslan Talpa, subZero Cloud LLC. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # PostgREST Starter Kit 2 | 3 | Base project and tooling for authoring REST API backends with [PostgREST](https://postgrest.com). 4 | 5 | ![PostgREST Starter Kit](https://raw.githubusercontent.com/wiki/subzerocloud/postgrest-starter-kit/images/postgrest-starter-kit.gif "PostgREST Starter Kit") 6 | 7 | 8 | ## Purpose 9 | 10 | PostgREST enables a different way of building data driven API backends. It does "one thing well" and that is to provide you with a REST api over your database, however to build a complex production system that does things like talk to 3rd party systems, sends emails, implements real time updates for browsers, write integration tests, implement authentication, you need additional components. For this reason, some developers either submit feature requests that are not the scope of PostgREST or think of it just as a prototyping utility and not a powerful/flexible production component with excellent performance. This repository aims to be a starting point for all PostgREST based projects and bring all components together under a well defined structure. We also provide tooling that will aid you with iterating on your project and tools/scripts to enable a build pipeline to push everything to production. There are quite a few components in the stack but you can safely comment out pg_amqp_bridge/rabbitmq (or even openresty) instances in docker-compose.yml if you don't need the features/functionality they provide. 
11 | 12 | ## PostgREST+ as a service 13 | Run your PostgREST instance in [subZero cloud](https://subzero.cloud/postgrest-plus.html) and get additional features to the OS version ( [free plan](https://subzero.cloud/pricing.html) available). 14 | 15 | Alternatively, deploy an [enhanced version](https://docs.subzero.cloud/postgrest-plus/) of PostgREST on your infrastructure using binary and docker distributions. 16 | 17 | ✓ **Fully Managed** — subZero automates every part of setup, running and scaling of PostgREST. Let your team focus on what they do best - building your product. Leave PostgREST management and monitoring to the experts.
18 | ✓ **Faster Queries** — Run an enhanced PostgREST version that uses prepared statements instead of inline queries. This results in up to 30% faster response times.
19 | ✓ **Custom Relations** — Define custom relations when automatic detection does not work. This allows you to use the powerful embedding feature even with the most complicated views
20 | ✓ **GraphQL API** — In addition to the REST API you get a GraphQL api with no additional coding. Leverage all the powerful tooling, libraries and integrations for GraphQL in your frontend.
21 | ✓ **Preconfigured Authentication** — Authenticate users with local email/password or using 3rd party OAuth 2.0 providers (google/facebook/github preconfigured)
22 | ✓ **Analytical queries** — Use `GROUP BY`, aggregate and window functions in your queries
23 | 24 | ## Features 25 | 26 | ✓ Cross-platform development on macOS, Windows or Linux inside [Docker](https://www.docker.com/)
27 | ✓ [PostgreSQL](https://www.postgresql.org/) database schema boilerplate with authentication and authorization flow
28 | ✓ [OpenResty](https://openresty.org/en/) configuration files for the reverse proxy
29 | ✓ [RabbitMQ](https://www.rabbitmq.com/) integration through [pg-amqp-bridge](https://github.com/subzerocloud/pg-amqp-bridge)
30 | ✓ [Lua](https://www.lua.org/) functions to hook into each stage of the HTTP request and add custom logic (integrate 3rd party systems)
31 | ✓ Debugging and live code reloading (sql/configs/lua) functionality using [subzero-cli](https://github.com/subzerocloud/subzero-cli)
32 | ✓ Full migration management (migration files are automatically created) through [subzero-cli](https://github.com/subzerocloud/subzero-cli)/[sqitch](http://sqitch.org/)/[apgdiff](https://github.com/subzerocloud/apgdiff)
33 | ✓ SQL unit test using [pgTAP](http://pgtap.org/)
34 | ✓ Integration tests with [SuperTest / Mocha](https://github.com/visionmedia/supertest)
35 | ✓ Docker files for building production images
36 | ✓ Community support on [Slack](https://slack.subzero.cloud/)
37 | ✓ Compatible with [subZero Starter Kit](https://github.com/subzerocloud/subzero-starter-kit) if you need a GraphQL API and a few [more features](https://github.com/subzerocloud/subzero-starter-kit#features) with no additional work
38 | 39 | 40 | ## Directory Layout 41 | 42 | ```bash 43 | . 44 | ├── db # Database schema source files and tests 45 | │ └── src # Schema definition 46 | │ ├── api # Api entities available as REST endpoints 47 | │ ├── data # Definition of source tables that hold the data 48 | │ ├── libs # A collection of modules used throughout the code 49 | │ ├── authorization # Application level roles and their privileges 50 | │ ├── sample_data # A few sample rows 51 | │ └── init.sql # Schema definition entry point 52 | ├── openresty # Reverse proxy configurations and Lua code 53 | │ ├── lua # Application Lua code 54 | │ ├── nginx # Nginx configuration files 55 | │ ├── html # Static frontend files 56 | │ └── Dockerfile # Dockerfile definition for building production images 57 | ├── tests # Tests for all the components 58 | │ ├── db # pgTap tests for the db 59 | │ └── rest # REST interface tests 60 | ├── docker-compose.yml # Defines Docker services, networks and volumes 61 | └── .env # Project configurations 62 | 63 | ``` 64 | 65 | 66 | 67 | ## Installation 68 | 69 | ### Prerequisites 70 | * [Docker](https://www.docker.com) 71 | * [Node.js](https://nodejs.org/en/) 72 | * [subzero-cli](https://github.com/subzerocloud/subzero-cli#install) 73 | 74 | ### Create a New Project 75 | Click **[Use this template]** (green) button. 76 | Choose the name of your new repository, description and public/private state then click **[Create repository from template]** button. 77 | Check out the [step by step guide](https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/creating-a-repository-from-a-template) if you encounter any problems. 78 | 79 | After this, clone the newly created repository to your computer. 
80 | In the root folder of the application, run the docker-compose command 81 | 82 | ```bash 83 | docker-compose up -d 84 | ``` 85 | 86 | The API server will become available at the following endpoints: 87 | 88 | - Frontend [http://localhost:8080/](http://localhost:8080/) 89 | - REST [http://localhost:8080/rest/](http://localhost:8080/rest/) 90 | 91 | Try a simple request 92 | 93 | ```bash 94 | curl http://localhost:8080/rest/todos?select=id,todo 95 | ``` 96 | 97 | ## Development workflow and debugging 98 | 99 | Execute `subzero dashboard` in the root of your project.
100 | After this step you can view the logs of all the stack components (SQL queries will also be logged) and 101 | if you edit a sql/conf/lua file in your project, the changes will immediately be applied. 102 | 103 | 104 | ## Testing 105 | 106 | The starter kit comes with a testing infrastructure setup. 107 | You can write pgTAP tests that run directly in your database, useful for testing the logic that resides in your database (user privileges, Row Level Security, stored procedures). 108 | Integration tests are written in JavaScript. 109 | 110 | Here is how you run them 111 | 112 | ```bash 113 | npm install # Install test dependencies 114 | npm test # Run all tests (db, rest) 115 | npm run test_db # Run pgTAP tests 116 | npm run test_rest # Run integration tests 117 | ``` 118 | 119 | ## Contributing 120 | 121 | Anyone and everyone is welcome to contribute. 122 | 123 | ## Support and Documentation 124 | * [Documentation](https://docs.subzero.cloud/postgrest-starter-kit/) 125 | * [PostgREST API Reference](https://postgrest.com/en/stable/api.html) 126 | * [PostgreSQL Manual](https://www.postgresql.org/docs/current/static/index.html) 127 | * [Slack](https://slack.subzero.cloud/) — Get help, share ideas and feedback 128 | * [GitHub Issues](https://github.com/subzerocloud/postgrest-starter-kit/issues) — Check open issues, send feature requests 129 | 130 | ## License 131 | 132 | Copyright © 2017-present subZero Cloud, LLC.
133 | This source code is licensed under [MIT](https://github.com/subzerocloud/postgrest-starter-kit/blob/master/LICENSE.txt) license
134 | The documentation to the project is licensed under the [CC BY-SA 4.0](http://creativecommons.org/licenses/by-sa/4.0/) license. 135 | 136 | -------------------------------------------------------------------------------- /db/src/api/login.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function login(email text, password text) returns json as $$ 3 | declare 4 | usr record; 5 | begin 6 | 7 | select * from data."user" as u 8 | where u.email = $1 and u.password = public.crypt($2, u.password) 9 | INTO usr; 10 | 11 | if usr is NULL then 12 | raise exception 'invalid email/password'; 13 | else 14 | 15 | return json_build_object( 16 | 'me', json_build_object( 17 | 'id', usr.id, 18 | 'name', usr.name, 19 | 'email', usr.email, 20 | 'role', 'customer' 21 | ), 22 | 'token', pgjwt.sign( 23 | json_build_object( 24 | 'role', usr.role, 25 | 'user_id', usr.id, 26 | 'exp', extract(epoch from now())::integer + settings.get('jwt_lifetime')::int -- token expires in 1 hour 27 | ), 28 | settings.get('jwt_secret') 29 | ) 30 | ); 31 | end if; 32 | end 33 | $$ stable security definer language plpgsql; 34 | -- by default all functions are accessible to the public, we need to remove that and define our specific access rules 35 | revoke all privileges on function login(text, text) from public; -------------------------------------------------------------------------------- /db/src/api/me.sql: -------------------------------------------------------------------------------- 1 | create or replace function me() returns json as $$ 2 | declare 3 | usr record; 4 | begin 5 | 6 | select * from data."user" 7 | where id = request.user_id() 8 | into usr; 9 | 10 | return json_build_object( 11 | 'id', usr.id, 12 | 'name', usr.name, 13 | 'email', usr.email, 14 | 'role', usr.role 15 | ); 16 | end 17 | $$ stable security definer language plpgsql; 18 | 19 | revoke all privileges on function me() from public; 20 | 
-------------------------------------------------------------------------------- /db/src/api/refresh_token.sql: -------------------------------------------------------------------------------- 1 | create or replace function refresh_token() returns text as $$ 2 | declare 3 | usr record; 4 | token text; 5 | begin 6 | 7 | select * from data."user" as u 8 | where id = request.user_id() 9 | into usr; 10 | 11 | if usr is null then 12 | raise exception 'user not found'; 13 | else 14 | token := pgjwt.sign( 15 | json_build_object( 16 | 'role', usr.role, 17 | 'user_id', usr.id, 18 | 'exp', extract(epoch from now())::integer + settings.get('jwt_lifetime')::int -- token expires in 1 hour 19 | ), 20 | settings.get('jwt_secret') 21 | ); 22 | return token; 23 | end if; 24 | end 25 | $$ stable security definer language plpgsql; 26 | 27 | -- by default all functions are accessible to the public, we need to remove that and define our specific access rules 28 | revoke all privileges on function refresh_token() from public; 29 | -------------------------------------------------------------------------------- /db/src/api/schema.sql: -------------------------------------------------------------------------------- 1 | drop schema if exists api cascade; 2 | create schema api; 3 | set search_path = api, public; 4 | 5 | -- this role will be used as the owner of the views in the api schema 6 | -- it is needed for the definition of the RLS policies 7 | drop role if exists api; 8 | create role api; 9 | grant api to current_user; -- this is a workaround for RDS where the master user does not have SUPERUSER priviliges 10 | 11 | 12 | -- our endpoints 13 | \ir login.sql 14 | \ir refresh_token.sql 15 | \ir signup.sql 16 | \ir me.sql 17 | \ir todos.sql 18 | -------------------------------------------------------------------------------- /db/src/api/signup.sql: -------------------------------------------------------------------------------- 1 | create or replace function signup(name text, email text, 
password text) returns json as $$ 2 | declare 3 | usr record; 4 | begin 5 | insert into data."user" as u 6 | (name, email, password) values ($1, $2, $3) 7 | returning * 8 | into usr; 9 | 10 | return json_build_object( 11 | 'me', json_build_object( 12 | 'id', usr.id, 13 | 'name', usr.name, 14 | 'email', usr.email, 15 | 'role', 'customer' 16 | ), 17 | 'token', pgjwt.sign( 18 | json_build_object( 19 | 'role', usr.role, 20 | 'user_id', usr.id, 21 | 'exp', extract(epoch from now())::integer + settings.get('jwt_lifetime')::int -- token expires in 1 hour 22 | ), 23 | settings.get('jwt_secret') 24 | ) 25 | ); 26 | end 27 | $$ security definer language plpgsql; 28 | 29 | revoke all privileges on function signup(text, text, text) from public; 30 | -------------------------------------------------------------------------------- /db/src/api/todos.sql: -------------------------------------------------------------------------------- 1 | -- define the view which is just selecting everything from the underlying table 2 | -- although it looks like a user would see all the rows by looking just at this definition, 3 | -- the RLS policy defined on the underlying table attached to the view owner (api) 4 | -- will make sure only the appropriate roles will be revealed. 5 | -- notice how for the api we don't expose the owner_id column even though it exists and is used 6 | -- in the RLS policy, also, while out table name is "todo", singular, meant to symbolize a data type/model, 7 | -- the view is named "todos", plural, to match the rest conventions. 
8 | create or replace view todos as 9 | select id, todo, private, (owner_id = request.user_id()) as mine from data.todo; 10 | alter view todos owner to api; -- it is important to set the correct owner to the RLS policy kicks in 11 | -------------------------------------------------------------------------------- /db/src/authorization/privileges.sql: -------------------------------------------------------------------------------- 1 | \echo # Loading roles privilege 2 | 3 | -- this file contains the privileges of all aplications roles to each database entity 4 | -- if it gets too long, you can split it one file per entity ore move the permissions 5 | -- to the file where you defined the entity 6 | 7 | -- specify which application roles can access this api (you'll probably list them all) 8 | grant usage on schema api to anonymous, webuser; 9 | 10 | -- set privileges to all the auth flow functions 11 | grant execute on function api.login(text,text) to anonymous; 12 | grant execute on function api.signup(text,text,text) to anonymous; 13 | grant execute on function api.me() to webuser; 14 | grant execute on function api.login(text,text) to webuser; 15 | grant execute on function api.refresh_token() to webuser; 16 | 17 | -- define the who can access todo model data 18 | -- enable RLS on the table holding the data 19 | alter table data.todo enable row level security; 20 | -- define the RLS policy controlling what rows are visible to a particular application user 21 | create policy todo_access_policy on data.todo to api 22 | using ( 23 | -- the authenticated users can see all his todo items 24 | -- notice how the rule changes based on the current user_id 25 | -- which is specific to each individual request 26 | (request.user_role() = 'webuser' and request.user_id() = owner_id) 27 | 28 | or 29 | -- everyone can see public todo 30 | (private = false) 31 | ) 32 | with check ( 33 | -- authenticated users can only update/delete their todos 34 | (request.user_role() = 'webuser' 
and request.user_id() = owner_id) 35 | ); 36 | 37 | 38 | -- give access to the view owner to this table 39 | grant select, insert, update, delete on data.todo to api; 40 | grant usage on data.todo_id_seq to webuser; 41 | 42 | 43 | -- While grants to the view owner and the RLS policy on the underlying table 44 | -- takes care of what rows the view can see, we still need to define what 45 | -- are the rights of our application user in regard to this api view. 46 | 47 | -- authenticated users can request/change all the columns for this view 48 | grant select, insert, update, delete on api.todos to webuser; 49 | 50 | -- anonymous users can only request specific columns from this view 51 | grant select (id, todo) on api.todos to anonymous; 52 | ------------------------------------------------------------------------------- 53 | -------------------------------------------------------------------------------- /db/src/authorization/roles.sql: -------------------------------------------------------------------------------- 1 | -- This file contains the definition of the applications specific roles 2 | -- the roles defined here should not be made owners of database entities (tables/views/...) 
3 | 4 | \echo # Loading roles 5 | 6 | -- the role used by postgrest to connect to the database 7 | -- notice how this role does not have any privileges attached specifically to it 8 | -- it can only switch to other roles 9 | drop role if exists :authenticator; 10 | create role :"authenticator" with login password :'authenticator_pass'; 11 | 12 | -- this is an application level role 13 | -- requests that are not authenticated will be executed with this role's privileges 14 | drop role if exists :"anonymous"; 15 | create role :"anonymous"; 16 | grant :"anonymous" to :"authenticator"; 17 | 18 | -- role for the main application user accessing the api 19 | drop role if exists webuser; 20 | create role webuser; 21 | grant webuser to :"authenticator"; 22 | -------------------------------------------------------------------------------- /db/src/data/schema.sql: -------------------------------------------------------------------------------- 1 | drop schema if exists data cascade; 2 | create schema data; 3 | set search_path = data, public; 4 | 5 | -- import our application models 6 | \ir user.sql 7 | \ir todo.sql 8 | -------------------------------------------------------------------------------- /db/src/data/todo.sql: -------------------------------------------------------------------------------- 1 | create table todo ( 2 | id serial primary key, 3 | todo text not null, 4 | private boolean default true, 5 | owner_id int references "user"(id) default request.user_id() 6 | ); 7 | 8 | -- attach the trigger to send events to rabbitmq 9 | -- there is a 8000 bytes hard limit on the message payload size (PG NOTIFY) so it's better not to send data that is not used 10 | -- on_row_change call can take the following forms 11 | -- on_row_change() - send all columns 12 | -- on_row_change('{"include":["id"]}'::json) - send only the listed columns 13 | -- on_row_change('{"exclude":["bigcolumn"]}'::json) - exclude listed columns from the payload 14 | 15 | create trigger 
send_todo_change_event 16 | after insert or update or delete on todo 17 | for each row execute procedure rabbitmq.on_row_change('{"include":["id","todo"]}'); -------------------------------------------------------------------------------- /db/src/data/user.sql: -------------------------------------------------------------------------------- 1 | create type user_role as enum ('webuser'); 2 | create table "user" ( 3 | id serial primary key, 4 | name text not null, 5 | email text not null unique, 6 | "password" text not null, 7 | "role" user_role not null default 'webuser', 8 | 9 | check (length(name)>2), 10 | check (email ~* '^[A-Za-z0-9._%-]+@[A-Za-z0-9.-]+[.][A-Za-z]+$') 11 | ); 12 | 13 | create or replace function encrypt_pass() returns trigger as $$ 14 | begin 15 | if new.password is not null then 16 | new.password = public.crypt(new.password, public.gen_salt('bf')); 17 | end if; 18 | return new; 19 | end 20 | $$ language plpgsql; 21 | 22 | create trigger user_encrypt_pass_trigger 23 | before insert or update on "user" 24 | for each row 25 | execute procedure encrypt_pass(); 26 | 27 | -- attach the trigger to send events to rabbitmq 28 | -- there is a 8000 bytes hard limit on the message payload size (PG NOTIFY) so it's better not to send data that is not used 29 | -- on_row_change call can take the following forms 30 | -- on_row_change() - send all columns 31 | -- on_row_change('{"include":["id"]}'::json) - send only the listed columns 32 | -- on_row_change('{"exclude":["bigcolumn"]}'::json) - exclude listed columns from the payload 33 | 34 | create trigger send_user_change_event 35 | after insert or update or delete on "user" 36 | for each row execute procedure rabbitmq.on_row_change('{"include":["id","name","email","role"]}'); -------------------------------------------------------------------------------- /db/src/init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | CUSTOM_CONFIG=$(cat <> 
/var/lib/postgresql/data/postgresql.conf 22 | fi -------------------------------------------------------------------------------- /db/src/init.sql: -------------------------------------------------------------------------------- 1 | -- some setting to make the output less verbose 2 | \set QUIET on 3 | \set ON_ERROR_STOP on 4 | set client_min_messages to warning; 5 | 6 | -- load some variables from the env 7 | \setenv base_dir :DIR 8 | \set base_dir `if [ $base_dir != ":"DIR ]; then echo $base_dir; else echo "/docker-entrypoint-initdb.d"; fi` 9 | \set anonymous `echo $DB_ANON_ROLE` 10 | \set authenticator `echo $DB_USER` 11 | \set authenticator_pass `echo $DB_PASS` 12 | \set jwt_secret `echo $JWT_SECRET` 13 | \set quoted_jwt_secret '\'' :jwt_secret '\'' 14 | 15 | 16 | \echo # Loading database definition 17 | begin; 18 | create extension if not exists pgcrypto; 19 | 20 | \echo # Loading dependencies 21 | 22 | -- functions for storing different settins in a table 23 | \ir libs/settings.sql 24 | 25 | -- functions for reading different http request properties exposed by PostgREST 26 | \ir libs/request.sql 27 | 28 | -- functions for sending messages to RabbitMQ entities 29 | \ir libs/rabbitmq.sql 30 | 31 | -- functions for JWT token generation in the database context 32 | \ir libs/pgjwt.sql 33 | 34 | -- save app settings (they are storred in the settings.secrets table) 35 | select settings.set('jwt_secret', :quoted_jwt_secret); 36 | select settings.set('jwt_lifetime', '3600'); 37 | 38 | 39 | \echo # Loading application definitions 40 | 41 | -- private schema where all tables will be defined 42 | -- you can use othere names besides "data" or even spread the tables 43 | -- between different schemas. The schema name "data" is just a convention 44 | \ir data/schema.sql 45 | 46 | -- entities inside this schema (which should be only views and stored procedures) will be 47 | -- exposed as API endpoints. 
Access to them however is still governed by the 48 | -- privileges defined for the current PostgreSQL role making the requests 49 | \ir api/schema.sql 50 | 51 | 52 | \echo # Loading roles and privilege settings 53 | \ir authorization/roles.sql 54 | \ir authorization/privileges.sql 55 | 56 | \echo # Loading sample data 57 | \ir sample_data/data.sql 58 | 59 | 60 | commit; 61 | \echo # ========================================== 62 | -------------------------------------------------------------------------------- /db/src/libs/pgjwt.sql: -------------------------------------------------------------------------------- 1 | -- addapted from https://github.com/michelp/pgjwt 2 | -- license follows 3 | 4 | -- The MIT License (MIT) 5 | 6 | -- Copyright (c) 2016 Michel Pelletier 7 | 8 | -- Permission is hereby granted, free of charge, to any person obtaining a copy 9 | -- of this software and associated documentation files (the "Software"), to deal 10 | -- in the Software without restriction, including without limitation the rights 11 | -- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 12 | -- copies of the Software, and to permit persons to whom the Software is 13 | -- furnished to do so, subject to the following conditions: 14 | 15 | -- The above copyright notice and this permission notice shall be included in all 16 | -- copies or substantial portions of the Software. 17 | 18 | -- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 | -- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 | -- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 21 | -- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 | -- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 | -- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 24 | -- SOFTWARE. 
25 | 26 | 27 | create extension if not exists pgcrypto; 28 | drop schema if exists pgjwt cascade; 29 | create schema pgjwt; 30 | set search_path to pgjwt, public; 31 | 32 | CREATE OR REPLACE FUNCTION url_encode(data bytea) RETURNS text LANGUAGE sql AS $$ 33 | SELECT translate(encode(data, 'base64'), E'+/=\n', '-_'); 34 | $$; 35 | 36 | 37 | CREATE OR REPLACE FUNCTION url_decode(data text) RETURNS bytea LANGUAGE sql AS $$ 38 | WITH t AS (SELECT translate(data, '-_', '+/')), 39 | rem AS (SELECT length((SELECT * FROM t)) % 4) -- compute padding size 40 | SELECT decode( 41 | (SELECT * FROM t) || 42 | CASE WHEN (SELECT * FROM rem) > 0 43 | THEN repeat('=', (4 - (SELECT * FROM rem))) 44 | ELSE '' END, 45 | 'base64'); 46 | $$; 47 | 48 | 49 | CREATE OR REPLACE FUNCTION algorithm_sign(signables text, secret text, algorithm text) 50 | RETURNS text LANGUAGE sql AS $$ 51 | WITH 52 | alg AS ( 53 | SELECT CASE 54 | WHEN algorithm = 'HS256' THEN 'sha256' 55 | WHEN algorithm = 'HS384' THEN 'sha384' 56 | WHEN algorithm = 'HS512' THEN 'sha512' 57 | ELSE '' END) -- hmac throws error 58 | SELECT pgjwt.url_encode(public.hmac(signables, secret, (select * FROM alg))); 59 | $$; 60 | 61 | 62 | CREATE OR REPLACE FUNCTION sign(payload json, secret text, algorithm text DEFAULT 'HS256') 63 | RETURNS text LANGUAGE sql AS $$ 64 | WITH 65 | header AS ( 66 | SELECT pgjwt.url_encode(convert_to('{"alg":"' || algorithm || '","typ":"JWT"}', 'utf8')) 67 | ), 68 | payload AS ( 69 | SELECT pgjwt.url_encode(convert_to(payload::text, 'utf8')) 70 | ), 71 | signables AS ( 72 | SELECT (SELECT * FROM header) || '.' || (SELECT * FROM payload) 73 | ) 74 | SELECT 75 | (SELECT * FROM signables) 76 | || '.' 
|| 77 | pgjwt.algorithm_sign((SELECT * FROM signables), secret, algorithm); 78 | $$; 79 | 80 | 81 | CREATE OR REPLACE FUNCTION verify(token text, secret text, algorithm text DEFAULT 'HS256') 82 | RETURNS table(header json, payload json, valid boolean) LANGUAGE sql AS $$ 83 | SELECT 84 | convert_from(pgjwt.url_decode(r[1]), 'utf8')::json AS header, 85 | convert_from(pgjwt.url_decode(r[2]), 'utf8')::json AS payload, 86 | r[3] = pgjwt.algorithm_sign(r[1] || '.' || r[2], secret, algorithm) AS valid 87 | FROM regexp_split_to_array(token, '\.') r; 88 | $$; 89 | 90 | 91 | SET search_path TO public; 92 | -------------------------------------------------------------------------------- /db/src/libs/rabbitmq.sql: -------------------------------------------------------------------------------- 1 | drop schema if exists rabbitmq cascade; 2 | create schema rabbitmq; 3 | grant usage on schema rabbitmq to public; 4 | 5 | create or replace function rabbitmq.send_message( 6 | channel text, 7 | routing_key text, 8 | message text) returns void as $$ 9 | 10 | select pg_notify( 11 | channel, 12 | routing_key || '|' || message 13 | ); 14 | $$ stable language sql; 15 | 16 | create or replace function rabbitmq.on_row_change() returns trigger as $$ 17 | declare 18 | routing_key text; 19 | row jsonb; 20 | config jsonb; 21 | excluded_columns text[]; 22 | col text; 23 | begin 24 | routing_key := 'row_change' 25 | '.table-'::text || TG_TABLE_NAME::text || 26 | '.event-'::text || TG_OP::text; 27 | if (TG_OP = 'DELETE') then 28 | row := row_to_json(old)::jsonb; 29 | elsif (TG_OP = 'UPDATE') then 30 | row := row_to_json(new)::jsonb; 31 | elsif (TG_OP = 'INSERT') then 32 | row := row_to_json(new)::jsonb; 33 | end if; 34 | 35 | -- decide what row columns to send based on the config parameter 36 | -- there is a 8000 byte hard limit on the payload size so sending many big columns is not possible 37 | if ( TG_NARGS = 1 ) then 38 | config := TG_ARGV[0]; 39 | if (config ? 
'include') then 40 | --excluded_columns := ARRAY(SELECT unnest(jsonb_object_keys(row)::text[]) EXCEPT SELECT unnest( array(select jsonb_array_elements_text(config->'include')) )); 41 | -- this is a diff between two arrays 42 | excluded_columns := array( 43 | -- array of all row columns 44 | select unnest( 45 | array(select jsonb_object_keys(row)) 46 | ) 47 | except 48 | -- array of included columns 49 | select unnest( 50 | array(select jsonb_array_elements_text(config->'include')) 51 | ) 52 | ); 53 | end if; 54 | 55 | if (config ? 'exclude') then 56 | excluded_columns := array(select jsonb_array_elements_text(config->'exclude')); 57 | end if; 58 | 59 | if (current_setting('server_version_num')::int >= 100000) then 60 | row := row - excluded_columns; 61 | else 62 | FOREACH col IN ARRAY excluded_columns 63 | LOOP 64 | row := row - col; 65 | END LOOP; 66 | end if; 67 | end if; 68 | 69 | perform rabbitmq.send_message('events', routing_key, row::text); 70 | return null; 71 | end; 72 | $$ stable language plpgsql; 73 | 74 | -------------------------------------------------------------------------------- /db/src/libs/request.sql: -------------------------------------------------------------------------------- 1 | drop schema if exists request cascade; 2 | create schema request; 3 | grant usage on schema request to public; 4 | 5 | create or replace function request.env_var(v text) returns text as $$ 6 | select current_setting(v, true); 7 | $$ stable language sql; 8 | 9 | create or replace function request.jwt_claim(c text) returns text as $$ 10 | select request.env_var('request.jwt.claim.' || c); 11 | $$ stable language sql; 12 | 13 | create or replace function request.cookie(c text) returns text as $$ 14 | select request.env_var('request.cookie.' || c); 15 | $$ stable language sql; 16 | 17 | create or replace function request.header(h text) returns text as $$ 18 | select request.env_var('request.header.' 
|| h); 19 | $$ stable language sql; 20 | 21 | create or replace function request.user_id() returns int as $$ 22 | select 23 | case coalesce(request.jwt_claim('user_id'),'') 24 | when '' then 0 25 | else request.jwt_claim('user_id')::int 26 | end 27 | $$ stable language sql; 28 | 29 | create or replace function request.user_role() returns text as $$ 30 | select request.jwt_claim('role')::text; 31 | $$ stable language sql; 32 | -------------------------------------------------------------------------------- /db/src/libs/settings.sql: -------------------------------------------------------------------------------- 1 | drop schema if exists settings cascade; 2 | create schema settings; 3 | 4 | create table settings.secrets ( 5 | key text primary key, 6 | value text not null 7 | ); 8 | 9 | 10 | create or replace function settings.get(text) returns text as $$ 11 | select value from settings.secrets where key = $1 12 | $$ security definer stable language sql; 13 | 14 | create or replace function settings.set(text, text) returns void as $$ 15 | insert into settings.secrets (key, value) 16 | values ($1, $2) 17 | on conflict (key) do update 18 | set value = $2; 19 | $$ security definer language sql; 20 | -------------------------------------------------------------------------------- /db/src/sample_data/README.md: -------------------------------------------------------------------------------- 1 | Sample data in this directory was generated using `datafiller` utility 2 | you can install it like so 3 | 4 | ```shell 5 | wget -q https://raw.githubusercontent.com/memsql/datafiller/master/datafiller -O /usr/local/bin/datafiller \ 6 | && chmod +x /usr/local/bin/datafiller 7 | ``` 8 | 9 | after it's installed, you can generate sample data using a comand like the one below 10 | 11 | ```shell 12 | ( \ 13 | cd db/src/data && \ 14 | cat users.sql items.sql subitems.sql | datafiller --size=2 > ../sample_data/data.sql \ 15 | ) 16 | ``` 
-------------------------------------------------------------------------------- /db/src/sample_data/data.sql: -------------------------------------------------------------------------------- 1 | 2 | -- This file is generated by the DataFiller free software. 3 | -- This software comes without any warranty whatsoever. 4 | -- Use at your own risk. Beware, this script may destroy your data! 5 | -- License is GPLv3, see http://www.gnu.org/copyleft/gpl.html 6 | -- Get latest version from http://www.coelho.net/datafiller.html 7 | 8 | -- Data generated by: /usr/local/bin/datafiller 9 | -- Version 2.0.1-dev (r832 on 2015-11-01) 10 | -- For postgresql on 2017-05-03T12:34:39.879063 (UTC) 11 | -- 12 | -- fill table data.user (2) 13 | \echo # filling table data.user (2) 14 | COPY data.user (id,name,email,"password") FROM STDIN (FREEZE ON); 15 | 1 alice alice@email.com pass 16 | 2 bob bob@email.com pass 17 | \. 18 | -- 19 | -- fill table data.todo (6) 20 | \echo # filling table data.todo (6) 21 | COPY data.todo (id,todo,private,owner_id) FROM STDIN (FREEZE ON); 22 | 1 item_1 FALSE 1 23 | 2 item_2 TRUE 1 24 | 3 item_3 FALSE 1 25 | 4 item_4 TRUE 2 26 | 5 item_5 TRUE 2 27 | 6 item_6 FALSE 2 28 | \. 
29 | -- 30 | -- restart sequences 31 | ALTER SEQUENCE data.user_id_seq RESTART WITH 3; 32 | ALTER SEQUENCE data.todo_id_seq RESTART WITH 7; 33 | -- 34 | -- analyze modified tables 35 | ANALYZE data.user; 36 | ANALYZE data.todo; 37 | -------------------------------------------------------------------------------- /db/src/sample_data/reset.sql: -------------------------------------------------------------------------------- 1 | BEGIN; 2 | \set QUIET on 3 | \set ON_ERROR_STOP on 4 | set client_min_messages to warning; 5 | truncate data.todo restart identity cascade; 6 | truncate data.user restart identity cascade; 7 | \ir data.sql 8 | COMMIT; -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | services: 3 | 4 | ### DB START 5 | # This is the database to which the all the other components in the stack will connect and interact with 6 | # (but mostly it's PostgREST that is going to be responsible for the bulk of the db traffic) 7 | # Having the database in a container is very convenient in development but in production you will 8 | # use a separate database instance, like Amazon RDS, i.e. 
in production this section will be 9 | # commented and in the .env file you will specify the ip of your separate database instance 10 | db: 11 | image: postgres:${PG_VERSION} 12 | ports: 13 | - "5432:5432" 14 | environment: 15 | # env vars specific to postgres image used on first boot 16 | - POSTGRES_USER=${SUPER_USER} 17 | - POSTGRES_PASSWORD=${SUPER_USER_PASSWORD} 18 | - POSTGRES_DB=${DB_NAME} 19 | # env vars useful for our sql scripts 20 | - SUPER_USER=${SUPER_USER} 21 | - SUPER_USER_PASSWORD=${SUPER_USER_PASSWORD} 22 | - DB_NAME=${DB_NAME} 23 | - DB_USER=${DB_USER} 24 | - DB_PASS=${DB_PASS} 25 | - DB_ANON_ROLE=${DB_ANON_ROLE} 26 | - DEVELOPMENT=${DEVELOPMENT} 27 | - JWT_SECRET=${JWT_SECRET} 28 | 29 | volumes: 30 | - "./db/src:/docker-entrypoint-initdb.d" 31 | ### DB END 32 | 33 | # PostgREST instance, is responsible for communicating with the database 34 | # and providing a REST api, (almost) every request that is sent to the database goes through it 35 | postgrest: 36 | image: postgrest/postgrest 37 | ports: 38 | - "3000:3000" 39 | links: 40 | - db:db 41 | environment: 42 | - PGRST_DB_URI=postgres://${DB_USER}:${DB_PASS}@${DB_HOST}:${DB_PORT}/${DB_NAME} 43 | - PGRST_DB_SCHEMA=${DB_SCHEMA} 44 | - PGRST_DB_ANON_ROLE=${DB_ANON_ROLE} 45 | - PGRST_DB_POOL=${DB_POOL} 46 | - PGRST_JWT_SECRET=${JWT_SECRET} 47 | - PGRST_MAX_ROWS=${MAX_ROWS} 48 | - PGRST_PRE_REQUEST=${PRE_REQUEST} 49 | - PGRST_SERVER_PROXY_URI=${SERVER_PROXY_URI} 50 | depends_on: 51 | - db 52 | 53 | # OpenResty (Nginx + Lua) instance that sits in front of PostgREST. 54 | # All the requests coming into the system are first hitting this component. 55 | # After some processing/checks and transformation, the request is forwarded 56 | # to PostgREST down the stack. 
57 | openresty: 58 | image: openresty/openresty:stretch 59 | command: ["/usr/bin/openresty", "-g", "daemon off; error_log /dev/stderr info;"] 60 | ports: 61 | - "8080:80" 62 | links: 63 | - db:db 64 | - postgrest:postgrest 65 | environment: 66 | - JWT_SECRET=${JWT_SECRET} 67 | - DEVELOPMENT=${DEVELOPMENT} 68 | - POSTGREST_HOST=${POSTGREST_HOST} 69 | - POSTGREST_PORT=${POSTGREST_PORT} 70 | - DB_HOST=${DB_HOST} 71 | - DB_PORT=${DB_PORT} 72 | - DB_NAME=${DB_NAME} 73 | - DB_SCHEMA=${DB_SCHEMA} 74 | - DB_USER=${DB_USER} 75 | - DB_PASS=${DB_PASS} 76 | volumes: 77 | - "./openresty/nginx:/usr/local/openresty/nginx/conf" 78 | - "./openresty/html:/usr/local/openresty/nginx/html" 79 | - "./openresty/lua:/usr/local/openresty/lualib/user_code" 80 | depends_on: 81 | - postgrest 82 | 83 | # pg-amqp-bridge instance is responsible for forwarding NOTIFY events in PostgreSQL 84 | # to RabbitMQ based on the BRIDGE_CHANNELS configuration 85 | pg_amqp_bridge: 86 | image: subzerocloud/pg-amqp-bridge 87 | links: 88 | - db 89 | - rabbitmq 90 | environment: 91 | - RUST_LOG=info # output forwarded messages 92 | - POSTGRESQL_URI=postgres://${DB_USER}:${DB_PASS}@${DB_HOST}:${DB_PORT}/${DB_NAME} 93 | - AMQP_URI=amqp://${RABBITMQ_DEFAULT_USER}:${RABBITMQ_DEFAULT_PASS}@rabbitmq// 94 | - BRIDGE_CHANNELS=events:amq.topic 95 | depends_on: 96 | - db 97 | 98 | # RabbitMQ instance can be used to consolidate events that originated in your database/application. 
99 | # You can connect here with different consumers and take actions based on those events (like sending signup emails) 100 | rabbitmq: 101 | image: rabbitmq:3-management 102 | ports: 103 | - "5671:5671" 104 | - "5672:5672" 105 | - "15672:15672" 106 | environment: 107 | - RABBITMQ_DEFAULT_USER=${RABBITMQ_DEFAULT_USER} 108 | - RABBITMQ_DEFAULT_PASS=${RABBITMQ_DEFAULT_PASS} 109 | 110 | -------------------------------------------------------------------------------- /openresty/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM openresty/openresty:stretch 2 | 3 | COPY nginx /usr/local/openresty/nginx/conf 4 | COPY html /usr/local/openresty/nginx 5 | COPY lua /usr/local/openresty/lualib/user_code 6 | -------------------------------------------------------------------------------- /openresty/html/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Welcome to PostgREST Starter Kit! 5 | 6 | 12 | 20 | 21 | 22 |

    <h1>PostgREST Starter Kit!</h1>

23 | 24 |

    <p>Your API is up and running.</p>

25 | 26 |

    <h2>API Endpoints</h2>

27 | 30 | 31 | 32 |

    <h2>Explore</h2>

33 | curl rest/todos?select=id,todo 34 | 35 |

    <h2>Support and Documentation</h2>

36 | 43 | 44 | Developed by subZero 45 | 46 | 47 | -------------------------------------------------------------------------------- /openresty/lua/hooks.lua: -------------------------------------------------------------------------------- 1 | local utils = require 'utils' 2 | local cjson = require 'cjson' 3 | 4 | local function on_init() 5 | -- print "on_init called" 6 | end 7 | 8 | local function on_rest_request() 9 | -- print "on_rest_request called" 10 | end 11 | 12 | local function before_rest_response() 13 | -- print "before_rest_response called" 14 | -- postprocess response 15 | -- utils.set_body_postprocess_mode(utils.postprocess_modes.ALL) 16 | -- utils.set_body_postprocess_fn(function(body) 17 | -- local b = cjson.decode(body) 18 | -- b.custom_key = 'custom_value' 19 | -- return cjson.encode(b) 20 | -- end) 21 | end 22 | 23 | return { 24 | on_init = on_init, 25 | on_rest_request = on_rest_request, 26 | before_rest_response = before_rest_response, 27 | } -------------------------------------------------------------------------------- /openresty/lua/init_phase.lua: -------------------------------------------------------------------------------- 1 | cjson = require('cjson') 2 | utils = require('utils') 3 | 4 | hooks = require("hooks") 5 | 6 | if type(hooks.on_init) == 'function' then 7 | hooks.on_init() 8 | end 9 | 10 | -------------------------------------------------------------------------------- /openresty/lua/internal_rest_body_filter_phase.lua: -------------------------------------------------------------------------------- 1 | -- call body postprocess hook function 2 | -- to trigger this code, one would have these lines in one of 3 | -- the hooks (on_rest_request, before_rest_response) 4 | --[[ 5 | utils.set_body_postprocess_mode(utils.postprocess_modes.ALL) 6 | utils.set_body_postprocess_fn(function(body) 7 | local b = cjson.decode(body) 8 | b.custom_key = 'custom_value' 9 | return cjson.encode(b) 10 | end) 11 | --]] 12 | local mode = 
utils.get_body_postprocess_mode() 13 | local fn = utils.get_body_postprocess_fn() 14 | if type(fn) == 'function' then 15 | if mode == utils.postprocess_modes.CHUNKS then 16 | ngx.arg[1], ngx.arg[2] = fn(ngx.arg[1], ngx.arg[2]) 17 | end 18 | 19 | if mode == utils.postprocess_modes.ALL then 20 | local response_body = utils.buffer_response_body() 21 | if response_body then 22 | ngx.arg[1] = fn(response_body) 23 | end 24 | end 25 | end 26 | -------------------------------------------------------------------------------- /openresty/lua/internal_rest_header_filter_phase.lua: -------------------------------------------------------------------------------- 1 | -- call hook function if present 2 | if type(hooks.before_rest_response) == 'function' then 3 | hooks.before_rest_response() 4 | end -------------------------------------------------------------------------------- /openresty/lua/internal_rest_rewrite_phase.lua: -------------------------------------------------------------------------------- 1 | -- support /endpoint/:id url style 2 | local m, err = ngx.re.match(ngx.var.uri, "^/([a-z_]+)/([0-9]+)") 3 | if m then 4 | ngx.req.set_uri('/' .. m[1]) 5 | local args = ngx.req.get_uri_args() 6 | args.id = 'eq.' .. 
m[2] 7 | ngx.req.set_uri_args(args) 8 | ngx.req.set_header('Accept', 'application/vnd.pgrst.object+json') 9 | end 10 | 11 | -- call hook function if present 12 | if type(hooks.on_rest_request) == 'function' then 13 | hooks.on_rest_request() 14 | end 15 | -------------------------------------------------------------------------------- /openresty/lua/utils.lua: -------------------------------------------------------------------------------- 1 | -- response body postprocess mode 2 | local NONE = 0 3 | local CHUNKS = 1 4 | local ALL = 2 5 | 6 | local function set_body_postprocess_mode(mode) 7 | ngx.ctx.body_postprocess_mode = mode 8 | end 9 | 10 | local function get_body_postprocess_mode() 11 | return ngx.ctx.body_postprocess_mode 12 | end 13 | 14 | local function get_body_postprocess_fn() 15 | return ngx.ctx.body_postprocess_fn 16 | end 17 | 18 | local function set_body_postprocess_fn(fn) 19 | ngx.ctx.body_postprocess_fn = fn 20 | end 21 | 22 | local function buffer_response_body() 23 | local chunk, eof = ngx.arg[1], ngx.arg[2] 24 | local buffered = ngx.ctx.buffered_respose_body 25 | if not buffered then 26 | buffered = {} 27 | ngx.ctx.buffered_respose_body = buffered 28 | end 29 | if chunk ~= "" then 30 | buffered[#buffered + 1] = chunk 31 | ngx.arg[1] = nil 32 | end 33 | if eof then 34 | local response = table.concat(buffered) 35 | ngx.ctx.buffered_respose_body = nil 36 | --ngx.arg[1] = response 37 | ngx.arg[1] = nil 38 | return response 39 | end 40 | end 41 | 42 | return { 43 | postprocess_modes = { 44 | NONE = NONE, 45 | CHUNKS = CHUNKS, 46 | ALL = ALL 47 | }, 48 | set_body_postprocess_mode = set_body_postprocess_mode, 49 | get_body_postprocess_mode = get_body_postprocess_mode, 50 | buffer_response_body = buffer_response_body, 51 | get_body_postprocess_fn = get_body_postprocess_fn, 52 | set_body_postprocess_fn = set_body_postprocess_fn 53 | } -------------------------------------------------------------------------------- /openresty/nginx/cors.conf: 
-------------------------------------------------------------------------------- 1 | # set the needed cors headers 2 | if ($request_method = 'OPTIONS') { 3 | add_header 'Access-Control-Allow-Origin' $http_origin; 4 | add_header 'Access-Control-Allow-Methods' 'GET, POST, PATCH, DELETE, OPTIONS'; 5 | add_header 'Access-Control-Allow-Headers' $http_access_control_request_headers; 6 | add_header 'Access-Control-Allow-Credentials' true; 7 | add_header 'Access-Control-Max-Age' 1728000; 8 | add_header 'Content-Type' 'text/plain charset=UTF-8'; 9 | add_header 'Content-Length' 0; 10 | return 204; 11 | } 12 | if ($request_method = 'POST') { 13 | set_by_lua_block $dummy { 14 | ngx.header['Access-Control-Allow-Origin'] = ngx.var.http_origin; 15 | ngx.header['Access-Control-Allow-Methods'] = 'GET, POST, PATCH, DELETE, OPTIONS'; 16 | ngx.header['Access-Control-Allow-Credentials'] = 'true'; 17 | return true 18 | } 19 | } -------------------------------------------------------------------------------- /openresty/nginx/nginx.conf: -------------------------------------------------------------------------------- 1 | # a list of env vars that nginx will make avaliable for configuration files and Lua code 2 | env POSTGREST_HOST; 3 | env POSTGREST_PORT; 4 | env JWT_SECRET; 5 | env DB_SCHEMA; 6 | env DB_HOST; 7 | env DB_PORT; 8 | env DB_NAME; 9 | env DB_USER; 10 | env DB_PASS; 11 | env DEVELOPMENT; 12 | 13 | worker_processes 1; 14 | events { 15 | worker_connections 1024; 16 | } 17 | 18 | http { 19 | 20 | # set search paths for pure Lua external libraries (';;' is the default path): 21 | lua_package_path '${prefix}../lualib/user_code/?.lua;;'; 22 | init_by_lua_file '../lualib/user_code/init_phase.lua'; 23 | 24 | # a shorter log format for development 25 | log_format development '[$time_local] "$request" $status $body_bytes_sent "$request_time ms"'; 26 | 27 | resolver 127.0.0.11 ipv6=off; 28 | 29 | server { 30 | listen 80 default_server; 31 | server_name _; 32 | charset utf-8; 33 | # 
uninitialized_variable_warn off; 34 | 35 | 36 | #depending the env (production/development) switch between log formats 37 | set_by_lua_block $development { return os.getenv('DEVELOPMENT') or "0" } 38 | set $log_development 0; 39 | set $log_production 0; 40 | if ($development = "1") { 41 | set $log_development 1; 42 | } 43 | if ($development = "0") { 44 | set $log_production 1; 45 | } 46 | access_log logs/access.log combined if=$log_production; 47 | access_log logs/access.log development if=$log_development; 48 | 49 | # enable gzip compression 50 | gzip on; 51 | gzip_disable "msie6"; 52 | gzip_vary on; 53 | gzip_proxied any; 54 | gzip_comp_level 6; 55 | gzip_buffers 16 8k; 56 | gzip_http_version 1.1; 57 | gzip_types text/plain text/css application/json application/vnd.pgrst.object+json application/x-javascript text/xml application/xml application/xml+rss text/javascript; 58 | 59 | include rest.conf; 60 | 61 | # this is the last location in the chain that points to html directory 62 | # this is where the files of your frontend application would go (html/javascript/css) 63 | location / { 64 | root html; 65 | index index.html index.htm; 66 | } 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /openresty/nginx/rest.conf: -------------------------------------------------------------------------------- 1 | # variable used by lua code, needs to match line below 2 | set $rest_prefix "/rest"; 3 | 4 | # user facing location for the rest api 5 | location /rest { 6 | include cors.conf; 7 | 8 | # rewrite for the main internal location 9 | rewrite ^/rest/(.*)$ /internal/rest/$1; 10 | } 11 | 12 | # main internal location that will farward requests to PostgREST 13 | location /internal/rest/ { 14 | # this location is only avalable through internal redirects 15 | internal; 16 | default_type application/json; 17 | 18 | rewrite_by_lua_file '../lualib/user_code/internal_rest_rewrite_phase.lua'; 19 | header_filter_by_lua_file 
'../lualib/user_code/internal_rest_header_filter_phase.lua'; 20 | body_filter_by_lua_file '../lualib/user_code/internal_rest_body_filter_phase.lua'; 21 | 22 | proxy_set_header Accept-Encoding ""; #force postgrest not to gzip the output 23 | proxy_set_header Connection ""; #optimise communication with upstream (keep alive) 24 | proxy_set_header Host $host; 25 | proxy_http_version 1.1; 26 | set_by_lua_block $postgrest_host { return os.getenv('POSTGREST_HOST') or "0" } 27 | set_by_lua_block $postgrest_port { return os.getenv('POSTGREST_PORT') or "0" } 28 | proxy_pass http://$postgrest_host:$postgrest_port; # Reverse proxy to your PostgREST 29 | 30 | # Rewrite the Content-Location header to match our location 31 | proxy_hide_header Content-Location; 32 | set_by_lua_block $rest_prefix { return ngx.var.rest_prefix or "/rest" } # this needs to match user facing location path 33 | more_set_headers 'Content-Location: $rest_prefix$upstream_http_content_location'; 34 | 35 | # Debug Info 36 | if ($development = "1") { 37 | more_set_headers 'Request-Time: $request_time'; 38 | } 39 | 40 | # this needs to be at the end 41 | rewrite /internal/rest(.+) $1 break; 42 | } -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "starter-kit", 3 | "dependencies": {}, 4 | "devDependencies": { 5 | "babel-core": "^6.24.0", 6 | "babel-preset-latest": "^6.24.1", 7 | "dotenv": "4.0.0", 8 | "jsonwebtoken": "^8.3.0", 9 | "mocha": "^6.2.0", 10 | "should": "^11.2.0", 11 | "supertest": "^3.0.0" 12 | }, 13 | "scripts": { 14 | "test_rest": "mocha --no-timeouts --require babel-core/register ./tests/rest/", 15 | "test_db": "node tests/bin/test_db.js", 16 | "test": "npm run test_db && npm run test_rest" 17 | }, 18 | "author": "ruslan.talpa@subzero.cloud", 19 | "license": "ISC" 20 | } 21 | 
-------------------------------------------------------------------------------- /tests/bin/test_db.js: -------------------------------------------------------------------------------- 1 | const spawn = require('child_process').spawn; 2 | require('dotenv').config(); 3 | const PG_MAJOR_VERSION = process.env.PG_VERSION.replace(/\..*/,''); 4 | spawn('docker', [ 5 | 'run', 6 | '-i', 7 | '-t', 8 | '--rm', 9 | '--name', 'pgtap', 10 | '--net', `${process.env.COMPOSE_PROJECT_NAME}_default`, 11 | '--link', `${process.env.COMPOSE_PROJECT_NAME}_db_1:db`, 12 | '-v', `${process.cwd()}/tests/db/:/test`, 13 | '-e', `HOST=${process.env.DB_HOST}`, 14 | '-e', `DATABASE=${process.env.DB_NAME}`, 15 | '-e', `USER=${process.env.SUPER_USER}`, 16 | '-e', `PASSWORD=${process.env.SUPER_USER_PASSWORD}`, `subzerocloud/pgtap:pg${PG_MAJOR_VERSION}`, 17 | ], 18 | { stdio: 'inherit' }); -------------------------------------------------------------------------------- /tests/db/README.md: -------------------------------------------------------------------------------- 1 | To run the tests in this directory do the following 2 | - change to the root of the project folder 3 | - bring the system up using docker-compose 4 | - run the command below 5 | 6 | ```shell 7 | ( \ 8 | source .env && \ 9 | docker run -i -t --rm --name pgtap \ 10 | --network ${COMPOSE_PROJECT_NAME}_default \ 11 | --link ${COMPOSE_PROJECT_NAME}_db_1:db \ 12 | -v $(pwd)/tests/db/:/test \ 13 | -e HOST=$DB_HOST \ 14 | -e DATABASE=$DB_NAME \ 15 | -e USER=$SUPER_USER \ 16 | -e PASSWORD=$SUPER_USER_PASSWORD \ 17 | lren/pgtap:0.96.0-2 \ 18 | ) 19 | ``` 20 | -------------------------------------------------------------------------------- /tests/db/rls.sql: -------------------------------------------------------------------------------- 1 | begin; 2 | select * from no_plan(); 3 | 4 | SELECT schema_privs_are( 5 | 'api', 'webuser', ARRAY['USAGE'], 6 | 'authenticated users should have usage privilege of the api schema' 7 | ); 8 | 9 | 10 | -- 
switch to a anonymous application user 11 | set local role anonymous; 12 | set request.jwt.claim.role = 'anonymous'; 13 | 14 | select set_eq( 15 | 'select id from api.todos', 16 | array[ 1, 3, 6 ], 17 | 'only public todos are visible to anonymous users' 18 | ); 19 | 20 | 21 | -- switch to a specific application user 22 | set local role webuser; 23 | set request.jwt.claim.role = 'webuser'; 24 | set request.jwt.claim.user_id = '1'; --alice 25 | 26 | select set_eq( 27 | 'select id from api.todos where mine = true', 28 | array[ 1, 2, 3 ], 29 | 'can see all his todos' 30 | ); 31 | 32 | select set_eq( 33 | 'select id from api.todos', 34 | array[ 1, 2, 3, 6 ], 35 | 'can see his todos and public ones' 36 | ); 37 | 38 | 39 | 40 | select * from finish(); 41 | rollback; 42 | -------------------------------------------------------------------------------- /tests/db/simple.sql: -------------------------------------------------------------------------------- 1 | begin; 2 | select * from no_plan(); 3 | 4 | select has_schema('information_schema'); 5 | 6 | select has_view('information_schema', 'routines', 'has routines information_schema.routines view'); 7 | 8 | select has_column('information_schema', 'routines', 'specific_name', 'has information_schema.routines.specific_name column'); 9 | 10 | select * from finish(); 11 | rollback; 12 | -------------------------------------------------------------------------------- /tests/db/structure.sql: -------------------------------------------------------------------------------- 1 | begin; 2 | select * from no_plan(); 3 | 4 | select * from check_test( 5 | views_are('api', array['todos'], 'tables present' ), 6 | true, 7 | 'all views are present in api schema', 8 | 'tables present', 9 | '' 10 | ); 11 | 12 | select * from check_test( 13 | functions_are('api', array['login', 'signup', 'refresh_token', 'me'], 'functions present' ), 14 | true, 15 | 'all functions are present in api schema', 16 | 'functions present', 17 | '' 18 | ); 19 | 20 | 
select * from finish(); 21 | rollback; 22 | -------------------------------------------------------------------------------- /tests/rest/auth.js: -------------------------------------------------------------------------------- 1 | import { rest_service, resetdb } from './common' 2 | import { should } from 'should' 3 | 4 | describe('auth', function () { 5 | before(function (done) { resetdb(); done() }) 6 | after(function (done) { resetdb(); done() }) 7 | 8 | it('login', function (done) { 9 | rest_service() 10 | .post('/rpc/login') 11 | .set('Accept', 'application/vnd.pgrst.object+json') 12 | .send({ 13 | email: 'alice@email.com', 14 | password: 'pass' 15 | }) 16 | .expect('Content-Type', /json/) 17 | .expect(r => { 18 | //console.log(r.body) 19 | r.body.me.email.should.equal('alice@email.com') 20 | }).expect(200, done) 21 | }) 22 | 23 | it('me', function (done) { 24 | rest_service() 25 | .post('/rpc/me') 26 | .set('Accept', 'application/vnd.pgrst.object+json') 27 | .withRole('webuser') 28 | .send({}) 29 | .expect('Content-Type', /json/) 30 | .expect(r => { 31 | //console.log(r.body) 32 | r.body.email.should.equal('alice@email.com') 33 | }).expect(200, done) 34 | }) 35 | 36 | it('refresh_token', function (done) { 37 | rest_service() 38 | .post('/rpc/refresh_token') 39 | .set('Accept', 'application/vnd.pgrst.object+json') 40 | .withRole('webuser') 41 | .send({}) 42 | .expect('Content-Type', /json/) 43 | .expect(r => { 44 | //console.log(r.body) 45 | r.body.length.should.above(0) 46 | }).expect(200, done) 47 | }) 48 | 49 | it('signup', function (done) { 50 | rest_service() 51 | .post('/rpc/signup') 52 | .set('Accept', 'application/vnd.pgrst.object+json') 53 | .send({ 54 | name: 'John Doe', 55 | email: 'john@email.com', 56 | password: 'pass' 57 | }) 58 | .expect('Content-Type', /json/) 59 | 60 | .expect(r => { 61 | //console.log(r.body) 62 | r.body.me.email.should.equal('john@email.com') 63 | }).expect(200, done) 64 | }) 65 | }) 66 | 
-------------------------------------------------------------------------------- /tests/rest/common.js: -------------------------------------------------------------------------------- 1 | import jsonwebtoken from 'jsonwebtoken' 2 | import request from 'supertest' 3 | import { config } from 'dotenv' 4 | import { spawnSync } from 'child_process' 5 | // var execSync = require('child_process').execSync; 6 | 7 | config()// .env file vars added to process.env 8 | 9 | const COMPOSE_PROJECT_NAME = process.env.COMPOSE_PROJECT_NAME 10 | const POSTGRES_USER = process.env.POSTGRES_USER 11 | const POSTGRES_PASSWORD = process.env.POSTGRES_PASSWORD 12 | const SUPER_USER = process.env.SUPER_USER 13 | const SUPER_USER_PASSWORD = process.env.SUPER_USER_PASSWORD 14 | 15 | const DB_HOST = process.env.DB_HOST 16 | const DB_NAME = process.env.DB_NAME 17 | const PG = `${COMPOSE_PROJECT_NAME}_db_1` 18 | 19 | const psql_version = spawnSync('psql', ['--version']) 20 | const have_psql = (psql_version.stdout && psql_version.stdout.toString('utf8').trim().length > 0) 21 | 22 | export function rest_service () { 23 | return request(process.env.SERVER_PROXY_URI) 24 | } 25 | 26 | export function resetdb () { 27 | let pg 28 | if (have_psql) { 29 | var env = Object.create(process.env) 30 | env.PGPASSWORD = SUPER_USER_PASSWORD 31 | pg = spawnSync('psql', ['-h', 'localhost', '-U', SUPER_USER, DB_NAME, '-f', process.env.PWD + '/db/src/sample_data/reset.sql'], { env: env }) 32 | } else { 33 | pg = spawnSync('docker', ['exec', PG, 'psql', '-U', SUPER_USER, DB_NAME, '-f', 'docker-entrypoint-initdb.d/sample_data/reset.sql']) 34 | } 35 | if (pg.status !== 0) { 36 | throw new Error(`Could not reset database in rest tests. 
Error = ${pg.stderr.toString()}`) 37 | } 38 | } 39 | 40 | request.Test.prototype.withRole = function (role) { 41 | if (typeof role !== 'string') { 42 | throw new TypeError(`The role must be given as a string`) 43 | } 44 | 45 | let payload = { 46 | user_id: 1, 47 | role, 48 | // Pretend that the JWT was issued 30 seconds ago in the past 49 | iat: Math.floor(Date.now() / 1000) - 30 50 | } 51 | 52 | let jwt = jsonwebtoken.sign(payload, process.env.JWT_SECRET) 53 | 54 | return this.set('Authorization', `Bearer ${jwt}`) 55 | } 56 | -------------------------------------------------------------------------------- /tests/rest/read.js: -------------------------------------------------------------------------------- 1 | import { rest_service, resetdb } from './common' 2 | import should from 'should' 3 | 4 | describe('read', function () { 5 | before(function (done) { resetdb(); done() }) 6 | after(function (done) { resetdb(); done() }) 7 | 8 | it('basic', function (done) { 9 | rest_service() 10 | .get('/todos?select=id,todo') 11 | .expect('Content-Type', /json/) 12 | .expect(200, done) 13 | .expect(r => { 14 | r.body.length.should.equal(3) 15 | r.body[0].id.should.equal(1) 16 | }) 17 | }) 18 | 19 | it('by primary key', function (done) { 20 | rest_service() 21 | .get('/todos/1?select=id,todo') 22 | .expect(200, done) 23 | .expect(r => { 24 | r.body.id.should.equal(1) 25 | r.body.todo.should.equal('item_1') 26 | }) 27 | }) 28 | }) 29 | -------------------------------------------------------------------------------- /tests/rest/simple.js: -------------------------------------------------------------------------------- 1 | import { rest_service } from './common' 2 | 3 | describe('root endpoint', function () { 4 | it('returns json', function (done) { 5 | rest_service() 6 | .get('/') 7 | .expect('Content-Type', /json/) 8 | .expect(200, done) 9 | }) 10 | }) 11 | --------------------------------------------------------------------------------