├── .babelrc
├── .codeclimate.yml
├── .editorconfig
├── .env.development
├── .env.test
├── .eslintrc.yml
├── .github
└── FUNDING.yml
├── .gitignore
├── .graphqlconfig
├── .idea
├── jsLibraryMappings.xml
├── runConfigurations
│ ├── Server.xml
│ ├── emulate_cli.xml
│ └── emulate_cli_staging.xml
└── vcs.xml
├── .nvmrc
├── CHANGELOG.md
├── LICENSE
├── README.md
├── buildspec.yml
├── gql
└── querySchema.js
├── jest-preload.js
├── package.json
├── schema.graphql
├── secrets-development.yml
├── secrets-example.yml
├── secrets-test.yml
├── serverless.yml
├── src
├── authorizers
│ └── basic-auth.js
├── cli
│ ├── commandsHandler.js
│ └── index.js
├── constants.js
├── functions
│ ├── cache-query.js
│ ├── cache-query.test.js
│ ├── epsagon.js
│ ├── read-cache.js
│ ├── refresh-cache.js
│ ├── refresh-cache.test.js
│ ├── reset-cache.js
│ ├── reset-cache.test.js
│ └── status.js
└── utils
│ ├── apolloClient.js
│ ├── auth.js
│ ├── auth.test.js
│ ├── cache.js
│ ├── cache.test.js
│ ├── epsagon.js
│ ├── graphql.js
│ ├── graphql.test.js
│ ├── queries.test.js
│ ├── redis.js
│ ├── redis.test.js
│ └── timers.js
├── webpack.config.js
└── yarn.lock
/.babelrc:
--------------------------------------------------------------------------------
1 | {
2 | "plugins": [
3 | "source-map-support"
4 | ],
5 | "presets": [
6 | [
7 | "@babel/preset-env",
8 | {
9 | "targets": {
10 | "node": true
11 | }
12 | }
13 | ]
14 | ]
15 | }
16 |
--------------------------------------------------------------------------------
/.codeclimate.yml:
--------------------------------------------------------------------------------
1 | # XXX See https://docs.codeclimate.com/docs/advanced-configuration
2 | version: "2"
3 | checks:
4 | argument-count:
5 | enabled: true
6 | config:
7 | threshold: 4
8 | complex-logic:
9 | enabled: true
10 | config:
11 | threshold: 4
12 | file-lines:
13 | enabled: true
14 | config:
15 | threshold: 400 # 250 by default
16 | method-complexity:
17 | enabled: true
18 | config:
19 | threshold: 5
20 | method-count:
21 | enabled: true
22 | config:
23 | threshold: 20
24 | method-lines:
25 | enabled: true
26 | config:
27 | threshold: 100 # 25 by default
28 | nested-control-flow:
29 | enabled: true
30 | config:
31 | threshold: 4
32 | return-statements:
33 | enabled: true
34 | config:
35 | threshold: 4
36 |
37 | plugins:
38 | # eslint: # https://docs.codeclimate.com/docs/eslint
39 | # enabled: true
40 | # channel: "eslint-4" # Depends on installed ESLint version - See https://docs.codeclimate.com/docs/eslint#section-eslint-versions
41 | duplication: # https://docs.codeclimate.com/docs/duplication
42 | enabled: true
43 | config:
44 | languages:
45 | javascript:
46 | mass_threshold: 50 # See https://docs.codeclimate.com/docs/duplication#section-understand-the-engine
47 | fixme: # https://docs.codeclimate.com/docs/fixme
48 | enabled: true
49 | config:
50 | strings: # Skip "XXX" as we don't use it for things to fix but rather for highlighting comments (DX)
51 | - FIXME
52 | - BUG
53 | - TODO
54 | - HACK
55 | git-legal: # https://docs.codeclimate.com/docs/git-legal
56 | enabled: true
57 | # tslint: # https://docs.codeclimate.com/docs/tslint
58 | # enabled: true
59 | # config: tslint.json
60 |
61 | # See https://docs.codeclimate.com/docs/excluding-files-and-folders
62 | exclude_patterns:
63 | - "**/*.test.*"
64 | - "**/*.spec.*"
65 | - "src/svg/"
66 |
67 | # Default CC excluded paths:
68 | - "config/"
69 | - "db/"
70 | - "dist/"
71 | - "features/"
72 | - "**/node_modules/"
73 | - "script/"
74 | - "**/spec/"
75 | - "**/test/"
76 | - "**/tests/"
77 | - "**/vendor/"
78 | - "**/*.d.ts"
79 |
--------------------------------------------------------------------------------
/.editorconfig:
--------------------------------------------------------------------------------
1 | [*]
2 | charset=utf-8
3 | end_of_line=lf
4 | trim_trailing_whitespace=true
5 | insert_final_newline=true
6 | indent_style=space
7 | indent_size=2
8 |
9 | [{*.jhm,*.xslt,*.xul,*.rng,*.xsl,*.xsd,*.ant,*.tld,*.fxml,*.jrxml,*.xml,*.jnlp,*.wsdl}]
10 | indent_style=space
11 | indent_size=4
12 |
13 | [{.babelrc,.prettierrc,.stylelintrc,.eslintrc,jest.config,*.json,*.jsb3,*.jsb2,*.bowerrc,*.graphqlconfig}]
14 | indent_style=space
15 | indent_size=2
16 |
17 | [.editorconfig]
18 | indent_style=space
19 | indent_size=4
20 |
21 | [*.less]
22 | indent_style=space
23 | indent_size=2
24 |
25 | [{jshint.json,*.jshintrc}]
26 | indent_style=space
27 | indent_size=2
28 |
29 | [{*.jscs.json,*.jscsrc}]
30 | indent_style=space
31 | indent_size=2
32 |
33 | [{tsconfig.lib.json,tsconfig.spec.json,tsconfig.app.json,tsconfig.json,tsconfig.e2e.json}]
34 | indent_style=space
35 | indent_size=2
36 |
37 | [*.js.map]
38 | indent_style=space
39 | indent_size=2
40 |
41 | [*.ejs]
42 | indent_style=space
43 | indent_size=4
44 |
45 | [{.analysis_options,*.yml,*.yaml}]
46 | indent_style=space
47 | indent_size=2
48 |
49 | [*.md]
50 | indent_size = 4
51 |
--------------------------------------------------------------------------------
/.env.development:
--------------------------------------------------------------------------------
1 | # This file defines environment variables used only in the local development environment, which will OVERRIDE env variables defined in serverless.yml
2 | # XXX Variables defined here will override variables defined in serverless.yml, useful when defining variables that are global to an environment and not specific per instance
3 |
4 | # URL where your local Cache instance is running
5 | # Required - For development/test environments only, handled through "serverless.yml" for other environments
6 | CACHE_BASE_URL=http://localhost:8085
7 |
--------------------------------------------------------------------------------
/.env.test:
--------------------------------------------------------------------------------
1 | # This file defines environment variables used only in the local test environment, which will OVERRIDE env variables defined in serverless.yml
2 | # XXX Variables defined here will override variables defined in serverless.yml, useful when defining variables that are global to an environment and not specific per instance
3 |
4 | # URL where your local Cache instance is running
5 | # Required - For development/test environments only, handled through "serverless.yml" for other environments
6 | CACHE_BASE_URL=http://localhost:8085
7 |
--------------------------------------------------------------------------------
/.eslintrc.yml:
--------------------------------------------------------------------------------
1 | env:
2 | es6: true
3 | node: true
4 | extends:
5 | - airbnb-base
6 | - plugin:jest/recommended
7 | globals:
8 | Atomics: readonly
9 | SharedArrayBuffer: readonly
10 | parserOptions:
11 | ecmaVersion: 2018
12 | sourceType: module
13 | plugins:
14 | - jest
15 | rules: # See https://eslint.org/docs/rules
16 | semi:
17 | - error
18 | - always # Always require semicolons, to avoid ambiguity and reliance on automatic semicolon insertion
19 | quotes:
20 | - error
21 | - single # Prefer simple quotes
22 | - allowTemplateLiterals: true # Allow the use of `` instead of '' and don't try to replace it, even when `` isn't needed
23 | comma-spacing:
24 | - error
25 | - before: false
26 | after: true
27 | indent:
28 | - error
29 | - 2
30 | arrow-parens:
31 | - error
32 | - always
33 | max-len: 0 # Disable line length checks, because the IDE is already configured to warn about it, and it's a waste of time to check for lines that are too long, especially in comments (like this one!)
34 | strict: 'off'
35 | no-console: 2 # Shouldn't use "console", but "logger" instead
36 | allowArrowFunctions: 0
37 | no-unused-vars:
38 | - error
39 | - args: none # Allow to declare unused variables in function arguments, meant to be used later
40 | import/prefer-default-export: 0 # When there is only a single export from a module, don't enforce a default export, but rather let developer choose what's best
41 | no-else-return: 0 # Don't enforce, let developer choose. Sometimes we like to specifically use "return" for the sake of comprehensibility and avoid ambiguity
42 | no-underscore-dangle: 0 # Allow _ before/after variables and functions, convention for something meant to be "private"
43 | arrow-body-style: 0 # Don't enforce, let developer choose. Sometimes we like to specifically use "return" for ease of debugging and printing
44 | quote-props:
45 | - warn
46 | - consistent-as-needed # Enforce consistency with quotes on props, either all must be quoted, or all unquoted for a given object
47 | no-return-await: 0 # Useful before, but recent node.js enhancements make it useless on node 12+ (we use 10, but still, for consistency) - Read https://stackoverflow.com/questions/44806135/why-no-return-await-vs-const-x-await
48 | no-extra-boolean-cast: 0 # Don't enforce, let developer choose. Using "!!!" is sometimes useful (edge cases), and has a semantic value (dev intention)
49 | object-curly-newline:
50 | - warn
51 | - ObjectExpression:
52 | multiline: true
53 | minProperties: 5
54 | consistent: true
55 | ObjectPattern:
56 | multiline: true
57 | minProperties: 5
58 | consistent: true
59 | ImportDeclaration: never # Would conflict with WebStorm settings (WebStorm does the job better)
60 | ExportDeclaration:
61 | multiline: true
62 | minProperties: 5
63 | consistent: true
64 |
--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | liberapay: unlyEd
2 | github: [UnlyEd, Vadorequest]
3 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Created by https://www.gitignore.io/api/webstorm
2 |
3 | ### WebStorm ###
4 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
5 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
6 |
7 | # User-specific stuff
8 | .idea/**/workspace.xml
9 | .idea/**/tasks.xml
10 | .idea/**/usage.statistics.xml
11 | .idea/**/dictionaries
12 | .idea/**/shelf
13 |
14 | # Sensitive or high-churn files
15 | .idea/**/dataSources/
16 | .idea/**/dataSources.ids
17 | .idea/**/dataSources.local.xml
18 | .idea/**/sqlDataSources.xml
19 | .idea/**/dynamic.xml
20 | .idea/**/uiDesigner.xml
21 | .idea/**/dbnavigator.xml
22 |
23 | # Gradle
24 | .idea/**/gradle.xml
25 | .idea/**/libraries
26 |
27 | # Gradle and Maven with auto-import
28 | # When using Gradle or Maven with auto-import, you should exclude module files,
29 | # since they will be recreated, and may cause churn. Uncomment if using
30 | # auto-import.
31 | # .idea/modules.xml
32 | # .idea/*.iml
33 | # .idea/modules
34 |
35 | # CMake
36 | cmake-build-*/
37 |
38 | # Mongo Explorer plugin
39 | .idea/**/mongoSettings.xml
40 |
41 | # File-based project format
42 | *.iws
43 |
44 | # IntelliJ
45 | out/
46 |
47 | # mpeltonen/sbt-idea plugin
48 | .idea_modules/
49 |
50 | # JIRA plugin
51 | atlassian-ide-plugin.xml
52 |
53 | # Cursive Clojure plugin
54 | .idea/replstate.xml
55 |
56 | # Crashlytics plugin (for Android Studio and IntelliJ)
57 | com_crashlytics_export_strings.xml
58 | crashlytics.properties
59 | crashlytics-build.properties
60 | fabric.properties
61 |
62 | # Editor-based Rest Client
63 | .idea/httpRequests
64 |
65 | ### WebStorm Patch ###
66 | # Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721
67 |
68 | # XXX I don't like that patch, this project is meant to be forked/cloned and therefore renamed,
69 | # tracking those files messes up forks when this boilerplate's config is changed
70 | *.iml
71 | modules.xml
72 | .idea/misc.xml
73 | *.ipr
74 | $CACHE_FILE$
75 |
76 | # Don't track plugin config
77 | markdown-navigator.xml
78 |
79 | # Don't track code style, as forks may update them and we shouldn't enforce code style from the IDE
80 | .idea/codeStyles
81 |
82 | # Sonarlint plugin
83 | .idea/sonarlint
84 |
85 |
86 | # End of https://www.gitignore.io/api/webstorm
87 |
88 | ######################### CUSTOM/MANUAL #############################
89 |
90 | # See https://help.github.com/ignore-files/ for more about ignoring files.
91 |
92 | # IDE plugins
93 | .idea/markdown-navigator*/**
94 |
95 | # package directories
96 | node_modules
97 | jspm_packages
98 |
99 | # Serverless directories
100 | .serverless
101 | .webpack
102 | .next
103 | dist
104 |
105 | .DS_Store
106 | .sls-simulate-registry
107 |
108 | # Builds
109 | build
110 | .firebase
111 | coverage/
112 |
113 | # Sensitive values, do not share (development is for local personal use, not super sensitive but shouldn't be shared with the team)
114 | # XXX Feel free to track development/test/staging files if you wish so, it's usually not too sensitive if you split staging/production properly
115 | .env*
116 | !.env.development
117 | !.env.test
118 |
119 | secrets-*.yml
120 | !secrets-example.yml
121 | !secrets-development.yml
122 | !secrets-test.yml
123 |
124 | # Epsagon generated files
125 | epsagon_handlers/
126 |
--------------------------------------------------------------------------------
/.graphqlconfig:
--------------------------------------------------------------------------------
1 | {
2 | "schemaPath": "schema.graphql",
3 | "extensions": {
4 | "endpoints": {
5 | "staging": {
6 | "url": "https://api-eu-central-1.graphcms.com/v2/cjyi8gl5m00tm01e91polc50t/master",
7 | "introspect": true,
8 | "headers": {
9 | "user-agent": "JS GraphQL",
10 | "Authorization": "Bearer ${env:GRAPHCMS_TOKEN}"
11 | }
12 | },
13 | "production": {
14 | "url": "https://api-eu-central-1.graphcms.com/v2/cjyi8gl5m00tm01e91polc50t/master",
15 | "introspect": true,
16 | "headers": {
17 | "user-agent": "JS GraphQL",
18 | "Authorization": "Bearer ${env:GRAPHCMS_TOKEN}"
19 | }
20 | }
21 | }
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/.idea/jsLibraryMappings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
--------------------------------------------------------------------------------
/.idea/runConfigurations/Server.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
18 |
19 |
20 |
--------------------------------------------------------------------------------
/.idea/runConfigurations/emulate_cli.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
19 |
20 |
21 |
--------------------------------------------------------------------------------
/.idea/runConfigurations/emulate_cli_staging.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
19 |
20 |
21 |
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.nvmrc:
--------------------------------------------------------------------------------
1 | v10.15
2 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | CHANGELOG
2 | ===
3 |
4 | - v2.2.1 - 2019-10-21
5 | - [Enhancement] Implement a better per-instance configuration, the previous one caused a lot of copy/paste duplicated config when managing many instances
6 | - v2.2.0 - 2019-10-21
7 | - [Feature] Allow per-instance configuration, backward-compatible but it's a whole new way of defining environment variables using secrets-staging.yml and secrets-production.yml instead of .env.staging and .env.production
8 | - v2.1.6 - 2019-10-02
9 | - [Enhancement] Handle CORS preflight request - Before today, it wasn't possible to send POST requests to the `/cache-query` endpoint due to CORS preflight check that wasn't allowed. We didn't previously encounter the issue because we were using the same domain/sub-domain. Now, it's possible to connect to the Cache from any domain
10 | - v2.1.5 - 2019-09-25
11 | - [Enhancement] Improve stability/resilience of /reset-cache endpoint, better handle redis errors - Doesn't spawn one redis connection per call anymore, more resilient to calls received in a batch as well (forces 1 reset max every ~10s)
12 | - v2.1.4 - 2019-09-12
13 | - [Enhancement] Fix `emulate:demo:production` script - _Wasn't using production environment_
14 | - v2.1.3 - 2019-09-12
15 | - [Enhancement] Check for `NetworkStatus.ready` instead of `queryResults.errors` (as status already handle those, clearer/proper) when analysing apollo client query result
16 | - v2.1.2 - 2019-09-09
17 | - [Enhancement] Reuse redis connections for the same lambda container - _A new Redis connection was created at every lambda call before_
18 | - v2.1.1 - 2019-09-09
19 | - [Enhancement] CLI - Reset cache - A new action is available in the CLI and allows simulating the /reset-cache endpoint
20 | - [Enhancement] The /cache-query endpoint prints additional debug information regarding fetched items from Redis cache
21 | - [Enhancement] Changed `yarn lint` to run in watch mode by default, added a new `yarn lint:once` for CI
22 | - [Enhancement] Run `yarn lint` automatically when running `yarn start` for development env
23 | - v2.1.0 - 2019-09-07 - [Public release](https://github.com/UnlyEd/GraphCMS-cache-boilerplate), internally forked for our private use @Unly - _Boilerplate will be maintained from now on_
24 | - [Enhancement] Headers forwarding - Now forwards some headers alongside the GCMS query. Forwarded headers are:
25 | - All headers starting with `gcms-`
26 | - `locale` for backward compatibility with systems using the old `locale` instead of the more recent `gcms-locale`
27 | - [Feature] Cache reset for WebHooks - New endpoint `/reset-cache` that just flush the whole redis db
28 | - v2.0.0 - 2019-08-11 - Production-ready version
29 | - [Enhancement] Handle potential redis failure for all endpoint (see [README](./README.md#reliability--resilience---handling-catastrophic-failures-graphcmsredis))
30 | - [Doc] Massive documentation update
31 | - v1.0.0 - [Alpha/POC version](https://github.com/UnlyEd/graphCMS-cache-contingency-boilerplate-POC) (non-production ready)
32 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2019 Unly
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
23 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 | [](https://codeclimate.com/github/UnlyEd/GraphCMS-cache-boilerplate/maintainability)
3 | [](https://codeclimate.com/github/UnlyEd/GraphCMS-cache-boilerplate/test_coverage)
4 | [](https://eu-west-1.console.aws.amazon.com/codesuite/codebuild/projects/GraphCMS-cache-boilerplate)
5 | [](https://snyk.io/test/github/UnlyEd/GraphCMS-cache-boilerplate?targetFile=package.json)
6 |
7 | # GraphCMS/GraphQL Cache Contingency service
8 |
9 | > The main goal of this service is to provide a **reliable cache contingency backup plan** in case a GraphQL endpoint is failing.
10 | This service's most important priority is the **service reliability**, not the **data consistency**, which may not always be up-to-date.
11 | >
12 | > This Cache service is meant to run **on your own AWS account**, and be managed **by yourself**.
13 | > It is powered by the [Serverless Framework](https://serverless.com/).
14 | > It uses _an optional_ 3rd party tool for monitoring the service: [Epsagon](https://epsagon.com/) _(free plan is most likely enough)_.
15 | >
16 | > It is a whole service meant to be used by developers/teams who rely on GraphCMS,
17 | > and are looking for a **safe and reliable way** to provide services to their own customers.
18 | >
19 | > **You keep complete ownership of this service**, as it runs under your plan, and **you are free to make any change to fit your business**.
20 | >
21 | > **P.S**: Please share the awesome things you build with the community!
22 |
23 | ## Overview
24 | ### _"Why/when should I use this?"_
25 |
26 | - _"I don't use [GraphCMS](https://graphcms.com/), is this any useful to me?"_:
27 | - **It may be**.
28 | This service is meant to be a proxy server between your apps and your GraphQL API endpoint.
29 | It's completely useless if you don't use GraphQL in the first place. (GraphCMS relies on GraphQL)
30 | **It was built with first-class GraphCMS support in mind, but it works for any GraphQL endpoint.** _The only difference between GraphQL/GraphCMS is that this project automatically forwards GraphCMS-specific HTTP headers to the GraphQL endpoint. But you can customize headers forwarding by forking it!_
31 | - _"I want to protect our apps against unpredictable outages from GraphCMS (or its 3rd party services), will this help me?"_:
32 | - **Yes**, it will. Read on!
33 | - _"I want to lower our GraphCMS cost by reducing the number of calls hitting their API, will this help me?"_:
34 | - **Yes**, it will. Read on!
35 | - _"I use frontend apps (react, angular, vue, etc.) and want to hide our GraphQL API credentials that are currently visible in the browser source code, will this help me?"_:
36 | - **Yes**, it will. Read on!
37 | - _"I want to improve the performance of our apps that rely on GraphQL API, will this help me?"_:
38 | - **Yes**, it will. Read on!
39 | - _"I want to run some automated data transformations after fetching those from GraphQL API, will this help me?"_:
40 | - It **could**, but it's not the main purpose.
41 | It could be a good place to start though, give it a try and see for yourself!
42 | - _"I'm just looking for something simple to setup that will just act as a temporary cache (caching mechanism on a limited period), is this still for me?"_:
43 | - **Yes**, this project provides many benefits/features, but it's possible to only use it as a **temporary caching mechanism** _(instead of cache contingency as it was meant to be at the beginning)_.
44 | See [Alternative strategy - Refresh the cache automatically by making it temporary](#alternative-strategy---refresh-the-cache-automatically-by-making-it-temporary)
45 | - _"I'm running a multi-tenants business, can you help about that?"_:
46 | - **Yes**, we do. We also run a multi-tenants business, meaning we have a dedicated service _(AWS Lambda, API GW, Redis instance, domain name)_ **for each of our customers/instances**.
47 | We therefore provide easy-to-use scripts, which allow you to manage each of those services from a centralised point.
48 |
49 | ### _"How should I use it?"_
50 |
51 | - **_"I am just curious"_**:
52 | - **Clone** the project, play around, run it on your local computer, deploy the service against your own AWS infrastructure and see how it works.
53 | _(don't forget to remove your service from AWS, once you're done playing around!)_
54 | - **_"I'm thinking using it for a professional project"_**:
55 | - **Fork** the project, build your own stuff on top of it if you need to, keep up to date with the main project if needed (new features, bug fixes, etc.),
56 | you'll be in control with the ability to quickly/simply catch up if ever needed.
57 | And this project comes with [some handy built-in scripts to help you keep it in sync!](#keeping-your-fork-up-to-date-with-this-boilerplate)
58 |
59 | ---
60 | ## Benefits
61 |
62 | Using this service instead of directly hitting a GraphQL endpoint provides the following benefits:
63 |
64 | 1. **Contingency backup plan for a GraphQL endpoint**:
65 | If a GraphQL endpoint is failing (whether it's a bug, planned maintenance, etc.), then all customers would be affected at once (app crash, etc.).
66 | As we cannot let this happen for obvious reasons, the goal of this contingency cache is to take over if GraphQL is failing, using a redis cache to return data cached by a previous request instead of crashing the app.
67 | 1. **Auth proxy**:
68 | Also, this service acts as a proxy, and can be used to hide authentication credentials from the client (front-end apps).
69 | This way, credentials such as GraphQL credentials (know as "API token" on GraphCMS) are only used here, from this service, on the server side, and are therefore **safe**.
70 | 1. **Cost mitigation** _(for GraphCMS, or any provider that bills you based on the inbound/outbound of data and API requests)_:
71 | As we won't hit the GCMS API directly, but most requests are gonna be cached, the GraphCMS usage cost will decrease.
72 | On the other hand, additional costs from the AWS Lambda, API Gateway and redis will apply. (but it's much less expensive)
73 | **Additionally**, as you limit the number of requests that are handled by GraphCMS, you can go over your plan limit without suffering from your endpoint to be taken down (free plans, for instance, are automatically taken down when over limit).
74 | If you use a free GraphCMS plan to serve real customers, this service will allow you to increase your applications reliability and serve even more customers before upgrading your plan.
75 | 1. **Performances**:
76 | As we don't run a whole GraphQL query every time but just return cached results, this has benefits on performances (mostly network speed).
77 | 1. **Additional data processing**:
78 | As this service acts as a proxy, it could also perform additional data processing, such as aggregations, that aren't available natively with GraphQL.
79 | _This is a possibility, but not the main purpose. And it's out of the scope for now, but could come in handy later._
80 |
81 | ---
82 |
83 |
84 |
85 | * [Getting started](#getting-started)
86 | + [_"How do I configure my app that currently queries GCMS API directly, and use my newly created cache service instead?"_](#_how-do-i-configure-my-app-that-currently-queries-gcms-api-directly-and-use-my-newly-created-cache-service-instead_)
87 | + [How to deploy a new customer](#how-to-deploy-a-new-customer)
88 | - [Config files to update:](#config-files-to-update)
89 | - [Configure custom domains](#configure-custom-domains)
90 | - [Deploy online](#deploy-online)
91 | * [Cache workflow, and cache invalidation strategies](#cache-workflow-and-cache-invalidation-strategies)
92 | + [Cache behaviour](#cache-behaviour)
93 | - [Cache strategy](#cache-strategy)
94 | + [Cache invalidation strategies](#cache-invalidation-strategies)
95 | - [Strategy 1 - Automatically refresh all queries cached in redis, when a change is made on GraphCMS](#strategy-1---automatically-refresh-all-queries-cached-in-redis-when-a-change-is-made-on-graphcms)
96 | - [Strategy 2 - Wipe the whole cache, when a change is made on GraphCMS](#strategy-2---wipe-the-whole-cache-when-a-change-is-made-on-graphcms)
97 | - [Alternative strategy - Refresh the cache automatically by making it temporary](#alternative-strategy---refresh-the-cache-automatically-by-making-it-temporary)
98 | - [Strategy X - Open an issue if you'd like to either implement or request another strategy!](#strategy-x---open-an-issue-if-youd-like-to-either-implement-or-request-another-strategy)
99 | + [Cache version history](#cache-version-history)
100 | * [Reliability & resilience - Handling catastrophic failures (GraphCMS/Redis)](#reliability--resilience---handling-catastrophic-failures-graphcmsredis)
101 | * [Logging and debugging](#logging-and-debugging)
102 | + [Own logs](#own-logs)
103 | + [Epsagon](#epsagon)
104 | - [Pricing](#pricing)
105 | - [Opt-out](#opt-out)
106 | - [Known issues with Epsagon](#known-issues-with-epsagon)
107 | * [API endpoints and usages](#api-endpoints-and-usages)
108 | + [Cache endpoint](#cache-endpoint)
109 | + [Cache invalidation endpoint (refresh)](#cache-invalidation-endpoint-refresh)
110 | + [Cache invalidation endpoint (reset)](#cache-invalidation-endpoint-reset)
111 | + [Read cached keys/GQL queries from cache](#read-cached-keysgql-queries-from-cache)
112 | + [Status](#status)
113 | * [Advanced notions](#advanced-notions)
114 | + [Multi customer instances](#multi-customer-instances)
115 | * [Keeping your fork up to date with this boilerplate](#keeping-your-fork-up-to-date-with-this-boilerplate)
116 | * [Testing](#testing)
117 | + [Known issues with testing](#known-issues-with-testing)
118 | * [CI with AWS CodeBuild](#ci-with-aws-codebuild)
119 | * [Redis](#redis)
120 | + [Known Redis limitations](#known-redis-limitations)
121 | + [Select Subscription plan](#select-subscription-plan)
122 | + [Database configuration](#database-configuration)
123 | - [Data eviction policy](#data-eviction-policy)
124 | * [Other known limitations and considerations](#other-known-limitations-and-considerations)
125 | * [Changelog](#changelog)
126 | * [Contributing](#contributing)
127 | + [Versions](#versions)
128 | - [SemVer](#semver)
129 | - [Release a new version](#release-a-new-version)
130 | + [Code style](#code-style)
131 | + [Working on the project - IDE](#working-on-the-project---ide)
132 | - [Vulnerability disclosure](#vulnerability-disclosure)
133 | - [Contributors and maintainers](#contributors-and-maintainers)
134 | - [**[ABOUT UNLY]**](#about-unly-)
135 |
136 |
137 |
138 | ## Getting started
139 |
140 | Watch this 10mn video to understand and see it in action!
141 |
142 | [](https://youtu.be/k4Bd-XHmiBM)
143 |
144 | Clone the repo, then configure your local install:
145 |
146 | - `nvm use` or `nvm install` _(optional, just make sure to use the same node version as specified in `/.nvmrc`)_
147 | - `yarn install`
148 | - Configure your `/.env.development` and `/.env.test` files _(only the GraphCMS credentials are really necessary, if you're just playing around)_
149 | - `yarn start` # Starts at localhost:8085
150 | - Go to `/status` and `/read-cache`
151 | - `yarn emulate:local` play around with fake queries sent to `http://localhost:8085` and go to `/read-cache` to see changes
152 |
153 | **Before deploying on AWS:**
154 | - Change the [serverless.yml](serverless.yml) configuration to match your needs
155 | - If you're just playing around and aren't using a **custom domain** to deploy to, then comment out the `serverless-domain-manager` plugin
156 | - Fill in the `redis.url` for the project you meant to deploy
157 |
158 | On AWS (staging):
159 | - _You'll need to create a `.env.staging` file first_
160 | - `yarn deploy:demo` (you may want to either disable or configure the [`serverless-domain-manager`](https://github.com/amplify-education/serverless-domain-manager#readme) plugin)
161 | - `yarn emulate:client:demo` to send queries to your staging endpoint and manually test the behavior there
162 |
163 | On AWS (prod):
164 | - _You'll need to create a `.env.production` file first_
165 | - `yarn deploy:demo:production`
166 | - `yarn emulate:client:demo:production` to send queries to your production endpoint and manually test the behavior there
167 |
168 | If you've decided to clone/fork this project, please do the following:
169 | - Change [AWS CodeBuild buildspec.yml](./buildspec.yml) file _(for Continuous Integration)_:
170 | - The project is configured to use AWS CodeBuild, we also use CodeClimate for code quality.
171 | [`slack-codebuild`](https://github.com/UnlyEd/slack-codebuild) is used to send build notification to a slack channel _(it's MIT too)_
172 | - `SLACK_WEBHOOK_URL`: **Use your own or remove it**, or your build notification will appear on **our** slack channel _(please don't do that)_
173 | - `CC_TEST_REPORTER_ID`: **Use your own or remove it**, or your build results will be mixed with our own
174 | - For additional documentation, see [CI with AWS CodeBuild](#ci-with-aws-codebuild)
175 |
176 | ### _"How do I configure my app that currently queries GCMS API directly, and use my newly created cache service instead?"_
177 |
178 | It really depends on the implementation of your app here.
179 | If you're using react with Apollo for instance, it's just a matter of changing the endpoint to target your cache (`/cache-query` endpoint) rather than your GCMS endpoint, and not use any credentials (the cache doesn't need any).
180 |
181 | It should be simple and straightforward, as it's just a matter of fetching your cache `/cache-query` endpoint instead of hitting your GraphCMS endpoint directly.
182 |
183 | > Testing with a non-production application is strongly recommended to begin with.
184 | > Also, use a `QUERY` GraphCMS token, you don't need to use a token that can write, read is enough and therefore **more secure**.
185 |
186 |
187 | ### How to deploy a new customer
188 |
189 | > Follow this when you need to deploy a new customer
190 |
191 | #### Config files to update:
192 |
193 | - [`package.json`](./package.json)
194 | - Duplicate an existing customer's scripts and just replace the name (e.g: `demo`) by the new customer's name
195 | - [`serverless.yml`](./serverless.yml)
196 | - Add a new `custom.envs` section with appropriate values (basically duplicate another customer (staging + prod) and change values)
197 | - [`secrets-staging.yml`](./secrets-staging.yml) and [`secrets-production.yml`](./secrets-production.yml)
198 |   - Add a new section with appropriate values (basically duplicate another customer and change values)
199 | - You may need to create a [new free Redis instance](https://app.redislabs.com/)
200 |
201 | #### Configure custom domains
202 |
203 | > This is only useful if you've kept the `serverless-domain-manager` plugin and thus want to deploy your service using a custom domain
204 |
205 | You're gonna need to configure AWS Route53 and AWS Certificate Manager to create your custom domain first.
206 |
207 | - Your custom domains are those listed as `custom.env.$customer.domain.name`
208 | - You can't deploy online until your custom domains have been validated with proper certificates by AWS
209 | - The whole process is gonna take a good 60-120mn, especially on your first time
210 | - You need access to the AWS account that manages your top-level DNS, APEX domains (root domain)
211 |   - For us, it's our AWS Root Account, and **only a few people** usually have access to that **very critical account**, so make sure you have all the access permissions you'll need
212 | - If your organisation only has one AWS Account, then it's a very very bad design security-wise (i.e: read our recommendations article below), but it's gonna help you go through the setup faster
213 | - **Tip**: [Learn more about how we recommend to structure your AWS Account](https://forum.serverless.com/t/restructuring-aws-proper-way-to-configure-aws-accounts-organisations-and-profiles-when-using-serverless/5009)
214 |
215 | [Go to the tutorial](https://forum.serverless.com/t/restructuring-aws-proper-way-to-configure-aws-accounts-organisations-and-profiles-when-using-serverless/5009/12?u=vadorequest-sl)
216 |
217 | - **Tip**: Note that commands using `sls create_domain` are managed here using `yarn create:$customer` and `yarn create:$customer:production`
218 |
219 | #### Deploy online
220 |
221 | > If you use custom domains and if they aren't ready then it'll fail (check your API Gateway).
222 |
223 | - `nvm use`
224 | - `yarn deploy:$customer` - Deploy the newly created customer in staging
225 | - `yarn deploy:$customer:production` - Deploy the newly created customer in production
226 |
227 | ---
228 |
229 | ## Cache workflow, and cache invalidation strategies
230 | ### Cache behaviour
231 |
232 | > This Cache uses a mix of GraphQL query and headers as index (redis key), and GCMS API responses as values (redis value).
233 |
234 | - It uses Redis as caching engine.
235 | - A redis key can hold up to 512MB of data _(it's therefore not a limitation, we won't ever reach that much data in a GraphQL query)_
236 |
237 | #### Cache strategy
238 |
239 | > "Always reliable, eventually synchronized"
240 |
241 | This Cache service will **always return the value from the redis cache**.
242 | **_It will never check if a newer value exists on the GCMS's side._**
243 |
244 | Therefore, it may not be in-sync with the actual values held by GCMS.
245 |
246 | Due to this behaviour, this Cache service would never send fresher data on its own.
247 | That's why there are different "**cache invalidation**" strategies.
248 |
249 | ### Cache invalidation strategies
250 |
251 | Those strategies are optional and you are not required to use any of them. You may use none, one, or several, as you decide.
252 | We implemented the **Strategy 1** first, and then switched to the **Strategy 2** which is less complex and more reliable in our use-case.
253 |
254 | #### Strategy 1 - Automatically refresh all queries cached in redis, when a change is made on GraphCMS
255 | > This strategy is very useful if you have lots of reads and very few writes.
256 | >
257 | > It is very inefficient if you write a lot in GraphCMS (like automated massive writes).
258 | > It doesn't play nice if you write a lot in GraphCMS (like automated massive writes in batches, such as massive data import).
259 |
260 | On GCMS's side, a **WebHook** is meant to **trigger** a **cache invalidation** every time a change is made in the data held by GCMS.
261 |
262 | > WebHooks can be configured from there: https://app.graphcms.com/YOURS/staging/webhooks
263 | > Each stage has its own WebHooks.
264 |
265 | The WebHook should be configured to hit the **cache invalidation endpoint** (`/refresh-cache`), which will run a query for all existing keys in the redis cache.
266 | Note that the cache will only be invalidated if the refresh query to GCMS API actually worked. So, if GCMS API is down during the cache refresh, the cache won't be changed. (there is no retry strategy)
267 | This is an important detail, as the cache should always contain reliable data.
268 |
269 | _Reminder_: The cache uses a Redis storage, with the **query** (as string) used as **key**, and the **query results** (as json) used as **value**.
270 |
271 | > In short, every time **any data is changed in GCMS**, **the whole cache is refreshed**.
272 |
273 | **N.B**: Special protection has been put in motion to avoid concurrent access of the `/refresh-cache` endpoint.
274 | Only one concurrent call is authorized, it is gracefully handled by the [`reservedConcurrency` option in serverless.yml](https://itnext.io/the-everything-guide-to-lambda-throttling-reserved-concurrency-and-execution-limits-d64f144129e5).
275 |
276 | **Known limitations**:
277 | - This strategy hasn't been designed the best way it could have been, and **suffers from some rare race conditions**.
278 | It may happen, in the case of a massive write (such as an automated import tool that performs lots of writes really fast (like 100-200 writes in 30-50 seconds))
279 | that the `/refresh-cache` endpoint will be called several times (despite the concurrency lock), because the import script takes so long, and multiple calls to `/refresh-cache` are executed.
280 |
281 | The **bad thing** is that the last call that fetches data from GraphCMS API and store them in the cache isn't necessarily executed at last,
282 | and it may happen that the data stored in the cache isn't the most recent version.
283 |
284 | The proper way to tackle this issue would be to use a `queue`, with a `debounce` strategy.
285 |     Basically wait until there are no more received requests and then perform the cache refresh (instead of immediately performing the cache refresh).
286 |
287 | Unfortunately, we ran out of time and didn't tackle this issue yet. (_instead, we implemented Strategy 2, which is simpler_)
288 | We're also not really familiar with queue services (SQS, SNS, EventBridge, ...) and don't know which one would be the best for the job.
289 |
290 | **Contributor help needed!**: That would be a very appreciated contribution! We'd definitely love a PR for this :)
291 | - If there are many queries stored in redis (hundreds), they may not all resolve themselves in the 30sec limit imposed by API GW.
292 | In such case, they'd likely start to fail randomly depending on GCMS API response time, and it'd become very difficult to ensure the integrity of all requests.
293 | It'd also (in the current state) be very hard to fix.
294 |
295 |     One possible way to tackle this issue would be to spawn calls (async, parallel) to another lambda, whose role would be to refresh one query only.
296 | We only have a handful of queries in our cache, so we're not affected by this limitation yet and aren't planning on working on it anytime soon.
297 |
298 | #### Strategy 2 - Wipe the whole cache, when a change is made on GraphCMS
299 |
300 | > This strategy is very useful if you have lots of reads and very few writes.
301 | >
302 | > It is very inefficient if you write a lot in GraphCMS (like automated massive writes).
303 |
304 | Much simpler and fixes several downsides suffered by Strategy 1, such as:
305 | - Much easier to debug (easier to reason about)
306 | - No edge case where we'd fetch data that will be updated again in a few secs (more reliable, data will always be up to date after a cache reset)
307 | - Remove unused queries from the cache at a regular interval (if your queries change for instance), avoids to end up fetching queries that aren't meant to be used anymore
308 | - No timeout even if there are hundreds/thousands of queries in the cache (since they won't all be refreshed at the same time, but simply wiped from the cache all at once)
309 | - Eventually consumes less resources (CPU/RAM) > cheaper _(not significant)_
310 |
311 | **Known limitations**:
312 | - Because there is no automated refill of the cache, it will be filled when a client performs an action that generates a query.
313 | If that query is rarely executed, it may happen that it's executed during an outage, and the query would therefore fail, potentially crashing your app.
314 | - If the cache reset happens during a GCMS outage, then your app will crash anyway. **We don't check that GCMS is up and running before performing the cache reset.** _(but that'd be awesome, once they provide a way to do that!)_
315 |
316 | **Contributor help needed!**: If you know a way to detect GraphCMS status and therefore avoid a cache reset during an outage, we're very interested.
317 | To our knowledge, they don't have any automated tool we could rely on to detect this before wiping all the data from the cache, but that'd definitely be an awesome addition!
318 |
319 |
320 | #### Alternative strategy - Refresh the cache automatically by making it temporary
321 |
322 | This is more a workaround than a real feature, but because all the data sent in the request `body` are used as redis key, to index a query's results, you can take advantage of that.
323 |
324 | In GraphQL, all queries _(and mutations)_ accept an `operationName`:
325 |
326 | For instance, the following GraphQL query:
327 |
328 | ```graphql
329 | query {
330 | __schema {
331 | mutationType {
332 | kind
333 | }
334 | }
335 | }
336 | ```
337 |
338 | Will yield the following request `body`:
339 | ```json
340 | {
341 | "operationName": null,
342 | "variables": {},
343 | "query": "{ __schema { mutationType { kind } }}"
344 | }
345 | ```
346 |
347 | Here, the `operationName` is `null`.
348 | But if you specify it (`query myQueryName {}`) then it will reflect in the `operationName`,
349 | and this field is also used **to index** the query in redis.
350 |
351 | So, if you wanted to automatically invalidate your cache every hour, you could just make the `operationName` **dynamic**,
352 | such as `query myQueryName_01_01_2019_11am {}`.
353 | This way, since the value would change every hour, a different GraphQL query would be sent every hour,
354 | and the key used by redis would therefore be different every hour, leading to a cache refresh because the newer query would actually be executed on GraphCMS API before being cached.
355 |
356 | This is a nice workaround that allows you to define very precisely a different strategy, which works very differently and could basically be used to ensure the cached data is refreshed periodically.
357 | On the other hand, it wouldn't protect against outages, because it wouldn't handle a fallback strategy. (if graphcms is down when a new query is executed for the first time, then it'd fail)
358 |
359 | But that's still nice to know, and perfectly fits a "simple cache strategy" use-case.
360 |
361 | #### Strategy X - Open an issue if you'd like to either implement or request another strategy!
362 |
363 | > Disclaimer: We'll likely not have the time to add another strategy if we don't need it ourselves.
364 | > But, feel free to open an issue and let's discuss it, we'll gladly advise you regarding the implementation details and discuss the specs together.
365 |
366 | ### Cache version history
367 |
368 | Using a protected endpoint `/read-cache`, you can visualise all **queries** (redis indexes) that are stored in the cache.
369 |
370 | For each query, there is a `version` and `updatedAt` fields that helps you understand when was the cached value refreshed for the last time (and how many times since it was initially added).
371 |
372 | Structure example:
373 | ```json
374 | {
375 | "createdAt": 1564566367896,
376 | "updatedAt": 1564566603538,
377 | "version": 2,
378 | "body": {
379 | "operationName": null,
380 | "variables": {},
381 | "query": "{ organisations { name __typename }}"
382 | }
383 | }
384 |
385 | ```
386 |
387 | > Good to know:
388 | > - The `body` is the object representation of the `gql` version of the query. _(basically, what's sent over the network)_
389 | > It contains a `query`, which is the string representation of the query.
390 | > - The `body.query` is sanitized and doesn't fully represent the key stored on redis (trim of `\n`, truncated (50 chars), etc.), for the sake of readability.
391 | > - There is **no way to see the data from this endpoint** _(as it could be sensitive)_, only the keys are shown. (it's also password protected in case of, see `BASIC_AUTH_PASSWORD`)
392 |
393 | ---
394 |
395 | ## Reliability & resilience - Handling catastrophic failures (GraphCMS/Redis)
396 |
397 | This service must be resilient and reliable. It relies on Redis when the GraphCMS endpoint is down.
398 |
399 | > But, what happens if Redis fails instead of GraphCMS?
400 |
401 | In such scenario, the outcome depends on the Cache API endpoint used:
402 | - `/cache-query`: A Redis error when searching for a previous query result is gracefully handled and redis is bypassed, a request is therefore sent to the GraphCMS endpoint and results are returned to the client.
403 | This makes the service very reliable, as clients will still receive proper results, even if Redis is down.
404 | In the catastrophic case where both GraphCMS and Redis are down at the same time, a 500 response will be returned to the client.
405 | - `/refresh-cache`: This endpoint cannot work without a working redis connection, and will therefore return a 500 response.
406 | - `/reset-cache`: This endpoint cannot work without a working redis connection, and will therefore return a 500 response.
407 | - `/read-cache`: This endpoint cannot work without a working redis connection, and will therefore return a 500 response.
408 |
409 | > The most important endpoint is `/cache-query`, as it's what's used by the clients that attempt to fetch data from GraphCMS.
410 | > Therefore, it's the most resilient, and will return proper results even if GraphCMS is down _(only if the query was executed previously and the query result was properly cached)_, or if Redis is down (by re-playing the query through GraphCMS).
411 | > But, it can't handle both being down simultaneously.
412 |
413 | ---
414 |
415 | ## Logging and debugging
416 |
417 | ### Own logs
418 | We use a [`logger`](https://github.com/UnlyEd/utils-simple-logger) instance of [Winston](https://github.com/winstonjs/winston#readme) which is configured to silent logs on production environments that aren't of level `error` or higher.
419 |
420 | Logs on AWS (CloudWatch) can be accessed by running:
421 | - `NODE_ENV=production yarn logs:cache-query`
422 | - `NODE_ENV=production yarn logs:read-cache`
423 | - `NODE_ENV=production yarn logs:refresh-cache`
424 | - `NODE_ENV=production yarn logs:status`
425 |
426 | If no `NODE_ENV` is defined, `staging` environment is used by default.
427 |
428 | ### Epsagon
429 |
430 | [Epsagon](https://epsagon.com/) is a tool that helps troubleshoot what happens on AWS.
431 | It allows to see what happens on the backend, by analysing I/O network calls and generates graphs, that are very helpful to pinpoint a problem's source. ([See blog](https://epsagon.com/blog/introducing-trace-search/))
432 |
433 | Traces are configured within the project, the only required information is the `EPSAGON_APP_TOKEN` environment variable.
434 | Traces are the most interesting feature of Epsagon, and what you may eventually pay for. They allow you to visually understand what happens on the backend, and get meaningful information such as delays, return codes, logs, etc.
435 |
436 | Also, Epsagon can be used as a monitoring service, through its `setError` function. (it's manually disabled in `test` environment through the `DISABLE_EPSAGON` env variable)
437 |
438 | Errors caught through `setError` are handled by Epsagon as `Exception` and can be redirected to a slack channel using their [alerts service](https://dashboard.epsagon.com/alerts).
439 |
440 | They are very active on their slack and offer engineering-level support.
441 |
442 | #### Pricing
443 | Epsagon comes with a Free plan that enjoys 100.000 traces/month, which is more than enough for our use-case.
444 | See [their pricing page](https://epsagon.com/pricing/) for more information.
445 |
446 | #### Opt-out
447 | Epsagon will [automatically be disabled](./src/utils/epsagon.js) if you don't provide a `EPSAGON_APP_TOKEN` environment variable.
448 |
449 | > Epsagon is disabled in `test` environment, see [jest-preload.js](./jest-preload.js).
450 |
451 | #### Known issues with Epsagon
452 |
453 | - Epsagon proposes a [Serverless plugin](https://github.com/epsagon/serverless-plugin-epsagon), which **we do not use** because it doesn't play well with our `basic-auth` Authorizer. _(issue on their side with AWS API GW)_
454 | - Epsagon generates a `API GW > Lambda > API GW` infinite loop on `/refresh-cache` when used. It has therefore been disabled for that particular endpoint, see ["FIXME"](serverless.yml).
455 | If you are interested in the issue, watch [this](https://www.youtube.com/watch?v=pSAQBrr6ZtM&feature=youtu.be) and [this](https://www.youtube.com/watch?v=dbndplk_O2U), basically generated 10k calls in 1h, cost $3.
456 |
457 | ---
458 |
459 | ## API endpoints and usages
460 |
461 | ### Cache endpoint
462 |
463 | > POST `/cache-query`
464 |
465 | - Expects a GraphQL query as `body`. _(the same way it's natively handled by GCMS API)_
466 | - Forwards the query to GCMS API (if not cached already). _(will be executed by GCMS API)_
467 | - Returns the query results (from GCMS API or from the redis cache).
468 |
469 | ### Cache invalidation endpoint (refresh)
470 |
471 | > POST `/refresh-cache`
472 |
473 | - Doesn't expect any particular parameter.
474 | - Refresh all cached data by running all cached queries against GCMS API.
475 | - May be configured through your GraphCMS WebHook, so that any data modification triggers the WebHook, which will in turn refresh all cached data.
476 |
477 | > Protected by an authorization Header `GraphCMS-WebhookToken` that must contain the same token as the one defined in your REFRESH_CACHE_TOKEN environment variable
478 |
479 | ### Cache invalidation endpoint (reset)
480 |
481 | > POST `/reset-cache`
482 |
483 | - Doesn't expect any particular parameter.
484 | - Reset (wipe/flush) the whole redis cache.
485 | - May be configured through your GraphCMS WebHook, so that any data modification triggers the WebHook, which will in turn invalidate all cached data.
486 |
487 | > Protected by an authorization Header `GraphCMS-WebhookToken` that must contain the same token as the one defined in your REFRESH_CACHE_TOKEN environment variable
488 |
489 | ### Read cached keys/GQL queries from cache
490 |
491 | > GET `/read-cache`
492 |
493 | - Doesn't expect any particular parameter
494 | - Display all cached queries
495 |
496 | > Protected by Basic Auth, see `BASIC_AUTH_USERNAME` and `BASIC_AUTH_PASSWORD` env variables.
497 |
498 | ### Status
499 |
500 | > GET `/status`
501 |
502 | - Used by AWS Health Checks to warm the lambda. _(production env only)_
503 | - Can also be used to check if the lambda is running, which node version, from which git commit, etc.
504 |
505 | ---
506 |
507 | ## Advanced notions
508 |
509 | ### Multi customer instances
510 | This service also supports the deployment and management of multiple redis caches - one per customer (AKA "an instance").
511 |
512 | Basically, it allows you to spawn multiple Cache services, each with its own Redis connection and own GraphCMS/GraphQL connection.
513 | _You could also re-use credentials and token to re-use the same redis connection for several instances, although it's not what we recommend, it's up to you._
514 |
515 | Therefore, each instance is completely separated from others, with its own redis cache, its own Lambda and own API Gateway.
516 | It is not more expensive either (_assuming you're using a free RedisLabs plan and thus ignoring Redis's costs_), since the AWS infrastructure is on-demand, it'd cost the same whether all the load is on one lambda, or spread across multiple lambdas.
517 | See [Limitations](#limitations).
518 |
519 | It would still be possible to use just one redis instance with different databases (one db per customer, but the same connection for all).
520 | It really depends on your Redis service. Though, separation by clusters is not handled by our Cache system. _(feel free to open an issue and propose a PR!)_
521 |
522 | ---
523 |
524 | ## Keeping your fork up to date with this boilerplate
525 |
526 | In case you forked the project and you'd like to keep it up to date with this boilerplate, here are a few built-in scripts to help you out:
527 | - (from your fork) `yarn sync:fork` will `git pull --rebase` the boilerplate `master` branch into your own
528 | - (from your fork) `yarn sync:fork:merge` will `git pull` the boilerplate `master` branch into your own
529 |
530 | This is meant to be used manually, if you ever want to upgrade without trouble.
531 |
532 | **N.B**: Using the rebase mode will force you to force push afterwards (use it if you know what you're doing).
533 | Using merge mode will create a merge commit (ugly, but simpler). _We use the rebase mode for our own private fork._
534 |
535 | ---
536 |
537 | ## Testing
538 |
539 | You can run interactive tests using Jest with `yarn test` script.
540 |
541 | [CodeBuild](./buildspec.yml) is configured to run CI tests using `yarn test:coverage` script.
542 |
543 | ### Known issues with testing
544 |
545 | - `test:coverage` script is executed with `--detectOpenHandles --forceExit` options, because the tests aren't closing all redis connections and jest hangs and don't send the coverage report if we don't force it with `--forceExit`.
546 |     We weren't able to figure out the source of this, as it is very hard to see when connections are open/closed during tests. _(Note to self: May be related with `beforeEach/afterEach` that aren't executed on children `describe > test`)_
547 |
548 | ---
549 | ## CI with AWS CodeBuild
550 |
551 | > This step is useful only if you've **forked/cloned** the project and want to configure CI using AWS CodeBuild.
552 |
553 | Using the [AWS Console > CodeBuild](https://eu-west-1.console.aws.amazon.com/codesuite/codebuild/project/new?region=eu-west-1):
554 |
555 | > Watch the [video tutorial](https://www.youtube.com/watch?v=30Uikocfdp0&feature=youtu.be)
556 | >
557 | > _**Disclaimer**: We forgot to enable "**Privileged**" mode in the video, for the `Environment > Image > Additional configuration` and had to go to `Environment > Override image` to fix it._
558 |
559 | - Go to Create build project
560 | - Fill the project name and description, also, enable Build Badge (in case you ever want to use it)
561 | - **Source** :
562 | - Select, as Source provider, Github
563 | - Make sure you're logged in to GitHub
564 | - Then, select **Repository in my GitHub account** and enter the url of your project
565 | - Use Git clone depth: Full to avoid issues with failing builds due to missing fetched commits, necessary when using Code Climate coverage feature for instance
566 | - If you use git submodules, check the option (WARNING: If you are using private repositories as submodule, please use the HTTPS instead of SSH)
567 | - **Primary source webhook events**
568 | - Check **Rebuild every time a code change is pushed to this repository**
569 | - Select the events that should trigger a build (all events are recommended, push and PR created/updated/reopened and merged)
570 | - **Environment**
571 | - We advise you to use a Managed image instead of a Custom image
572 | - Also, because of the pricing, please only use Ubuntu as Operating system
573 | - Service role, select New service role
574 | - Leave default values for timeout, queued timeout, certificate and VPC
575 | - Compute, use the lowest configuration (for a lower cost) so 3GB memory, 2vCPUs
576 | - Use **Privileged** mode, necessary because we spawn a docker instance in our tests
577 | - **Buildspec**
578 | - The most explicit way to work with CI / CD is to use buildspec (instead of build command) - And it's already configured in this project
579 | - Leave default Buildspec name
580 | - **Artifacts**
581 |   - No artifacts are required for CI. For CD, you can use S3 to provide files to codeDeploy.
582 | - **Logs**
583 |   - Cloudwatch logs are a good way to check builds and debug in case of failure.
584 | - No need to store logs data on S3
585 |
586 | ---
587 | ## Redis
588 |
589 | > We created our Redis instances on [Redis Labs](https://app.redislabs.com/#/subscription/new/plan).
590 |
591 | ### Known Redis limitations
592 |
593 | As we run on a Free plan, there are a few limitations to consider:
594 | - Data storage on redis is **limited to 30MB**, which is enough for our use-case, but may be a limitation
595 | - The free plan offers a **limited set of 30 active connections**, you'll get an email warning you when you reach an 80% threshold
596 | - Your instance will be **automatically deleted if not used for 30 days**, you'll get an email 3 days before and 24h before
597 |
598 | > Due to those limitations, we strongly recommend to run this service with one instance per customer (multi-tenants)
599 | > This way, you will avoid edge cases such as:
600 | > - CustomerA triggering too many connections, which would take down CustomerD.
601 | > - Adding a CustomerZ, which caches a bit more data that goes over the 30MB limit, hence impacting all your customers.
602 | > - Triggering a cache refresh will refresh all queries, without knowledge of "to whom" the query/data belongs, which may likely not be what you want.
603 | >
604 | > Using a dedicated redis instance per customer fixes that too.
605 |
606 | ### Select Subscription plan
607 |
608 | One important thing not to miss when creating the Subscription, is to select the right availability zone (AZ), which depends on where you're located.
609 | We selected `eu-west-1`, which is Ireland, because it's the closest to us.
610 |
611 | You won't be able to select a different AZ in free plan, so choose carefully.
612 | The database can only be created in the same region as the one selected for the subscription.
613 |
614 | ### Database configuration
615 |
616 | Once a subscription is created, you can create a database (our redis instance).
617 |
618 | #### Data eviction policy
619 | A redis instance can be configured with those values:
620 |
621 | - `noeviction`: returns error if memory limit has been reached when trying to insert more data
622 | - `allkeys-lru`: evicts the least recently used keys out of all keys
623 | - `allkeys-lfu`: evicts the least frequently used keys out of all keys
624 | - `allkeys-random`: randomly evicts keys out of all keys
625 | - `volatile-lru`: evicts the least recently used keys out of keys with an "expire" field set
626 | - `volatile-lfu`: evicts the least frequently used keys out of keys with an "expire" field set
627 | - `volatile-ttl`: evicts the shortest time-to-live and least recently used keys out of keys with an "expire" field set
628 | - `volatile-random`: randomly evicts keys with an "expire" field set
629 |
630 | > The recommended choice is `allkeys-lfu`, so that the impact of re-fetching data is minimised as much as possible.
631 |
632 | ---
633 |
634 | ## Other known limitations and considerations
635 |
636 | - The `/refresh-cache` endpoint has a timeout of 30 seconds.
637 | There is no built-in way to handle workload longer than 30s yet.
638 | This can be an issue if there are too many GraphCMS queries in the redis cache (which will trigger a TimeOut error),
639 | as they may not all be updated when trying to invalidate the redis cache.
640 | If a timeout happens, you can know which keys have been updated by looking at `/read-cache` `updatedAt` data,
641 | but there is no built-in way to automatically handle that limitation (yet).
642 | Also, it's very likely that even if you run `/refresh-cache` multiple times, since the redis keys are gonna be refreshed in the same order,
643 | it therefore should fail for the same keys across multiple attempts. (but it also depends on how fast GCMS API replies to each API calls, and that's not predictable at all)
644 | - When the `/refresh-cache` or `/read-cache` are called, the `redis.keys` method is used, which is blocking and not recommended for production applications.
645 | A better implementation should be made there, probably following [this](https://github.com/luin/ioredis#streamify-scanning).
646 | It is not such a concern though, since those endpoints should rarely be called, and it won't be an issue if the redis store doesn't contain lots of keys anyway.
647 |
648 | ---
649 |
650 | ## Changelog
651 |
652 | > Updates are consolidated in our [CHANGELOG](./CHANGELOG.md) file.
653 |
654 | It's meant to be a developer-friendly way to know what benefits you'll get from updating your clone/fork, and provides an update history.
655 |
656 | ---
657 |
658 | ## Contributing
659 |
660 | ### Versions
661 |
662 | #### SemVer
663 |
664 | We use Semantic Versioning for this project: https://semver.org/. (`vMAJOR.MINOR.PATCH`: `v1.0.1`)
665 |
666 | - Major version: Must be changed when Breaking Changes are made (public API isn't backward compatible).
667 | - A function has been renamed/removed from the public API
668 | - A function's return value has changed and may break existing implementations
669 | - Something has changed that will cause the app to behave differently with the same configuration
670 | - Minor version: Must be changed when a new feature is added or updated (without breaking change nor behavioral change)
671 | - Patch version: Must be changed when any change is made that is neither Major nor Minor. (Misc, doc, etc.)
672 |
673 | #### Release a new version
674 |
675 | > Note: You should write the CHANGELOG.md doc before releasing the version.
676 | This way, it'll be included in the same commit as the built files and version update
677 |
678 | Then, release a new version:
679 |
680 | - `yarn run release`
681 |
682 | This command will prompt you for the version to update to, create a git tag, build the files and commit/push everything automatically.
683 |
684 | > Don't forget we are using SemVer, please follow our SemVer rules.
685 |
686 | **Pro hint**: use the `beta` tag if you're releasing a work-in-progress (or are unsure), to avoid publishing WIP versions that look legitimate
687 |
688 | ### Code style
689 |
690 | Code style is enforced by `.editorconfig` and files within the `.idea/` folder.
691 | We also use EsLint, and extend AirBnb code style.
692 |
693 | ### Working on the project - IDE
694 |
695 | WebStorm IDE is the preferred IDE for this project, as it comes pre-configured with debug configurations and code style rules.
696 | Only common configuration files (meant to be shared) have been tracked on git. (see [`.gitignore`](./.gitignore))
697 |
698 | # Vulnerability disclosure
699 |
700 | [See our policy](https://github.com/UnlyEd/Unly).
701 |
702 | ---
703 |
704 | # Contributors and maintainers
705 |
706 | This project is being maintained by:
707 | - [Unly] Ambroise Dhenain ([Vadorequest](https://github.com/vadorequest)) **(active)**
708 | - [Contributor] Hugo Martin ([Demmonius](https://github.com/Demmonius)) **(active)**
709 |
710 | ---
711 |
712 | # **[ABOUT UNLY]**
713 |
714 | > [Unly](https://unly.org) is a socially responsible company, fighting inequality and facilitating access to higher education.
715 | > Unly is committed to making education more inclusive, through responsible funding for students.
716 | We provide technological solutions to help students find the necessary funding for their studies.
717 |
718 | We proudly participate in many TechForGood initiatives. To support and learn more about our actions to make education accessible, visit:
719 | - https://twitter.com/UnlyEd
720 | - https://www.facebook.com/UnlyEd/
721 | - https://www.linkedin.com/company/unly
722 | - [Interested to work with us?](https://jobs.zenploy.io/unly/about)
723 |
724 | Tech tips and tricks from our CTO on our [Medium page](https://medium.com/unly-org/tech/home)!
725 |
726 | #TECHFORGOOD #EDUCATIONFORALL
727 |
--------------------------------------------------------------------------------
/buildspec.yml:
--------------------------------------------------------------------------------
1 | version: 0.2
2 |
3 | env:
4 | # Please refer to https://github.com/UnlyEd/slack-codebuild
5 | variables: # TODO Change SLACK_WEBHOOK_URL to match your own and CC_TEST_REPORTER_ID (CodeClimate), or remove usage of "slack-codebuild" within this file
6 | SLACK_WEBHOOK_URL: "https://hooks.slack.com/services/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"
7 | CODEBUILD_NOTIFY_ONLY_IF_FAIL: 1
8 | CC_TEST_REPORTER_ID: c5805dbb2421dd9e7a3f9f26f4382d7ea9bfdf8c9e228e29b80a2fe15c2bf7e1
9 |
10 | phases:
11 | install:
12 | runtime-versions:
13 | docker: 18
14 | nodejs: 10
15 | commands:
16 | - nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://127.0.0.1:2375 --storage-driver=overlay2&
17 | - timeout 15 sh -c "until docker info; do echo .; sleep 1; done"
18 | - yarn --production=false
19 | - yarn global add @unly/slack-codebuild
20 | - echo Installing codebuild-extras... # Install and execute aws-codebuild-extra, which adds env variables necessary on CodeBuild (including some for CodeClimate)
21 | - curl -fsSL https://raw.githubusercontent.com/UnlyEd/aws-codebuild-extras/master/install >> extras.sh
22 | - . ./extras.sh
23 |
24 | # See https://github.com/codeclimate/test-reporter/issues/379 for additional info regarding how to setup CodeBuild with CodeClimate
25 | pre_build:
26 | commands:
27 | - curl -L https://codeclimate.com/downloads/test-reporter/test-reporter-latest-linux-amd64 > ./cc-test-reporter
28 | - chmod +x ./cc-test-reporter
29 | - ./cc-test-reporter before-build
30 |
31 | build:
32 | commands:
33 | - yarn lint:once
34 | - yarn test:coverage
35 |
36 | post_build:
37 | commands:
38 | - ./cc-test-reporter format-coverage -t lcov --prefix ${CODEBUILD_SRC_DIR} # Looks for ./coverage/lcov.info
39 | - ./cc-test-reporter after-build --debug -t lcov --exit-code $? # Uploads ./coverage/lcov.info and ./coverage/codeclimate.json
40 | finally:
41 | - slack-codebuild
42 |
--------------------------------------------------------------------------------
/gql/querySchema.js:
--------------------------------------------------------------------------------
1 | import gql from 'graphql-tag';
2 |
// Example GraphQL introspection query: reads the `kind` of the schema's
// mutation type via the `__schema` meta-field.
// NOTE(review): presumably a lightweight sample query used to exercise the
//  cache endpoints — confirm against the functions/tests that import it.
export const querySchemaData = gql`
  query exampleQuery1{
    __schema {
      mutationType {
        kind
      }
    }
  }
`;
12 |
// Second example introspection query: reads the mutation type's `description`
// and the `isDeprecated` flag of its enum values.
// NOTE(review): distinct from `exampleQuery1` — presumably used to verify that
//  different queries produce different cache entries; confirm against callers.
export const querySchemaData2 = gql`
  query exampleQuery2{
    __schema {
      mutationType {
        description
        enumValues {
          isDeprecated
        }
      }
    }
  }
`;
25 |
--------------------------------------------------------------------------------
/jest-preload.js:
--------------------------------------------------------------------------------
// Jest preload script, loaded through "setupFilesAfterEnv" (see package.json).
// Loads the generated env file, refuses to run outside the "test" environment,
// raises the per-test timeout, and mutes console.log noise during test runs.
require('dotenv').config({ path: '.serverless/.env' });
// XXX Env vars are always strings — assign 'true' explicitly instead of a
//  boolean (which would be coerced anyway). Avoids tests crashing because
//  epsagon isn't initialised.
process.env.DISABLE_EPSAGON = 'true';

if (process.env.NODE_ENV !== 'test') {
  // Hard stop: running the suite against a non-test environment could hit real
  // services/data. Include the offending value to ease debugging.
  throw new Error(`Non-test environment (NODE_ENV="${process.env.NODE_ENV}")`);
}

jest.setTimeout(30000); // XXX Avoids tests timing out on slow networks

// XXX Replace the global console so that console.log is muted (avoids noise
//  from errors that are meant to be logged — Winston uses console.log instead
//  of console.error for errors), while keeping the other levels visible.
global.console = {
  log: jest.fn(),

  error: console.error,
  warn: console.warn,
  info: console.info,
  debug: console.debug,
};
18 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "gcms-cache-boilerplate",
3 | "description": "",
4 | "license": "MIT",
5 | "version": "2.2.1",
6 | "scripts": {
7 | "sls:dotenv": "NODE_ENV=${NODE_ENV:-development}; sls dotenv",
8 | "sls:login": "serverless login",
9 | "start": "yarn delete:redis && cross-env-shell 'concurrently -p '{name}' -n 'redis,server,eslint' -c 'gray.bgWhite,yellow.bgBlue,orange.bgWhite' \"yarn start:redis\" \"yarn start:server\" \"yarn lint\"'",
10 | "start:server": "NODE_ENV=${NODE_ENV:-development}; SLS_DEBUG=* yarn sls:dotenv && serverless offline start",
11 | "emulate:local": "NODE_ENV=development; yarn sls:dotenv && node -r esm -r dotenv/config src/cli/index.js dotenv_config_path=./.serverless/.env",
12 | "create:redis": "NODE_ENV=${NODE_ENV:-development}; (docker run --name cache_tfp_redis_${NODE_ENV} -d -p 6379:6379 redis redis-server --requirepass localpass || true) && yarn stop:redis ;",
13 | "start:redis": "NODE_ENV=${NODE_ENV:-development}; yarn create:redis ; docker start -a cache_tfp_redis_${NODE_ENV}",
14 | "start:redis:daemon": "NODE_ENV=${NODE_ENV:-development}; yarn create:redis && docker start cache_tfp_redis_${NODE_ENV}",
15 | "stop:redis": "NODE_ENV=${NODE_ENV:-development}; docker stop cache_tfp_redis_${NODE_ENV}",
16 | "delete:redis": "NODE_ENV=${NODE_ENV:-development}; yarn stop:redis && docker rm cache_tfp_redis_${NODE_ENV} -f || true",
17 | "flush:redis": "NODE_ENV=${NODE_ENV:-development}; export `cat .env.${NODE_ENV} | sed -e '/^[ \\t]*#/d' | tr -d ' '` && REDIS_PORT=`echo $REDIS_URL | cut -d':' -f2` && echo FLUSHALL | npx redis-cli -h `echo $REDIS_URL | cut -d':' -f1` -a $REDIS_PASSWORD -p $REDIS_PORT",
18 | "deploy:all:all": "yarn deploy:all && yarn deploy:all:production",
19 | "deploy:all": "yarn deploy:demo && yarn deploy:skema && yarn deploy:gem && yarn deploy:essec",
20 | "deploy:all:production": "yarn deploy:demo:production && yarn deploy:skema:production && yarn deploy:gem:production && yarn deploy:essec:production",
21 | "deploy": "yarn deploy:demo",
22 | "check:demo": "NODE_ENV=staging sls print -s demoStaging",
23 | "check:demo:production": "NODE_ENV=production sls print -s demo",
24 | "deploy:demo": "NODE_ENV=staging sls deploy -s demoStaging",
25 | "deploy:demo:production": "NODE_ENV=production sls deploy -s demo",
26 | "emulate:demo": "NODE_ENV=staging yarn sls:dotenv -s demoStaging && node -r esm -r dotenv/config src/cli/index.js dotenv_config_path=./.serverless/.env",
27 | "emulate:demo:production": "NODE_ENV=production yarn sls:dotenv -s demo && node -r esm -r dotenv/config src/cli/index.js dotenv_config_path=./.serverless/.env",
28 | "logs:demo:cache-query": "NODE_ENV=staging sls logs -f cache-query -s demoStaging",
29 | "logs:demo:cache-query:production": "NODE_ENV=production sls logs -f cache-query -s demo",
30 | "logs:demo:refresh-cache": "NODE_ENV=staging sls logs -f refresh-cache -s demoStaging",
31 | "logs:demo:refresh-cache:production": "NODE_ENV=production sls logs -f refresh-cache -s demo",
32 | "logs:demo:read-cache": "NODE_ENV=staging sls logs -f read-cache -s demoStaging",
33 | "logs:demo:read-cache:production": "NODE_ENV=production sls logs -f read-cache -s demo",
34 | "logs:demo:status": "NODE_ENV=staging sls logs -f status -s demoStaging",
35 | "logs:demo:status:production": "NODE_ENV=production sls logs -f status -s demo",
36 | "create:demo": "NODE_ENV=staging sls create_domain -s demoStaging",
37 | "create:demo:production": "NODE_ENV=production sls create_domain -s demo",
38 | "remove:demo": "NODE_ENV=staging sls remove -s demoStaging",
39 | "remove:demo:production": "NODE_ENV=production sls remove -s demo",
40 | "info:demo": "NODE_ENV=staging sls info -s demoStaging",
41 | "info:demo:production": "NODE_ENV=production sls info -s demo",
42 | "preversion": "yarn doc:toc && yarn lint:once && yarn test:once",
43 | "release": "yarn bump --prompt --commit --tag && git add CHANGELOG.md README.md && git commit --amend --no-edit && git push && git push --tags",
44 | "doc:toc": "yarn markdown-toc --maxdepth 4 -i README.md",
45 | "sync:fork": "BRANCH=${BRANCH:-master}; git pull --rebase git@github.com:UnlyEd/GraphCMS-cache-boilerplate.git ${BRANCH}",
46 | "sync:fork:merge": "BRANCH=${BRANCH:-master}; git pull git@github.com:UnlyEd/GraphCMS-cache-boilerplate.git ${BRANCH}",
47 | "lint": "esw src/** -w",
48 | "lint:once": "eslint src/**",
49 | "lint:fix": "eslint src/** --fix",
50 | "lint:fix:preview": "eslint src/** --fix-dry-run",
51 | "test": "NODE_ENV=test yarn sls:dotenv && yarn delete:redis ; yarn start:redis:daemon && jest --runInBand --watchAll",
52 | "test:noredis": "NODE_ENV=test yarn sls:dotenv && jest --runInBand --watchAll",
53 | "test:once": "NODE_ENV=test yarn sls:dotenv && yarn delete:redis ; yarn start:redis:daemon && jest --runInBand --detectOpenHandles --forceExit && yarn stop:redis",
54 | "test:coverage": "NODE_ENV=test yarn sls:dotenv && yarn delete:redis ; yarn start:redis:daemon && jest --coverage --runInBand --detectOpenHandles --forceExit && yarn stop:redis"
55 | },
56 | "jest": {
57 | "setupFilesAfterEnv": [
58 | "./jest-preload.js",
59 | "jest-extended"
60 | ],
61 | "verbose": true
62 | },
63 | "dependencies": {
64 | "@unly/utils-simple-logger": "1.0.2",
65 | "apollo-cache-inmemory": "1.6.1",
66 | "apollo-client": "2.6.1",
67 | "apollo-link-http": "1.5.14",
68 | "deepmerge": "4.0.0",
69 | "dotenv": "8.0.0",
70 | "epsagon": "1.31.7",
71 | "esm": "3.2.25",
72 | "graphql": "14.3.1",
73 | "graphql-tag": "2.10.1",
74 | "inquirer": "6.3.1",
75 | "ioredis": "4.14.0",
76 | "lodash.filter": "4.6.0",
77 | "lodash.foreach": "4.5.0",
78 | "lodash.get": "4.4.2",
79 | "lodash.includes": "4.3.0",
80 | "lodash.map": "4.6.0",
81 | "lodash.startswith": "4.2.1",
82 | "log-symbols": "3.0.0",
83 | "moment": "2.24.0",
84 | "node-fetch": "2.6.0",
85 | "source-map-support": "0.5.12",
86 | "winston": "3.2.1"
87 | },
88 | "devDependencies": {
89 | "@babel/cli": "7.4.4",
90 | "@babel/core": "7.4.5",
91 | "@babel/preset-env": "7.4.5",
92 | "@unly/serverless-env-copy-plugin": "1.0.3",
93 | "babel-jest": "24.8.0",
94 | "babel-loader": "8.0.6",
95 | "babel-plugin-source-map-support": "2.0.1",
96 | "concurrently": "4.1.0",
97 | "cross-env": "5.2.0",
98 | "eslint": "6.3.0",
99 | "eslint-config-airbnb-base": "14.0.0",
100 | "eslint-plugin-import": "2.18.2",
101 | "eslint-plugin-jest": "22.17.0",
102 | "eslint-watch": "6.0.0",
103 | "git-revision-webpack-plugin": "3.0.3",
104 | "jest": "24.8.0",
105 | "jest-extended": "0.11.1",
106 | "markdown-toc": "1.2.0",
107 | "redis-cli": "1.3.1",
108 | "serverless": "1.74.0",
109 | "serverless-domain-manager": "3.2.2",
110 | "serverless-dotenv-plugin": "2.0.1",
111 | "serverless-offline": "5.7.2",
112 | "serverless-webpack": "5.3.1",
113 | "version-bump-prompt": "4.2.2",
114 | "wait-for-expect": "1.2.0",
115 | "webpack": "4.34.0",
116 | "webpack-node-externals": "1.7.2"
117 | }
118 | }
--------------------------------------------------------------------------------
/schema.graphql:
--------------------------------------------------------------------------------
1 | # This file was generated based on ".graphqlconfig". Do not edit manually.
2 |
3 | schema {
4 | query: Query
5 | mutation: Mutation
6 | }
7 |
8 | "An object with an ID"
9 | interface Node {
10 | "The id of the object."
11 | id: ID!
12 | "The Stage of an object"
13 | stage: Stage!
14 | }
15 |
16 | type Aggregate {
17 | count: Int!
18 | }
19 |
20 | "Asset system model"
21 | type Asset implements Node {
22 | "The time the document was created"
23 | createdAt(
24 | "Variation of DateTime field to return, allows value from base document, current localization, or combined by returning the newer value of both"
25 | variation: SystemDateTimeFieldVariation! = COMBINED
26 | ): DateTime!
27 | "Get the document in other stages"
28 | documentInStages(
29 | "Decides if the current stage should be included or not"
30 | includeCurrent: Boolean! = false,
31 | "Decides if the documents should match the parent documents locale or should use the fallback order defined in the tree"
32 | inheritLocale: Boolean! = false,
33 | "Potential stages that should be returned"
34 | stages: [Stage!]! = [PUBLISHED, DRAFT]
35 | ): [Asset!]!
36 | "The file name"
37 | fileName: String!
38 | "The file handle"
39 | handle: String!
40 | "The height of the file"
41 | height: Float
42 | "List of Asset versions"
43 | history(
44 | limit: Int! = 10,
45 | skip: Int! = 0,
46 | "This is optional and can be used to fetch the document version history for a specific stage instead of the current one"
47 | stageOverride: Stage
48 | ): [Version!]!
49 | "The unique identifier"
50 | id: ID!
51 | "System Locale field"
52 | locale: Locale!
53 | "Get the other localizations for this document"
54 | localizations(
55 | "Decides if the current locale should be included or not"
56 | includeCurrent: Boolean! = false,
57 | "Potential locales that should be returned"
58 | locales: [Locale!]! = [en]
59 | ): [Asset!]!
60 | "The mime type of the file"
61 | mimeType: String
62 | "The time the document was published. Null on documents in draft stage."
63 | publishedAt(
64 | "Variation of DateTime field to return, allows value from base document, current localization, or combined by returning the newer value of both"
65 | variation: SystemDateTimeFieldVariation! = COMBINED
66 | ): DateTime
67 | "The file size"
68 | size: Float
69 | "System stage field"
70 | stage: Stage!
71 | "The time the document was updated"
72 | updatedAt(
73 | "Variation of DateTime field to return, allows value from base document, current localization, or combined by returning the newer value of both"
74 | variation: SystemDateTimeFieldVariation! = COMBINED
75 | ): DateTime!
76 | "Get the url for the asset with provided transformations applied."
77 | url(transformation: AssetTransformationInput): String!
78 | "The file width"
79 | width: Float
80 | }
81 |
82 | "A connection to a list of items."
83 | type AssetConnection {
84 | aggregate: Aggregate!
85 | "A list of edges."
86 | edges: [AssetEdge!]!
87 | "Information to aid in pagination."
88 | pageInfo: PageInfo!
89 | }
90 |
91 | "An edge in a connection."
92 | type AssetEdge {
93 | "A cursor for use in pagination."
94 | cursor: String!
95 | "The item at the end of the edge."
96 | node: Asset!
97 | }
98 |
99 | type BatchPayload {
100 | "The number of nodes that have been affected by the Batch operation."
101 | count: Long!
102 | }
103 |
104 | "Representing a color value comprising of HEX, RGBA and css color values"
105 | type Color {
106 | css: String!
107 | hex: Hex!
108 | rgba: RGBA!
109 | }
110 |
111 | type DocumentVersion {
112 | createdAt: DateTime!
113 | data: Json
114 | id: ID!
115 | revision: Int!
116 | stage: Stage!
117 | }
118 |
119 | "Representing a geolocation point with latitude and longitude"
120 | type Location {
121 | distance(from: LocationInput!): Float!
122 | latitude: Float!
123 | longitude: Float!
124 | }
125 |
126 | type Mutation {
127 | "Create one asset"
128 | createAsset(data: AssetCreateInput!): Asset @deprecated(reason : "Asset mutations will be overhauled soon")
129 | "Delete one asset from _all_ existing stages. Returns deleted document."
130 | deleteAsset(
131 | "Document to delete"
132 | where: AssetWhereUniqueInput!
133 | ): Asset
134 | "Delete many Asset documents"
135 | deleteManyAssets(
136 | "Documents to delete"
137 | where: AssetManyWhereInput
138 | ): BatchPayload! @deprecated(reason : "Please use the new paginated many mutation (deleteManyAssetsConnection)")
139 | "Delete many Asset documents, return deleted documents"
140 | deleteManyAssetsConnection(
141 | after: ID,
142 | before: ID,
143 | first: Int,
144 | last: Int,
145 | skip: Int,
146 | "Documents to delete"
147 | where: AssetManyWhereInput
148 | ): AssetConnection!
149 | "Publish one asset"
150 | publishAsset(
151 | "Optional localizations to publish"
152 | locales: [Locale!],
153 | "Whether to publish the base document"
154 | publishBase: Boolean = true,
155 | "Publishing target stage"
156 | to: [Stage!]! = [PUBLISHED],
157 | "Document to publish"
158 | where: AssetWhereUniqueInput!,
159 | "Whether to include the default locale when publishBase is set"
160 | withDefaultLocale: Boolean = true
161 | ): Asset
162 | "Publish many Asset documents"
163 | publishManyAssets(
164 | "Document localizations to publish"
165 | locales: [Locale!],
166 | "Whether to publish the base document"
167 | publishBase: Boolean = true,
168 | "Stages to publish documents to"
169 | to: [Stage!]! = [PUBLISHED],
170 | "Identifies documents in each stage to be published"
171 | where: AssetManyWhereInput,
172 | "Whether to include the default locale when publishBase is true"
173 | withDefaultLocale: Boolean = true
174 | ): BatchPayload! @deprecated(reason : "Please use the new paginated many mutation (publishManyAssetsConnection)")
175 | "Publish many Asset documents"
176 | publishManyAssetsConnection(
177 | after: ID,
178 | before: ID,
179 | first: Int,
180 | "Stage to find matching documents in"
181 | from: Stage = DRAFT,
182 | last: Int,
183 | "Document localizations to publish"
184 | locales: [Locale!],
185 | "Whether to publish the base document"
186 | publishBase: Boolean = true,
187 | skip: Int,
188 | "Stages to publish documents to"
189 | to: [Stage!]! = [PUBLISHED],
190 | "Identifies documents in each stage to be published"
191 | where: AssetManyWhereInput,
192 | "Whether to include the default locale when publishBase is true"
193 | withDefaultLocale: Boolean = true
194 | ): AssetConnection!
195 | "Unpublish one asset from selected stages. Unpublish either the complete document with its relations, localizations and base data or specific localizations only."
196 | unpublishAsset(
197 | "Stages to unpublish document from"
198 | from: [Stage!]! = [PUBLISHED],
199 | "Optional locales to unpublish. Unpublishing the default locale will completely remove the document from the selected stages"
200 | locales: [Locale!],
201 | "Unpublish complete document including default localization and relations from stages. Can be disabled."
202 | unpublishBase: Boolean = true,
203 | "Document to unpublish"
204 | where: AssetWhereUniqueInput!
205 | ): Asset
206 | "Unpublish many Asset documents"
207 | unpublishManyAssets(
208 | "Stages to unpublish documents from"
209 | from: [Stage!]! = [PUBLISHED],
210 | "Locales to unpublish"
211 | locales: [Locale!],
212 | "Whether to unpublish the base document and default localization"
213 | unpublishBase: Boolean = true,
214 | "Identifies documents in each stage"
215 | where: AssetManyWhereInput
216 | ): BatchPayload! @deprecated(reason : "Please use the new paginated many mutation (unpublishManyAssetsConnection)")
217 | "Find many Asset documents that match criteria in specified stage and unpublish from target stages"
218 | unpublishManyAssetsConnection(
219 | after: ID,
220 | before: ID,
221 | first: Int,
222 | "Stages to unpublish documents from"
223 | from: [Stage!]! = [PUBLISHED],
224 | last: Int,
225 | "Locales to unpublish"
226 | locales: [Locale!],
227 | skip: Int,
228 | "Stage to find matching documents in"
229 | stage: Stage = DRAFT,
230 | "Whether to unpublish the base document and default localization"
231 | unpublishBase: Boolean = true,
232 | "Identifies documents in draft stage"
233 | where: AssetManyWhereInput
234 | ): AssetConnection!
235 | "Update one asset"
236 | updateAsset(data: AssetUpdateInput!, where: AssetWhereUniqueInput!): Asset
237 | "Update many assets"
238 | updateManyAssets(
239 | "Updates to document content"
240 | data: AssetUpdateManyInput!,
241 | "Documents to apply update on"
242 | where: AssetManyWhereInput
243 | ): BatchPayload! @deprecated(reason : "Please use the new paginated many mutation (updateManyAssetsConnection)")
244 | "Update many Asset documents"
245 | updateManyAssetsConnection(
246 | after: ID,
247 | before: ID,
248 | "Updates to document content"
249 | data: AssetUpdateManyInput!,
250 | first: Int,
251 | last: Int,
252 | skip: Int,
253 | "Documents to apply update on"
254 | where: AssetManyWhereInput
255 | ): AssetConnection!
256 | "Upsert one asset"
257 | upsertAsset(upsert: AssetUpsertInput!, where: AssetWhereUniqueInput!): Asset
258 | }
259 |
260 | "Information about pagination in a connection."
261 | type PageInfo {
262 | "When paginating forwards, the cursor to continue."
263 | endCursor: String
264 | "When paginating forwards, are there more items?"
265 | hasNextPage: Boolean!
266 | "When paginating backwards, are there more items?"
267 | hasPreviousPage: Boolean!
268 | "Number of items in the current page."
269 | pageSize: Int
270 | "When paginating backwards, the cursor to continue."
271 | startCursor: String
272 | }
273 |
274 | type Query {
275 | "Retrieve a single asset"
276 | asset(
277 | """
278 |
279 | Defines which locales should be returned.
280 |
281 | Note that `Asset` will be affected directly by this argument, as well as any other related models with localized fields in the query's subtree.
282 | The first locale matching the provided list will be returned, entries with non matching locales will be filtered out.
283 |
284 | This argument may be overwritten by another locales definition in a relational child field, this will effectively use the overwritten argument for the affected query's subtree.
285 | """
286 | locales: [Locale!]! = [en],
287 | stage: Stage! = PUBLISHED,
288 | where: AssetWhereUniqueInput!
289 | ): Asset
290 | "Retrieve document version"
291 | assetVersion(where: VersionWhereInput!): DocumentVersion
292 | "Retrieve multiple assets"
293 | assets(
294 | after: String,
295 | before: String,
296 | first: Int,
297 | last: Int,
298 | """
299 |
300 | Defines which locales should be returned.
301 |
302 | Note that `Asset` will be affected directly by this argument, as well as any other related models with localized fields in the query's subtree.
303 | The first locale matching the provided list will be returned, entries with non matching locales will be filtered out.
304 |
305 | This argument may be overwritten by another locales definition in a relational child field, this will effectively use the overwritten argument for the affected query's subtree.
306 | """
307 | locales: [Locale!]! = [en],
308 | orderBy: AssetOrderByInput,
309 | skip: Int,
310 | stage: Stage! = PUBLISHED,
311 | where: AssetWhereInput
312 | ): [Asset!]!
313 | "Retrieve multiple assets using the Relay connection interface"
314 | assetsConnection(
315 | after: String,
316 | before: String,
317 | first: Int,
318 | last: Int,
319 | """
320 |
321 | Defines which locales should be returned.
322 |
323 | Note that `Asset` will be affected directly by this argument, as well as any other related models with localized fields in the query's subtree.
324 | The first locale matching the provided list will be returned, entries with non matching locales will be filtered out.
325 |
326 | This argument may be overwritten by another locales definition in a relational child field, this will effectively use the overwritten argument for the affected query's subtree.
327 | """
328 | locales: [Locale!]! = [en],
329 | orderBy: AssetOrderByInput,
330 | skip: Int,
331 | stage: Stage! = PUBLISHED,
332 | where: AssetWhereInput
333 | ): AssetConnection!
334 | "Fetches an object given its ID"
335 | node(
336 | "The ID of an object"
337 | id: ID!,
338 | """
339 |
340 | Defines which locales should be returned.
341 |
342 | Note that `Node` is a model without localized fields and will not be affected directly by this argument, however the locales will be passed on to any relational fields in the query's subtree for filtering.
343 | For related models with localized fields in the query's subtree, the first locale matching the provided list of locales will be returned, entries with non matching locales will be filtered out.
344 |
345 | This argument may be overwritten by another locales definition in a relational child field, this will effectively use the overwritten argument for the affected query's subtree.
346 | """
347 | locales: [Locale!]! = [en],
348 | stage: Stage! = PUBLISHED
349 | ): Node
350 | }
351 |
352 | "Representing a RGBA color value: https://developer.mozilla.org/en-US/docs/Web/CSS/color_value#rgb()_and_rgba()"
353 | type RGBA {
354 | a: RGBATransparency!
355 | b: RGBAHue!
356 | g: RGBAHue!
357 | r: RGBAHue!
358 | }
359 |
360 | "Custom type representing a rich text value comprising of raw rich text ast, html, markdown and text values"
361 | type RichText {
362 | "Returns HTMl representation"
363 | html: String!
364 | "Returns Markdown representation"
365 | markdown: String!
366 | "Returns AST representation"
367 | raw: RichTextAST!
368 | "Returns plain-text contents of RichText"
369 | text: String!
370 | }
371 |
372 | type Version {
373 | createdAt: DateTime!
374 | id: ID!
375 | revision: Int!
376 | stage: Stage!
377 | }
378 |
379 | enum AssetOrderByInput {
380 | createdAt_ASC
381 | createdAt_DESC
382 | fileName_ASC
383 | fileName_DESC
384 | handle_ASC
385 | handle_DESC
386 | height_ASC
387 | height_DESC
388 | id_ASC
389 | id_DESC
390 | mimeType_ASC
391 | mimeType_DESC
392 | publishedAt_ASC
393 | publishedAt_DESC
394 | size_ASC
395 | size_DESC
396 | updatedAt_ASC
397 | updatedAt_DESC
398 | width_ASC
399 | width_DESC
400 | }
401 |
402 | enum DocumentFileTypes {
403 | doc
404 | docx
405 | html
406 | jpg
407 | odp
408 | ods
409 | odt
410 | pdf
411 | png
412 | ppt
413 | pptx
414 | svg
415 | txt
416 | webp
417 | xls
418 | xlsx
419 | }
420 |
421 | enum ImageFit {
422 | "Resizes the image to fit within the specified parameters without distorting, cropping, or changing the aspect ratio."
423 | clip
424 | "Resizes the image to fit the specified parameters exactly by removing any parts of the image that don't fit within the boundaries."
425 | crop
426 | "Resizes the image to fit within the parameters, but as opposed to 'fit:clip' will not scale the image if the image is smaller than the output size."
427 | max
428 | "Resizes the image to fit the specified parameters exactly by scaling the image to the desired size. The aspect ratio of the image is not respected and the image can be distorted using this method."
429 | scale
430 | }
431 |
432 | "Locale system enumeration"
433 | enum Locale {
434 | en
435 | }
436 |
437 | "Stage system enumeration"
438 | enum Stage {
439 | "The Draft is the default stage for all your content."
440 | DRAFT
441 | "The Published stage is where you can publish your content to."
442 | PUBLISHED
443 | }
444 |
445 | enum SystemDateTimeFieldVariation {
446 | BASE
447 | COMBINED
448 | LOCALIZATION
449 | }
450 |
451 | "System User Kind"
452 | enum UserKind {
453 | MEMBER
454 | PAT
455 | PUBLIC
456 | WEBHOOK
457 | }
458 |
459 | enum _FilterKind {
460 | AND
461 | NOT
462 | OR
463 | contains
464 | contains_all
465 | contains_none
466 | contains_some
467 | ends_with
468 | eq
469 | eq_not
470 | gt
471 | gte
472 | in
473 | lt
474 | lte
475 | not_contains
476 | not_ends_with
477 | not_in
478 | not_starts_with
479 | relational_every
480 | relational_none
481 | relational_single
482 | relational_some
483 | search
484 | starts_with
485 | }
486 |
487 | enum _MutationInputFieldKind {
488 | enum
489 | relation
490 | richText
491 | scalar
492 | union
493 | virtual
494 | }
495 |
496 | enum _MutationKind {
497 | create
498 | delete
499 | deleteMany
500 | publish
501 | publishMany
502 | unpublish
503 | unpublishMany
504 | update
505 | updateMany
506 | upsert
507 | }
508 |
509 | enum _OrderDirection {
510 | asc
511 | desc
512 | }
513 |
514 | enum _RelationInputCardinality {
515 | many
516 | one
517 | }
518 |
519 | enum _RelationInputKind {
520 | create
521 | update
522 | }
523 |
524 | enum _RelationKind {
525 | regular
526 | union
527 | }
528 |
529 | enum _SystemDateTimeFieldVariation {
530 | base
531 | combined
532 | localization
533 | }
534 |
535 | input AssetCreateInput {
536 | createdAt: DateTime
537 | fileName: String!
538 | handle: String!
539 | height: Float
540 | "Inline mutations for managing document localizations excluding the default locale"
541 | localizations: AssetCreateLocalizationsInput
542 | mimeType: String
543 | size: Float
544 | updatedAt: DateTime
545 | width: Float
546 | }
547 |
548 | input AssetCreateLocalizationDataInput {
549 | createdAt: DateTime
550 | fileName: String!
551 | handle: String!
552 | height: Float
553 | mimeType: String
554 | size: Float
555 | updatedAt: DateTime
556 | width: Float
557 | }
558 |
559 | input AssetCreateLocalizationInput {
560 | "Localization input"
561 | data: AssetCreateLocalizationDataInput!
562 | locale: Locale!
563 | }
564 |
565 | input AssetCreateLocalizationsInput {
566 | "Create localizations for the newly-created document"
567 | create: [AssetCreateLocalizationInput!]
568 | }
569 |
570 | "Identifies documents"
571 | input AssetManyWhereInput {
572 | "Logical AND on all given filters."
573 | AND: [AssetWhereInput!]
574 | "Logical NOT on all given filters combined by AND."
575 | NOT: [AssetWhereInput!]
576 | "Logical OR on all given filters."
577 | OR: [AssetWhereInput!]
578 | "Contains search across all appropriate fields."
579 | _search: String
580 | createdAt: DateTime
581 | "All values greater than the given value."
582 | createdAt_gt: DateTime
583 | "All values greater than or equal the given value."
584 | createdAt_gte: DateTime
585 | "All values that are contained in given list."
586 | createdAt_in: [DateTime!]
587 | "All values less than the given value."
588 | createdAt_lt: DateTime
589 | "All values less than or equal the given value."
590 | createdAt_lte: DateTime
591 | "All values that are not equal to given value."
592 | createdAt_not: DateTime
593 | "All values that are not contained in given list."
594 | createdAt_not_in: [DateTime!]
595 | id: ID
596 | "All values containing the given string."
597 | id_contains: ID
598 | "All values ending with the given string."
599 | id_ends_with: ID
600 | "All values that are contained in given list."
601 | id_in: [ID!]
602 | "All values that are not equal to given value."
603 | id_not: ID
604 | "All values not containing the given string."
605 | id_not_contains: ID
606 | "All values not ending with the given string"
607 | id_not_ends_with: ID
608 | "All values that are not contained in given list."
609 | id_not_in: [ID!]
610 | "All values not starting with the given string."
611 | id_not_starts_with: ID
612 | "All values starting with the given string."
613 | id_starts_with: ID
614 | publishedAt: DateTime
615 | "All values greater than the given value."
616 | publishedAt_gt: DateTime
617 | "All values greater than or equal the given value."
618 | publishedAt_gte: DateTime
619 | "All values that are contained in given list."
620 | publishedAt_in: [DateTime!]
621 | "All values less than the given value."
622 | publishedAt_lt: DateTime
623 | "All values less than or equal the given value."
624 | publishedAt_lte: DateTime
625 | "All values that are not equal to given value."
626 | publishedAt_not: DateTime
627 | "All values that are not contained in given list."
628 | publishedAt_not_in: [DateTime!]
629 | updatedAt: DateTime
630 | "All values greater than the given value."
631 | updatedAt_gt: DateTime
632 | "All values greater than or equal the given value."
633 | updatedAt_gte: DateTime
634 | "All values that are contained in given list."
635 | updatedAt_in: [DateTime!]
636 | "All values less than the given value."
637 | updatedAt_lt: DateTime
638 | "All values less than or equal the given value."
639 | updatedAt_lte: DateTime
640 | "All values that are not equal to given value."
641 | updatedAt_not: DateTime
642 | "All values that are not contained in given list."
643 | updatedAt_not_in: [DateTime!]
644 | }
645 |
646 | "Transformations for Assets"
647 | input AssetTransformationInput {
648 | document: DocumentTransformationInput
649 | image: ImageTransformationInput
650 | "Pass true if you want to validate the passed transformation parameters"
651 | validateOptions: Boolean = false
652 | }
653 |
654 | input AssetUpdateInput {
655 | fileName: String
656 | handle: String
657 | height: Float
658 | "Manage document localizations"
659 | localizations: AssetUpdateLocalizationsInput
660 | mimeType: String
661 | size: Float
662 | width: Float
663 | }
664 |
665 | input AssetUpdateLocalizationDataInput {
666 | fileName: String
667 | handle: String
668 | height: Float
669 | mimeType: String
670 | size: Float
671 | width: Float
672 | }
673 |
674 | input AssetUpdateLocalizationInput {
675 | data: AssetUpdateLocalizationDataInput!
676 | locale: Locale!
677 | }
678 |
679 | input AssetUpdateLocalizationsInput {
680 | "Localizations to create"
681 | create: [AssetCreateLocalizationInput!]
682 | "Localizations to delete"
683 | delete: [Locale!]
684 | "Localizations to update"
685 | update: [AssetUpdateLocalizationInput!]
686 | upsert: [AssetUpsertLocalizationInput!]
687 | }
688 |
689 | input AssetUpdateManyInput {
690 | fileName: String
691 | height: Float
692 | "Optional updates to localizations"
693 | localizations: AssetUpdateManyLocalizationsInput
694 | mimeType: String
695 | size: Float
696 | width: Float
697 | }
698 |
699 | input AssetUpdateManyLocalizationDataInput {
700 | fileName: String
701 | height: Float
702 | mimeType: String
703 | size: Float
704 | width: Float
705 | }
706 |
707 | input AssetUpdateManyLocalizationInput {
708 | data: AssetUpdateManyLocalizationDataInput!
709 | locale: Locale!
710 | }
711 |
712 | input AssetUpdateManyLocalizationsInput {
713 | "Localizations to update"
714 | update: [AssetUpdateManyLocalizationInput!]
715 | }
716 |
717 | input AssetUpdateManyWithNestedWhereInput {
718 | "Update many input"
719 | data: AssetUpdateManyInput!
720 | "Document search"
721 | where: AssetWhereInput!
722 | }
723 |
724 | input AssetUpdateWithNestedWhereUniqueInput {
725 | "Document to update"
726 | data: AssetUpdateInput!
727 | "Unique document search"
728 | where: AssetWhereUniqueInput!
729 | }
730 |
731 | input AssetUpsertInput {
732 | "Create document if it didn't exist"
733 | create: AssetCreateInput!
734 | "Update document if it exists"
735 | update: AssetUpdateInput!
736 | }
737 |
738 | input AssetUpsertLocalizationInput {
739 | create: AssetCreateLocalizationDataInput!
740 | locale: Locale!
741 | update: AssetUpdateLocalizationDataInput!
742 | }
743 |
744 | input AssetUpsertWithNestedWhereUniqueInput {
745 | "Upsert data"
746 | data: AssetUpsertInput!
747 | "Unique document search"
748 | where: AssetWhereUniqueInput!
749 | }
750 |
751 | "Identifies documents"
752 | input AssetWhereInput {
753 | "Logical AND on all given filters."
754 | AND: [AssetWhereInput!]
755 | "Logical NOT on all given filters combined by AND."
756 | NOT: [AssetWhereInput!]
757 | "Logical OR on all given filters."
758 | OR: [AssetWhereInput!]
759 | "Contains search across all appropriate fields."
760 | _search: String
761 | createdAt: DateTime
762 | "All values greater than the given value."
763 | createdAt_gt: DateTime
764 | "All values greater than or equal the given value."
765 | createdAt_gte: DateTime
766 | "All values that are contained in given list."
767 | createdAt_in: [DateTime!]
768 | "All values less than the given value."
769 | createdAt_lt: DateTime
770 | "All values less than or equal the given value."
771 | createdAt_lte: DateTime
772 | "All values that are not equal to given value."
773 | createdAt_not: DateTime
774 | "All values that are not contained in given list."
775 | createdAt_not_in: [DateTime!]
776 | fileName: String
777 | "All values containing the given string."
778 | fileName_contains: String
779 | "All values ending with the given string."
780 | fileName_ends_with: String
781 | "All values that are contained in given list."
782 | fileName_in: [String!]
783 | "All values that are not equal to given value."
784 | fileName_not: String
785 | "All values not containing the given string."
786 | fileName_not_contains: String
787 | "All values not ending with the given string"
788 | fileName_not_ends_with: String
789 | "All values that are not contained in given list."
790 | fileName_not_in: [String!]
791 | "All values not starting with the given string."
792 | fileName_not_starts_with: String
793 | "All values starting with the given string."
794 | fileName_starts_with: String
795 | handle: String
796 | "All values containing the given string."
797 | handle_contains: String
798 | "All values ending with the given string."
799 | handle_ends_with: String
800 | "All values that are contained in given list."
801 | handle_in: [String!]
802 | "All values that are not equal to given value."
803 | handle_not: String
804 | "All values not containing the given string."
805 | handle_not_contains: String
806 | "All values not ending with the given string"
807 | handle_not_ends_with: String
808 | "All values that are not contained in given list."
809 | handle_not_in: [String!]
810 | "All values not starting with the given string."
811 | handle_not_starts_with: String
812 | "All values starting with the given string."
813 | handle_starts_with: String
814 | height: Float
815 | "All values greater than the given value."
816 | height_gt: Float
817 | "All values greater than or equal the given value."
818 | height_gte: Float
819 | "All values that are contained in given list."
820 | height_in: [Float!]
821 | "All values less than the given value."
822 | height_lt: Float
823 | "All values less than or equal the given value."
824 | height_lte: Float
825 | "All values that are not equal to given value."
826 | height_not: Float
827 | "All values that are not contained in given list."
828 | height_not_in: [Float!]
829 | id: ID
830 | "All values containing the given string."
831 | id_contains: ID
832 | "All values ending with the given string."
833 | id_ends_with: ID
834 | "All values that are contained in given list."
835 | id_in: [ID!]
836 | "All values that are not equal to given value."
837 | id_not: ID
838 | "All values not containing the given string."
839 | id_not_contains: ID
840 | "All values not ending with the given string"
841 | id_not_ends_with: ID
842 | "All values that are not contained in given list."
843 | id_not_in: [ID!]
844 | "All values not starting with the given string."
845 | id_not_starts_with: ID
846 | "All values starting with the given string."
847 | id_starts_with: ID
848 | mimeType: String
849 | "All values containing the given string."
850 | mimeType_contains: String
851 | "All values ending with the given string."
852 | mimeType_ends_with: String
853 | "All values that are contained in given list."
854 | mimeType_in: [String!]
855 | "All values that are not equal to given value."
856 | mimeType_not: String
857 | "All values not containing the given string."
858 | mimeType_not_contains: String
859 | "All values not ending with the given string"
860 | mimeType_not_ends_with: String
861 | "All values that are not contained in given list."
862 | mimeType_not_in: [String!]
863 | "All values not starting with the given string."
864 | mimeType_not_starts_with: String
865 | "All values starting with the given string."
866 | mimeType_starts_with: String
867 | publishedAt: DateTime
868 | "All values greater than the given value."
869 | publishedAt_gt: DateTime
870 | "All values greater than or equal the given value."
871 | publishedAt_gte: DateTime
872 | "All values that are contained in given list."
873 | publishedAt_in: [DateTime!]
874 | "All values less than the given value."
875 | publishedAt_lt: DateTime
876 | "All values less than or equal the given value."
877 | publishedAt_lte: DateTime
878 | "All values that are not equal to given value."
879 | publishedAt_not: DateTime
880 | "All values that are not contained in given list."
881 | publishedAt_not_in: [DateTime!]
882 | size: Float
883 | "All values greater than the given value."
884 | size_gt: Float
885 | "All values greater than or equal the given value."
886 | size_gte: Float
887 | "All values that are contained in given list."
888 | size_in: [Float!]
889 | "All values less than the given value."
890 | size_lt: Float
891 | "All values less than or equal the given value."
892 | size_lte: Float
893 | "All values that are not equal to given value."
894 | size_not: Float
895 | "All values that are not contained in given list."
896 | size_not_in: [Float!]
897 | updatedAt: DateTime
898 | "All values greater than the given value."
899 | updatedAt_gt: DateTime
900 | "All values greater than or equal the given value."
901 | updatedAt_gte: DateTime
902 | "All values that are contained in given list."
903 | updatedAt_in: [DateTime!]
904 | "All values less than the given value."
905 | updatedAt_lt: DateTime
906 | "All values less than or equal the given value."
907 | updatedAt_lte: DateTime
908 | "All values that are not equal to given value."
909 | updatedAt_not: DateTime
910 | "All values that are not contained in given list."
911 | updatedAt_not_in: [DateTime!]
912 | width: Float
913 | "All values greater than the given value."
914 | width_gt: Float
915 | "All values greater than or equal the given value."
916 | width_gte: Float
917 | "All values that are contained in given list."
918 | width_in: [Float!]
919 | "All values less than the given value."
920 | width_lt: Float
921 | "All values less than or equal the given value."
922 | width_lte: Float
923 | "All values that are not equal to given value."
924 | width_not: Float
925 | "All values that are not contained in given list."
926 | width_not_in: [Float!]
927 | }
928 |
929 | "References Asset record uniquely"
930 | input AssetWhereUniqueInput {
931 | id: ID
932 | }
933 |
934 | "Accepts either HEX or RGBA color value. At least one of hex or rgba value should be passed. If both are passed RGBA is used."
935 | input ColorInput {
936 | hex: Hex
937 | rgba: RGBAInput
938 | }
939 |
940 | input ConnectPositionInput {
941 | "Connect document after specified document"
942 | after: ID
943 | "Connect document before specified document"
944 | before: ID
945 | "Connect document at last position"
946 | end: Boolean
947 | "Connect document at first position"
948 | start: Boolean
949 | }
950 |
951 | input DocumentOutputInput {
952 | """
953 |
954 | Transforms a document into a desired file type.
955 | See this matrix for format support:
956 |
957 | PDF: jpg, odp, ods, odt, png, svg, txt, and webp
958 | DOC: docx, html, jpg, odt, pdf, png, svg, txt, and webp
959 | DOCX: doc, html, jpg, odt, pdf, png, svg, txt, and webp
960 | ODT: doc, docx, html, jpg, pdf, png, svg, txt, and webp
961 | XLS: jpg, pdf, ods, png, svg, xlsx, and webp
962 | XLSX: jpg, pdf, ods, png, svg, xls, and webp
963 | ODS: jpg, pdf, png, xls, svg, xlsx, and webp
964 | PPT: jpg, odp, pdf, png, svg, pptx, and webp
965 | PPTX: jpg, odp, pdf, png, svg, ppt, and webp
966 | ODP: jpg, pdf, png, ppt, svg, pptx, and webp
967 | BMP: jpg, odp, ods, odt, pdf, png, svg, and webp
968 | GIF: jpg, odp, ods, odt, pdf, png, svg, and webp
969 | JPG: jpg, odp, ods, odt, pdf, png, svg, and webp
970 | PNG: jpg, odp, ods, odt, pdf, png, svg, and webp
971 | WEBP: jpg, odp, ods, odt, pdf, png, svg, and webp
972 | TIFF: jpg, odp, ods, odt, pdf, png, svg, and webp
973 | AI: jpg, odp, ods, odt, pdf, png, svg, and webp
974 | PSD: jpg, odp, ods, odt, pdf, png, svg, and webp
975 | SVG: jpg, odp, ods, odt, pdf, png, and webp
976 | HTML: jpg, odt, pdf, svg, txt, and webp
977 | TXT: jpg, html, odt, pdf, svg, and webp
978 | """
979 | format: DocumentFileTypes
980 | }
981 |
982 | "Transformations for Documents"
983 | input DocumentTransformationInput {
984 | "Changes the output for the file."
985 | output: DocumentOutputInput
986 | }
987 |
988 | input ImageResizeInput {
989 | "The default value for the fit parameter is fit:clip."
990 | fit: ImageFit
991 | "The height in pixels to resize the image to. The value must be an integer from 1 to 10000."
992 | height: Int
993 | "The width in pixels to resize the image to. The value must be an integer from 1 to 10000."
994 | width: Int
995 | }
996 |
997 | "Transformations for Images"
998 | input ImageTransformationInput {
999 | "Resizes the image"
1000 | resize: ImageResizeInput
1001 | }
1002 |
1003 | "Input for a geolocation point with latitude and longitude"
1004 | input LocationInput {
1005 | latitude: Float!
1006 | longitude: Float!
1007 | }
1008 |
1009 | input PublishLocaleInput {
1010 | "Locales to publish"
1011 | locale: Locale!
1012 | "Stages to publish selected locales to"
1013 | stages: [Stage!]!
1014 | }
1015 |
1016 | "Input type representing a RGBA color value: https://developer.mozilla.org/en-US/docs/Web/CSS/color_value#rgb()_and_rgba()"
1017 | input RGBAInput {
1018 | a: RGBATransparency!
1019 | b: RGBAHue!
1020 | g: RGBAHue!
1021 | r: RGBAHue!
1022 | }
1023 |
1024 | input UnpublishLocaleInput {
1025 | "Locales to unpublish"
1026 | locale: Locale!
1027 | "Stages to unpublish selected locales from"
1028 | stages: [Stage!]!
1029 | }
1030 |
1031 | input VersionWhereInput {
1032 | id: ID!
1033 | revision: Int!
1034 | stage: Stage!
1035 | }
1036 |
1037 |
1038 | "Raw JSON value"
1039 | scalar Json
1040 |
1041 | "A date string, such as 2007-12-03 (YYYY-MM-DD), compliant with ISO 8601 standard for representation of dates using the Gregorian calendar."
1042 | scalar Date
1043 |
1044 | "The Long scalar type represents non-fractional signed whole numeric values. Long can represent values between -(2^63) and 2^63 - 1."
1045 | scalar Long
1046 |
1047 | "A date-time string at UTC, such as 2007-12-03T10:15:30Z, compliant with the date-time format outlined in section 5.6 of the RFC 3339 profile of the ISO 8601 standard for representation of dates and times using the Gregorian calendar."
1048 | scalar DateTime
1049 |
1050 | scalar RGBATransparency
1051 |
1052 | scalar RGBAHue
1053 |
1054 | "Slate-compatible RichText AST"
1055 | scalar RichTextAST
1056 |
1057 | scalar Hex
1058 |
--------------------------------------------------------------------------------
/secrets-development.yml:
--------------------------------------------------------------------------------
1 | # XXX Used in localhost, for development environment (when running "yarn start")
2 | development:
3 | # Local redis endpoint url
4 | REDIS_URL: 'localhost:6379'
5 | REDIS_PASSWORD: 'localpass'
6 |
7 | # Required - GraphCMS credentials
8 | # XXX You don't need write capacity; using a READ ONLY (QUERY) token is preferred!
9 | GRAPHCMS_ENDPOINT: 'https://api-euwest.graphcms.com/v1/cjyi8gl5m00tm01e91polc50t/master'
10 | GRAPHCMS_TOKEN: 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ2ZXJzaW9uIjoxLCJ0b2tlbklkIjoiZWI4NTg3YjUtYjZhOS00ODE4LTliYjUtNWE4NDYwYjNmN2IxIn0.UQjYshD_MXM0eN1Jx0m6ODz3bvn6f9zhSJ45pbtEXKs'
11 |
12 | # Optional - Authentication token (headers) required when performing API operations (/refresh-cache and /reset-cache endpoints)
13 | REFRESH_CACHE_TOKEN: 'oCsaDgNY6YUe7x03GY8DoIBCYdGKn2UvKnO2Vb7uYhZbdxs7UbYi2yYy23vOytSteGStUrydyhJu1nTlC8jPNdUQgaqY6NKf2Z0F'
14 |
15 | # Optional - Used to access "/read-cache" endpoint, mostly for debug. XXX No need to make it too complicated, there are no sensitive data accessible from there.
16 | BASIC_AUTH_USERNAME: 'admin'
17 | BASIC_AUTH_PASSWORD: 'not-admin!'
18 |
19 | # ---- Common to all instances ----
20 |
21 | # Optional - Your epsagon token (UUID), available at https://dashboard.epsagon.com/settings/
22 | # If not provided, epsagon will be disabled
23 | EPSAGON_APP_TOKEN: ''
24 |
--------------------------------------------------------------------------------
/secrets-example.yml:
--------------------------------------------------------------------------------
1 | demo:
2 | # XXX Your redis endpoint url
3 | REDIS_URL: '' # Using redisLab, should be something like redis-aaa.aa.eu-west-1-2.ec2.cloud.redislabs.com:11111
4 | REDIS_PASSWORD: ''
5 |
6 | # Required - GraphCMS credentials
7 | # XXX You don't need write capacity; using a READ ONLY (QUERY) token is preferred!
8 | GRAPHCMS_ENDPOINT: ''
9 | GRAPHCMS_TOKEN: ''
10 |
11 | # Optional - Authentication token (headers) required when performing API operations (/refresh-cache and /reset-cache endpoints)
12 | REFRESH_CACHE_TOKEN: ''
13 |
14 | # Optional - Used to access "/read-cache" endpoint, mostly for debug. XXX No need to make it too complicated, there are no sensitive data accessible from there.
15 | BASIC_AUTH_USERNAME: ''
16 | BASIC_AUTH_PASSWORD: ''
17 |
18 | # ---- Common to all instances ----
19 |
20 | # Optional - Your epsagon token (UUID), available at https://dashboard.epsagon.com/settings/
21 | # If not provided, epsagon will be disabled
22 | EPSAGON_APP_TOKEN: ''
23 |
24 |
--------------------------------------------------------------------------------
/secrets-test.yml:
--------------------------------------------------------------------------------
1 | # XXX Used with manual tests and CI, for test environment (when running "yarn test")
2 | development: # XXX The "development" stage is the default one and the one that's used when running tests
3 | # Local redis endpoint url
4 | REDIS_URL: 'localhost:6379'
5 | REDIS_PASSWORD: 'localpass'
6 |
7 | # Required - GraphCMS credentials
8 | # XXX You don't need write capacity; using a READ ONLY (QUERY) token is preferred!
9 | GRAPHCMS_ENDPOINT: 'https://api-eu-central-1.graphcms.com/v2/cjyi8gl5m00tm01e91polc50t/master'
10 | GRAPHCMS_TOKEN: 'eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6ImdjbXMtbWFpbi1wcm9kdWN0aW9uIn0.eyJ2ZXJzaW9uIjozLCJpYXQiOjE2MDYyMTAwNTEsImF1ZCI6WyJodHRwczovL2FwaS1ldS1jZW50cmFsLTEuZ3JhcGhjbXMuY29tL3YyL2NqeWk4Z2w1bTAwdG0wMWU5MXBvbGM1MHQvbWFzdGVyIl0sImlzcyI6Imh0dHBzOi8vbWFuYWdlbWVudC5ncmFwaGNtcy5jb20vIiwic3ViIjoiZDcxZmIzYzEtZjdhMi00Yzc1LTg2NDYtMmM4N2U1YzE1MDliIiwianRpIjoiY2todnJ6cmJmbWtnZzAxeGxmYzJoNmRxcyJ9.HtPgJyb_yoDzs_Qq1H4YeHspi1Y88fA5GMWzxE23bSI9fyA8TLoF5X1166rfZftEq0BSt3Bm-PUEJ6iKK14fp_EFWeHLq1a7R6UDY_v4_WCT5eRZYJ7NIvRRGTm_L3eIh5TVywttSMaGyD4SSIk7cbjzuLdSvQ8QIhKdouWtGaR2nwFludIQIi3BCU7MJ0xVbaRtuTmHXmyS4nSzV1fWRFn4N6IZcsbzqs4vPJbEj7HJwzHZad-tkhg0a761MtTRE_svoluunk1csf-lJY26_wMxkxkj8ptUZLEAM9nRO1O9Uizo18JttYZlYIJ_CwrmSgiNj7f3fa70YpaaT89ZrB8xmESACTOsKqFKCahtxJggB0xCojrRngBXl4JDxIKBDVtIWEZGoCMIyD94gh3O7kXp9S1zU9VFmRnoC3yIzWGn91XTWCUD679lucuM1FWnFzm6CeSLY3ECsAoX_Q3JpIAceMyA1miNi22eGzEfj3dg7GhMmfobl5yAaSJAs0DjdJCP8ZvrN2u0QzFIWZATlo462BCMT9rVx5q3jsAv0hjl-KYy8UASbScyy6JURJkA6jxGEs3sOCIwjEwh0lhZxrXy63YpsBcgD9QJ-S6u0Pe3OX4lF6OeYBU6UjunixQz-Ui5YrcuC5pdq34HcmsUlsLOWo49VeNhbnXJ0QX3BPo'
11 |
12 | # Optional - Authentication token (headers) required when performing API operations (/refresh-cache and /reset-cache endpoints)
13 | REFRESH_CACHE_TOKEN: 'oCsaDgNY6YUe7x03GY8DoIBCYdGKn2UvKnO2Vb7uYhZbdxs7UbYi2yYy23vOytSteGStUrydyhJu1nTlC8jPNdUQgaqY6NKf2Z0F'
14 |
15 | # Optional - Used to access "/read-cache" endpoint, mostly for debug. XXX No need to make it too complicated, there are no sensitive data accessible from there.
16 | BASIC_AUTH_USERNAME: 'admin'
17 | BASIC_AUTH_PASSWORD: 'not-admin!'
18 |
19 | # ---- Common to all instances ----
20 |
21 | # Optional - Your epsagon token (UUID), available at https://dashboard.epsagon.com/settings/
22 | # If not provided, epsagon will be disabled
23 | EPSAGON_APP_TOKEN: ''
24 |
--------------------------------------------------------------------------------
/serverless.yml:
--------------------------------------------------------------------------------
1 | service: gcms-tfp-cache
2 | frameworkVersion: "=1.74.0"
3 |
4 | plugins:
5 | # - serverless-plugin-epsagon # https://github.com/epsagon/serverless-plugin-epsagon XXX We don't use it because it's not compatible with the basic_auth authorizer, used by /read-cache
6 | - serverless-webpack # https://github.com/serverless-heaven/serverless-webpack
7 | - serverless-offline # See https://github.com/dherault/serverless-offline
8 | - '@unly/serverless-env-copy-plugin' # See https://github.com/UnlyEd/serverless-env-copy-plugin
9 | - serverless-domain-manager # See https://github.com/amplify-education/serverless-domain-manager
10 | - serverless-dotenv-plugin # See https://www.npmjs.com/package/serverless-dotenv-plugin
11 |
12 | package:
13 | individually: true
14 |
15 | custom:
16 | serverless-offline:
17 | port: 8085 # Update .env.development and .env.test if you ever change this
18 | showDuration: true
19 | environment: ${env:NODE_ENV, 'development'}
20 | envs:
21 | development: # XXX Used by both "development" and "test" environments
22 | profile:
23 | domain:
24 | name: ''
25 | memorySize:
26 | demoStaging:
27 | profile: # TODO AWS profile to use for this environment - Useful if your staging and production apps don't live in the same AWS Account! (which is our case)
28 | domain:
29 | name: '' # TODO Your staging domain name
30 | memorySize: 256
31 | demo:
32 | profile: # TODO AWS profile to use for this environment - Useful if your staging and production apps don't live in the same AWS Account! (which is our case)
33 | domain:
34 | name: '' # TODO Your production domain name
35 | memorySize: 256
36 | webpack:
37 | webpackConfig: 'webpack.config.js'
38 | includeModules:
39 | forceExclude:
40 | - aws-sdk
41 | packager: 'yarn'
42 | packagerOptions: {}
43 | excludeFiles: src/**/*.test.js
44 | keepOutputDirectory: true
45 | customDomain:
46 | enabled: true
47 | domainName: ${self:custom.envs.${self:provider.stage}.domain.name}
48 | stage: ${self:provider.stage}
49 | createRoute53Record: true
50 |
51 | provider:
52 | name: aws
53 | runtime: nodejs10.x # AWS keeps up to date with the latest v10 version - See https://forum.serverless.com/t/node-10-lambdas-on-aws/8302/2
54 | versionFunctions: false # See https://serverless.com/framework/docs/providers/aws/guide/functions#versioning-deployed-functions
55 | logRetentionInDays: 60
56 | stage: ${opt:stage, 'development'}
57 | region: eu-west-1
58 | memorySize: ${self:custom.envs.${self:provider.stage}.memorySize, '128'}
59 | timeout: 30 # 30sec timeout, we perform long-running lambda sometimes (such as /refresh-cache)
60 | logs:
61 | restApi: true # Enable logs in other services, such as API GW - See https://serverless.com/blog/framework-release-v142/
62 | deploymentBucket:
63 | serverSideEncryption: AES256
64 | environment:
65 | NODE_ENV: ${self:custom.environment}
66 | REDIS_URL: ${file(./secrets-${self:custom.environment}.yml):${self:provider.stage}.REDIS_URL}
67 | REDIS_PASSWORD: ${file(./secrets-${self:custom.environment}.yml):${self:provider.stage}.REDIS_PASSWORD}
68 | GRAPHCMS_ENDPOINT: ${file(./secrets-${self:custom.environment}.yml):${self:provider.stage}.GRAPHCMS_ENDPOINT}
69 | GRAPHCMS_TOKEN: ${file(./secrets-${self:custom.environment}.yml):${self:provider.stage}.GRAPHCMS_TOKEN}
70 | REFRESH_CACHE_TOKEN: ${file(./secrets-${self:custom.environment}.yml):${self:provider.stage}.REFRESH_CACHE_TOKEN}
71 | BASIC_AUTH_USERNAME: ${file(./secrets-${self:custom.environment}.yml):${self:provider.stage}.BASIC_AUTH_USERNAME}
72 | BASIC_AUTH_PASSWORD: ${file(./secrets-${self:custom.environment}.yml):${self:provider.stage}.BASIC_AUTH_PASSWORD}
73 | EPSAGON_APP_TOKEN: ${file(./secrets-${self:custom.environment}.yml):EPSAGON_APP_TOKEN}
74 | EPSAGON_APP_NAME: ${self:service}-${self:provider.stage}-${self:custom.environment}
75 | CACHE_BASE_URL: https://${self:custom.envs.${self:provider.stage}.domain.name} # Overridden in development/test by .env.X files
76 | profile: ${self:custom.envs.${self:provider.stage}.profile, ''}
77 | stackTags:
78 | env: ${self:custom.environment}
79 | stage: ${self:provider.stage}
80 | region: ${self:provider.region}
81 | service: ${self:service}
82 | service-type: api
83 | runtime: ${self:provider.runtime}
84 |
85 | functions:
86 | cache-query:
87 | handler: src/functions/epsagon.cacheQuery
88 | events:
89 | - http:
90 | method: POST
91 | path: /cache-query
92 | cors: # XXX See https://serverless.com/blog/cors-api-gateway-survival-guide/
93 | origin: '*' # Same as "Access-Control-Allow-Origin"
94 | headers: # Same as "Access-Control-Allow-Headers"
95 | # Standard default headers
96 | - Content-Type
97 | - X-Amz-Date
98 | - Authorization
99 | - X-Api-Key
100 | - X-Amz-Security-Token
101 | - X-Amz-User-Agent
102 | # Additional headers - Specific to this app
103 | - gcms-locale
104 | - gcms-locale-no-default
105 | allowCredentials: false
106 |
107 | reset-cache:
108 | handler: src/functions/epsagon.resetCache
109 | # XXX Ensures the lambda always has one slot available, and never use more than one lambda instance at once.
110 | # Prevents GraphCMS webhooks from abusing our lambda (GCMS will trigger the webhook once per create/update/delete operation)
111 | # This makes sure only one instance of that lambda can run at once, to avoid refreshing the cache with parallel runs
112 | # Avoids flushing the redis DB multiple times at the same time
113 | # See https://itnext.io/the-everything-guide-to-lambda-throttling-reserved-concurrency-and-execution-limits-d64f144129e5
114 | reservedConcurrency: 1
115 | events:
116 | - http:
117 | method: POST
118 | path: /reset-cache
119 | cors: true
120 |
121 | refresh-cache:
122 | handler: src/functions/refresh-cache.refreshCache # FIXME Don't use epsagon wrapper - creates an infinite loop (see slack)
123 | # XXX Ensures the lambda always has one slot available, and never use more than one lambda instance at once.
124 | # Prevents GraphCMS webhooks from abusing our lambda (GCMS will trigger the webhook once per create/update/delete operation)
125 | # This makes sure only one instance of that lambda can run at once, to avoid refreshing the cache with parallel runs
126 | # Avoids spawning tons of API calls (most of them would timeout anyway, around 80%)
127 | # See https://itnext.io/the-everything-guide-to-lambda-throttling-reserved-concurrency-and-execution-limits-d64f144129e5
128 | reservedConcurrency: 1
129 | events:
130 | - http:
131 | method: POST
132 | path: /refresh-cache
133 | cors: true
134 |
135 | status:
136 | handler: src/functions/status.status # Don't trace status route
137 | events:
138 | - http:
139 | method: GET
140 | path: /status
141 | cors: true
142 |
143 | read-cache:
144 | handler: src/functions/epsagon.readCache
145 | events:
146 | - http:
147 | method: GET
148 | path: /read-cache
149 | cors: true
150 | authorizer: # See https://medium.com/@Da_vidgf/http-basic-auth-with-api-gateway-and-serverless-5ae14ad0a270
151 | name: authorizer
152 | resultTtlInSeconds: 0
153 | identitySource: method.request.header.Authorization
154 | type: request
155 | authorizer:
156 | handler: src/authorizers/basic-auth.handler # A Basic-Auth authentication is required in non-local environments
157 |
158 | resources:
159 | Conditions:
160 | IsProduction:
161 | Fn::Equals:
162 | - ${self:custom.environment}
163 | - "production"
164 | Resources:
165 | GatewayResponse:
166 | Type: 'AWS::ApiGateway::GatewayResponse'
167 | Properties:
168 | ResponseParameters:
169 | gatewayresponse.header.WWW-Authenticate: "'Basic'"
170 | ResponseType: UNAUTHORIZED
171 | RestApiId:
172 | Ref: 'ApiGatewayRestApi'
173 | StatusCode: '401'
174 | # XXX Health check is meant to check if /status endpoint replies properly, it doesn't check internal behaviour.
175 | # Also, it doesn't warm any endpoint but /status. And it won't configure alerts automatically.
176 | # Feel free to enable it if you need it, it will increase your cost, though.
177 | # XXX P.S: We are looking for a better way to perform health checks, AWS is too "heavy", too many useless calls are sent and we couldn't configure alerts automatically to hit our slack channel - If you know of a better way, please share!
178 | # healthCheck:
179 | # Condition: IsProduction # Only deploy HealthCheck in production environment
180 | # Type: "AWS::Route53::HealthCheck"
181 | # Properties:
182 | # HealthCheckConfig:
183 | # EnableSNI: true
184 | # FailureThreshold: "3"
185 | # FullyQualifiedDomainName: ${self:custom.envs.${self:provider.stage}.domain.name}
186 | # ResourcePath: "/status" # XXX Optimize cost by hitting a simple endpoint which doesn't download anything and avoid cost due to data transfer (S3, API Gateway)
187 | # MeasureLatency: false # XXX Cost $1/stage/month ($2/month for staging + production, for a single customer)
188 | # Port: "80"
189 | # Regions: # XXX Only check from EU/US (minimum 3 regions is required), costs much less money and not useful to check from all datacenters in the world
190 | # - 'eu-west-1' # Ireland
191 | # - 'us-west-1' # California
192 | # - 'us-east-1' # Virginia
193 | # RequestInterval: "30" # 30 is the max allowed
194 | # Type: "HTTPS"
195 | # HealthCheckTags:
196 | # - Key: "env"
197 | # Value: ${self:custom.environment}
198 | # - Key: "stage"
199 | # Value: ${self:provider.stage}
200 | # - Key: "region"
201 | # Value: ${self:provider.region}
202 | # - Key: "service"
203 | # Value: ${self:service}
204 | # - Key: "service-type"
205 | # Value: api
206 |
--------------------------------------------------------------------------------
/src/authorizers/basic-auth.js:
--------------------------------------------------------------------------------
1 | import { createLogger } from '@unly/utils-simple-logger';
2 |
3 | import epsagon from '../utils/epsagon';
4 |
5 | const logger = createLogger({
6 | label: 'Basic Auth',
7 | });
8 |
9 | const UNAUTHORIZED_HTTP_RESPONSE = 'Unauthorized'; // Returns special HTTP string that's expected by the browser, must not be used to display an error message. Will force re-prompt credentials dialog box to the end-user.
10 |
11 | if (!process.env.BASIC_AUTH_USERNAME || !process.env.BASIC_AUTH_PASSWORD) {
12 | throw Error(`No "BASIC_AUTH_USERNAME" or "BASIC_AUTH_PASSWORD" defined as environment variable, please make sure you've defined both in the .env.${process.env.NODE_ENV} file.`);
13 | }
14 |
15 | // See https://medium.com/@Da_vidgf/http-basic-auth-with-api-gateway-and-serverless-5ae14ad0a270
/**
 * Build an IAM policy document granting "Invoke" on every method/path of the
 * API stage extracted from the incoming method ARN.
 * See https://medium.com/@Da_vidgf/http-basic-auth-with-api-gateway-and-serverless-5ae14ad0a270
 *
 * @param event API Gateway authorizer event (must contain "methodArn")
 * @param principalId Principal identifier attached to the returned policy
 * @return {{principalId: *, policyDocument: {Version: string, Statement: [{Action: string, Effect: string, Resource: [string]}]}}}
 */
function buildAllowAllPolicy(event, principalId) {
  // Method ARN format: "arn:aws:execute-api:{region}:{accountId}:{apiId}/{stage}/{httpVerb}/{resourcePath}"
  const arnParts = event.methodArn.split(':');
  const awsRegion = arnParts[3];
  const awsAccountId = arnParts[4];
  const [restApiId, stage] = arnParts[5].split('/');

  // Wildcard the verb and resource path so a single policy covers the whole stage
  const apiArn = `arn:aws:execute-api:${awsRegion}:${awsAccountId}:${restApiId}/${stage}/*/*`;

  return {
    principalId,
    policyDocument: {
      Version: '2012-10-17',
      Statement: [
        {
          Action: 'execute-api:Invoke',
          Effect: 'Allow',
          Resource: [apiArn],
        },
      ],
    },
  };
}
40 |
41 | exports.handler = function handler(event, context, callback) {
42 | if (process.env.NODE_ENV === 'development') {
43 | // XXX In dev env, we don't want to be bothered with basic-auth (which isn't handled properly and display 401, because there is no API GW on our local machine)
44 | return callback(null, buildAllowAllPolicy(event, 'bypass-security')); // Send any non-empty username bypasses the policy, and force the authorizer to allow
45 | }
46 |
47 | const authorizationHeader = event.headers.Authorization;
48 |
49 | if (!authorizationHeader) {
50 | const error = `[Basic-Auth] Authentication failure - No credentials provided`;
51 | epsagon.setError(Error(error));
52 | logger.error(error);
53 |
54 | return callback(UNAUTHORIZED_HTTP_RESPONSE);
55 | }
56 |
57 | const encodedCreds = authorizationHeader.split(' ')[1];
58 | const plainCreds = (new Buffer(encodedCreds, 'base64')).toString().split(':');// eslint-disable-line no-buffer-constructor
59 | const username = plainCreds[0];
60 | const password = plainCreds[1];
61 |
62 | if (!(username === process.env.BASIC_AUTH_USERNAME && password === process.env.BASIC_AUTH_PASSWORD)) {
63 | const error = `[Basic-Auth] Authentication failure - Wrong credentials provided (username: "${username}" | password: "${password}")`;
64 | epsagon.setError(Error(error));
65 | logger.error(error);
66 |
67 | return callback(UNAUTHORIZED_HTTP_RESPONSE);
68 | }
69 |
70 | return callback(null, buildAllowAllPolicy(event, username));
71 | };
72 |
--------------------------------------------------------------------------------
/src/cli/commandsHandler.js:
--------------------------------------------------------------------------------
1 | import { createLogger } from '@unly/utils-simple-logger';
2 | import map from 'lodash.map';
3 | import logSymbols from 'log-symbols';
4 | import fetch from 'node-fetch';
5 |
6 | const logger = createLogger({
7 | label: 'Emulated client (CLI)',
8 | });
9 |
10 | const cacheQueryRoute = '/cache-query';
11 | const refreshCacheRoute = '/refresh-cache';
12 | const resetCacheRoute = '/reset-cache';
13 |
14 | if (!process.env.CACHE_BASE_URL) {
15 | logger.info(`${logSymbols.error} No "CACHE_BASE_URL" defined as environment variable, please make sure you've defined "CACHE_BASE_URL" in the .env.${process.env.NODE_ENV} file.`);
16 | process.exit();
17 | }
18 |
/**
 * Log a goodbye message, then terminate the CLI process.
 */
export async function exit() {
  const goodbye = `${logSymbols.success} Quitting client`;
  logger.info(goodbye);
  process.exit();
}
23 |
/**
 * Execute a GraphQL query against the cache endpoint and log the outcome.
 *
 * @param query GraphQL query (document) to send to the cache endpoint
 * @param locale Value forwarded through the "gcms-locale" header (e.g. "EN, FR")
 * @throws Rethrows any error raised while querying (after logging it)
 */
export async function sendQuery(query, locale) {
  logger.info(`${logSymbols.info} Querying cache ...`);

  try {
    const { ApolloClientFactory } = require('../utils/apolloClient'); // eslint-disable-line global-require
    const cacheEndpoint = process.env.CACHE_BASE_URL + cacheQueryRoute;
    const headers = {
      'gcms-locale': locale,
      'gcms-locale-no-default': false,
    };
    const client = ApolloClientFactory(headers, cacheEndpoint);

    // Apollo expects an options object wrapping the query document
    query = { query }; // eslint-disable-line no-param-reassign
    logger.info(`${logSymbols.info} Sending the following query: \n${JSON.stringify(query, null, 2)}`);
    logger.info(`${logSymbols.info} With headers: \n${JSON.stringify(headers, null, 2)}`);

    const response = await client.query(query);
    if (response.data === undefined) {
      throw Error(`Error when requesting data from ${cacheEndpoint}`);
    }

    logger.info(`${logSymbols.info} Received the following response: \n${JSON.stringify(response)}`);
    logger.info(`${logSymbols.success} OK - Query was executed successfully`);
  } catch (e) {
    logger.error(`${logSymbols.error} ERROR - ${e}`);
    throw e;
  }
}
50 |
/**
 * POST to a cache-management endpoint and log the outcome.
 * XXX Factored out of refreshCache/resetCache, which were identical apart from
 * the route and the success message.
 *
 * @param {string} route Endpoint route, appended to CACHE_BASE_URL
 * @param {string} successMessage Message logged when the endpoint returned no errors
 * @return {Promise<void>}
 * @throws Rethrows fetch/parse errors (after logging them)
 */
async function callCacheEndpoint(route, successMessage) {
  const url = process.env.CACHE_BASE_URL + route;
  const options = { method: 'POST', headers: { 'GraphCMS-WebhookToken': process.env.REFRESH_CACHE_TOKEN } };

  logger.debug(`Sending [${options.method}] "${url}" with headers "${JSON.stringify(options.headers)}"`);
  const result = await fetch(url, options)
    .then((res) => res.json())
    .catch((e) => {
      logger.debug(`An unexpected error happened while fetching "${url}".`);
      logger.error(e);
      throw e;
    });

  if (result.errors) {
    logger.error(`${result.errors.length} error(s) were returned by "${url}":`);
    map(result.errors, (e, index) => logger.error(` - [${index}] "${e.message}"`));
  } else {
    logger.info(`${logSymbols.success} OK - ${successMessage}`);
    logger.info(`${logSymbols.info} Result:\n${JSON.stringify(result, null, 2)}`);
  }
}

/**
 * Trigger a refresh of all cached queries through the refresh-cache endpoint.
 */
export async function refreshCache() {
  return callCacheEndpoint(refreshCacheRoute, 'Cached queries were refreshed');
}

/**
 * Wipe the whole cache through the reset-cache endpoint.
 */
export async function resetCache() {
  return callCacheEndpoint(resetCacheRoute, 'Cache was reset/wiped');
}
94 |
--------------------------------------------------------------------------------
/src/cli/index.js:
--------------------------------------------------------------------------------
1 | import { createLogger } from '@unly/utils-simple-logger';
2 | import inquirer from 'inquirer';
3 | import logSymbols from 'log-symbols';
4 |
5 | import { querySchemaData, querySchemaData2 } from '../../gql/querySchema';
6 | import { exit, refreshCache, resetCache, sendQuery } from './commandsHandler';
7 |
8 | const logger = createLogger({
9 | label: 'Cache handler',
10 | });
11 |
// Inquirer prompt configuration: each choice carries its own "callback" (command handler)
// and "args" (arguments passed to that handler), resolved by the cli() loop below.
const promptObj = {
  type: 'list',
  name: 'actions',
  message: 'What do you want to do?',
  choices: [
    {
      name: 'Send query 1 - EN, FR',
      value: 'send-cache1-en',
      callback: sendQuery,
      args: [querySchemaData, 'EN, FR'],
    },
    {
      name: 'Send query 1 - FR, EN',
      value: 'send-cache1-fr',
      callback: sendQuery,
      args: [querySchemaData, 'FR, EN'],
    },
    {
      name: 'Send query 2 - EN, FR',
      value: 'send-cache2-en',
      callback: sendQuery,
      args: [querySchemaData2, 'EN, FR'],
    },
    {
      name: 'Send query 2 - FR, EN',
      value: 'send-cache2-fr',
      callback: sendQuery,
      args: [querySchemaData2, 'FR, EN'],
    },
    {
      name: 'Refresh all cache',
      value: 'refresh-cache',
      callback: refreshCache,
      args: [],
    },
    {
      name: 'Reset all cache',
      value: 'reset-cache',
      callback: resetCache,
      args: [],
    },
    {
      name: 'Quit client',
      value: 'exit',
      callback: exit,
      args: [],
    },
  ],
};
61 |
/**
 * Main CLI loop: prompt the user for an action, run the matching command
 * handler, then prompt again (recurses until the "exit" command terminates the process).
 *
 * @return {Promise<void>}
 */
async function cli() {
  const answer = await inquirer.prompt([promptObj]);
  // XXX "find" replaces the previous "filter(...)[0]" pattern - at most one choice can match a given value
  const commandHandler = promptObj.choices.find((el) => el.value === answer.actions);

  if (!commandHandler || typeof commandHandler.callback === 'undefined') {
    logger.error(`${logSymbols.error} Unknown command '${answer.actions}', please make sure you have set a callback function`);
  } else {
    const { args } = commandHandler;
    await commandHandler.callback(...args);
  }

  await cli();
}
75 |
// XXX "cli" is async: a synchronous try/catch would never catch its rejections
// (the promise escapes the try block), so failures must be handled on the promise chain.
cli().catch((e) => {
  logger.error(logSymbols.error + e);
});
81 |
--------------------------------------------------------------------------------
/src/constants.js:
--------------------------------------------------------------------------------
// Expected Apollo response for the introspection query carried by "eventExample" below - used as a test fixture
export const responseSchemaData = {
  data: {
    __schema: {
      __typename: '__Schema',
      mutationType: {
        __typename: '__Type',
        kind: 'OBJECT',
      },
    },
  },
  loading: false,
  networkStatus: 7, // 7 === NetworkStatus.ready (query finished successfully)
  stale: false,
};
15 |
// Minimal lambda event (API Gateway shape) carrying a GraphCMS introspection query - used as a test fixture.
// The body is a stringified JSON object, as sent by a GraphQL client over HTTP.
export const eventExample = {
  body: `{"operationName":null,"variables":{},"query":"{__schema { mutationType { kind }}}"}`,
  headers: {
    'gcms-locale': 'EN',
  },
};
22 |
--------------------------------------------------------------------------------
/src/functions/cache-query.js:
--------------------------------------------------------------------------------
1 | import { createLogger } from '@unly/utils-simple-logger';
2 | import { NetworkStatus } from 'apollo-client';
3 | import get from 'lodash.get';
4 | import map from 'lodash.map';
5 |
6 | import { eventExample } from '../constants';
7 | import { addItemToCache, extractQueryResultsFromItem, printCachedItem } from '../utils/cache';
8 | import epsagon from '../utils/epsagon';
9 | import { extractHeadersToForward, handleGraphCMSCompatibleErrorResponse, runQuery } from '../utils/graphql';
10 | import { generateRedisKey, getClient } from '../utils/redis';
11 |
12 | const logger = createLogger({
13 | label: 'Cache handler',
14 | });
15 | let redisClient = getClient(); // XXX Init redis connection from outside the lambda handler in order to share the connection - See https://www.jeremydaly.com/reuse-database-connections-aws-lambda/
16 |
17 | const responseHeaders = { // XXX Necessary to make CORS requests work - See https://serverless.com/blog/cors-api-gateway-survival-guide/
18 | 'Access-Control-Allow-Origin': '*',
19 | 'Access-Control-Allow-Credentials': false,
20 | };
21 |
22 | /**
23 | * This cache endpoint is meant to be resilient and always return the data from either the redis cache,
24 | * or fallback to GCMS API if the value isn't found in the cache, or if redis is down (outage).
25 | *
26 | * Cache resolution algorithm:
27 | * - Fetch the value indexed by the query (index is composed of query body and headers), from the redis cache
28 | * - If an existing value exists, then returns it
29 | * - If not, then execute the query on GraphCMS endpoint
30 | * - Once the query results are received, store them in the cache, using the query (string) as key
31 | * - Do not wait for cache to be updated before returning the query results
32 | * - If the query results cannot be saved to the cache, then generate an alert through Epsagon + log
33 | * - If any errors are returned by GCMS, then return the response, but don't update the cache
34 | *
35 | * @param event
36 | * @param context
37 | * @return {Promise<{body: (*), statusCode: number}>}
38 | */
export const cacheQuery = async (event, context) => {
  logger.debug('Lambda "cacheQuery" called.');
  const _headers = event.headers; // Contains all headers sent by the client
  const forwardedHeaders = extractHeadersToForward(_headers); // Contains only the headers meant to be forwarded with the GCMS query

  // Override redis client when testing, to provide potentially different credentials
  if (process.env.NODE_ENV === 'test') {
    redisClient = getClient(event.__TEST_REDIS_URL || process.env.REDIS_URL, event.__TEST_REDIS_PASSWORD || process.env.REDIS_PASSWORD, 1);
  }

  const body = get(event, 'body'); // The body contains the GraphCMS query, as a string (respect GraphCMS API standards)
  const redisKey = generateRedisKey(body, forwardedHeaders);
  epsagon.label('body', body); // We store the query in Epsagon logging so that it'll be reported if we report errors to Epsagon
  let query = null;

  try { // Try to parse the query and fail early if we can't
    logger.debug('Parsing the request body...');
    query = JSON.parse(body);
    epsagon.label('query', query);
    logger.debug('The body was parsed successfully into a GraphCMS query.');
    logger.debug(`OperationName: "${query.operationName}"`);
    logger.debug(`Forwarded headers: ${JSON.stringify(forwardedHeaders)}`);
  } catch (e) {
    // XXX If we can't parse the query, then we immediately return an error, since it is considered as a FATAL error from which we can't do anything
    logger.debug('An error occurred when parsing the body, an error will now be sent to the client.', 'FATAL');
    logger.error(e);
    epsagon.setError(e);

    // We return the error message directly, to help with debug (and it's not sensitive)
    return {
      statusCode: 500,
      headers: responseHeaders,
      body: handleGraphCMSCompatibleErrorResponse(`Could not parse the given query, please provide a proper GraphCMS query as "request body". (hint: Body must contain "operationName", "variables" and "query" properties, as a stringified JSON object, such as "${eventExample.body}") \nRequest body: "${body}"`),
    };
  }

  // Fetch a potential query result for the given query, if it exists in the cache already
  let cachedItem;

  try {
    logger.debug(`Fetching GraphCMS query from redis cache...`);
    // XXX If fetching data from redis fails, we will fall back to running the query against GraphCMS API in order to ensure the client gets the data anyway
    cachedItem = await redisClient.get(redisKey);
  } catch (e) {
    logger.debug(`An exception occurred while fetching redis cache.`);
    logger.error(e);
    epsagon.setError(e);
  }

  // If the query is cached, return the results from the cache
  if (cachedItem) {
    logger.debug(`The query was found in the redis cache, here is the item from the redis cache (you can customise this output for debug purpose):`);
    // Change true/false to see the full dataset returned by the cache (for debug) - Or use a custom object to strip specific keys
    printCachedItem(cachedItem, true);

    logger.debug(`The cached result will now be sent to client.`);
    return {
      statusCode: 200,
      headers: responseHeaders,
      body: JSON.stringify(
        extractQueryResultsFromItem(cachedItem),
      ),
    };
  } else {
    logger.debug(`The query was not found in the redis cache.`);
    // If the query isn't cached yet, execute it (through GraphCMS API)
    let queryResults;

    try {
      logger.debug(`Executing GraphCMS query on GraphCMS API...`);
      const { ApolloClientFactory } = require('../utils/apolloClient'); // eslint-disable-line global-require
      let client;

      if (process.env.NODE_ENV === 'test') {
        // Changing variables when running tests on a dedicated test GraphCMS endpoint
        client = ApolloClientFactory(forwardedHeaders, event.__TEST_GRAPHCMS_ENDPOINT, event.__TEST_GRAPHCMS_TOKEN);
      } else {
        client = ApolloClientFactory(forwardedHeaders);
      }
      queryResults = await runQuery(query, client);
    } catch (e) {
      logger.debug(`An exception occurred while fetching GraphCMS API.`);
      logger.error(e);
      epsagon.setError(e);

      return {
        statusCode: 500,
        headers: responseHeaders,
        body: handleGraphCMSCompatibleErrorResponse(String(e)),
      };
    }

    // XXX If a GraphCMS query returns any kind of error we don't add it to the cache, to avoid storing persistent data that aren't reliable
    // So, we only cache data when they're reliable, to avoid storing data that may not be reliable
    // (and allow owner to fix it from GCMS, thus ensuring proper caching the next time that query is executed)
    if (queryResults.networkStatus === NetworkStatus.ready) { // See https://github.com/apollographql/apollo-client/blob/master/packages/apollo-client/src/core/networkStatus.ts
      // If the query was executed successfully, update the cache
      // XXX Asynchronous on purpose - Do not wait for the cache to be updated before returning the query results (perf++)
      addItemToCache(redisClient, body, forwardedHeaders, queryResults)
        .then((result) => {
          if (result !== 'OK') {
            const message = `Redis couldn't save the newer query results to the cache: "${result}"`;
            logger.error(message);
            epsagon.setError(Error(message));
          }
        })
        .catch((error) => {
          const message = `Redis couldn't save the newer query results to the cache, an error happened: "${error}"`;
          logger.error(message);
          epsagon.setError(Error(message)); // XXX Report the built message - previously the raw error was wrapped in Error(), which stringified it and discarded the built message
        });
      logger.debug(`The GraphCMS query was executed successfully. Results are now sent to the client.`);
    } else {
      // XXX GraphCMS returns an array of "errors" when errors happen, even if there was only one error thrown
      // The error may be partial, or fatal (there may be data fetched, alongside errors, or just errors with no data)
      const queryErrors = get(queryResults, 'errors', []);

      map(queryErrors, (gcmsError) => {
        const error = `Response status: "${queryResults.networkStatus}" - An error occurred on GraphCMS when running the query, the results were therefore not cached to avoid storing non-reliable information. \nGraphCMS Error: "${gcmsError.message}"`;
        logger.error(error);
        epsagon.setError(Error(error));
      });
      logger.debug(`Full GraphCMS API response: \n${JSON.stringify(queryResults, null, 2)}`);
      logger.debug(`The GraphCMS query returned one or more errors. Results are now sent to the client.`);
    }

    // XXX Will return the value ASAP (don't wait for the cache to be updated)
    // If the query failed, will return the results anyway, because it's possible for a GraphQL query to partially fail, but yield results anyway
    // (it can contains both "data" and "errors") - See https://blog.apollographql.com/full-stack-error-handling-with-graphql-apollo-5c12da407210
    return {
      statusCode: 200,
      headers: responseHeaders,
      body: JSON.stringify(queryResults),
    };
  }
};
175 |
--------------------------------------------------------------------------------
/src/functions/cache-query.test.js:
--------------------------------------------------------------------------------
1 | import waitForExpect from 'wait-for-expect';
2 |
3 | import { eventExample, responseSchemaData } from '../constants';
4 | import { addItemToCache, extractMetadataFromItem, extractQueryResultsFromItem } from '../utils/cache';
5 | import { generateRedisKey, getClient } from '../utils/redis';
6 | import { cacheQuery } from './cache-query';
7 |
8 | const GCMS_INVALID_TOKEN = 'The Token you passed is Invalid!';
9 | let redisClient;
10 |
describe('functions/cache-query.js', () => {
  beforeAll(() => {
    redisClient = getClient();
  });

  afterAll(async () => {
    await redisClient.quit();
  });

  describe('cacheQuery should return the cached data', () => {
    test('when the query is not cached yet (empty cache)', async () => {
      await redisClient.flushdb();

      const result = await cacheQuery(eventExample, null);
      expect(result).toBeObject();
      expect(JSON.parse(result.body)).toEqual(responseSchemaData);
    });

    test('when the query is cached manually using "addItemToCache"', async () => {
      // We use another client to connect to localhost instead of graphCMS
      const flushResult = await redisClient.flushdb();
      expect(flushResult).toEqual('OK'); // Check that redis flushed the DB correctly

      const response = await addItemToCache(redisClient, eventExample.body, eventExample.headers, 'test-value');
      expect(response).not.toBeNull();

      const redisKey = generateRedisKey(eventExample.body, eventExample.headers);
      const item = await redisClient.get(redisKey);
      const queryResults = extractQueryResultsFromItem(item);
      expect(queryResults).toEqual('test-value');

      const metadata = extractMetadataFromItem(item);
      expect(metadata.updatedAt).toBeNull();
      expect(metadata.version).toEqual(0);
    });

    test('when the query is cached automatically using "cacheQuery"', async () => {
      const flushResult = await redisClient.flushdb();
      expect(flushResult).toEqual('OK'); // Check that redis flushed the DB correctly

      // Check the response contains the expected data
      const result = await cacheQuery(eventExample, null);
      expect(result).toBeObject();
      const { statusCode, body } = result;
      const { data, error } = JSON.parse(body);
      expect(statusCode).toBe(200); // Should return good status
      expect(data).toBeObject(); // Should contain data
      expect(error).toBeUndefined(); // Shouldn't contain error

      // XXX A previous no-op "setTimeout" with an empty, non-awaited callback was removed here:
      // it never delayed anything - waitForExpect below does the actual waiting/retrying
      await waitForExpect(async () => { // See https://github.com/TheBrainFamily/wait-for-expect#readme
        // Check the cache contains the expected data as well (should have been updated by cacheQuery)
        const redisKey = generateRedisKey(eventExample.body, eventExample.headers);
        const item = await redisClient.get(redisKey);
        const queryResults = extractQueryResultsFromItem(item);
        expect(queryResults).toBeObject();

        const metadata = extractMetadataFromItem(item);
        expect(metadata.updatedAt).toBeNull();
        expect(metadata.version).toEqual(0);
      });
    });
  });

  describe('cacheQuery should return an error', () => {
    describe('when the cache is empty and', () => {
      beforeEach(async () => {
        const flushResult = await redisClient.flushdb();
        expect(flushResult).toEqual('OK'); // Check that redis flushed the DB correctly
      });

      test('when graphCMS is down', async () => {
        const { body } = await cacheQuery({
          ...eventExample,
          __TEST_GRAPHCMS_ENDPOINT: '',
        }, null);
        const result = JSON.parse(body);
        expect(result).toBeObject();
        expect(result).toMatchObject({
          data: {},
          errors: [{ message: '[Cache] Error: Network error: Only absolute URLs are supported' }],
        });
      });

      test('on using bad token for GraphCMS', async () => {
        const { body, statusCode } = await cacheQuery({
          ...eventExample,
          __TEST_GRAPHCMS_TOKEN: '',
        }, null);
        expect(statusCode).toEqual(200);
        const result = JSON.parse(body);
        expect(result).toBeObject();
        expect(result).toMatchObject({
          errors: [{ message: GCMS_INVALID_TOKEN }],
          loading: false,
          networkStatus: 7,
          stale: false,
        });
      });
    });

    test('when the request body does not contain a valid GraphQL query', async () => {
      const { body, statusCode } = await cacheQuery('not a valid query', null);
      expect(statusCode).toEqual(500);
      const result = JSON.parse(body);
      expect(result).toBeObject();
      expect(result.errors).toBeArray();
      expect(result.errors.length).toEqual(1);
    });
  });

  describe('cacheQuery should return data', () => {
    describe('when the cache is filled and', () => {
      beforeEach(async () => {
        await redisClient.flushdb();
        await addItemToCache(redisClient, eventExample.body, eventExample.headers, responseSchemaData);
      });

      // XXX Title fixed: this test blanks the endpoint (like "when graphCMS is down" above), it does not use a bad token
      test('even when GraphCMS is down (bad endpoint, simulates an unreachable API)', async () => {
        const { body } = await cacheQuery({
          ...eventExample,
          __TEST_GRAPHCMS_ENDPOINT: '',
        }, null);
        expect(JSON.parse(body)).toBeObject();
        expect(JSON.parse(body)).toEqual(responseSchemaData);
      });

      test('even when redis is down (bad url, simulates a 404)', async () => {
        const { body } = await cacheQuery({
          ...eventExample,
          __TEST_REDIS_URL: '',
        }, null);
        expect(JSON.parse(body)).toBeObject();
        expect(JSON.parse(body)).toEqual(responseSchemaData);
      });

      test('even when redis connection fails (bad password, simulates a 401)', async () => {
        const { body } = await cacheQuery({
          ...eventExample,
          __TEST_REDIS_PASSWORD: '',
        }, null);
        expect(JSON.parse(body)).toBeObject();
        expect(JSON.parse(body)).toEqual(responseSchemaData);
      });
    });
  });
});
161 |
--------------------------------------------------------------------------------
/src/functions/epsagon.js:
--------------------------------------------------------------------------------
1 | import epsagon from '../utils/epsagon';
2 | import { cacheQuery as cacheQueryHandler } from './cache-query';
3 | import { readCache as readCacheHandler } from './read-cache';
4 | import { resetCache as resetCacheHandler } from './reset-cache';
5 |
// Wrap each lambda handler with Epsagon so invocations and errors are reported automatically
export const readCache = epsagon.lambdaWrapper(readCacheHandler);
export const cacheQuery = epsagon.lambdaWrapper(cacheQueryHandler);
// export const refreshCache = epsagon.lambdaWrapper(refreshCacheHandler); // XXX Not compatible, creates an infinite loop for some reason
export const resetCache = epsagon.lambdaWrapper(resetCacheHandler);
10 |
--------------------------------------------------------------------------------
/src/functions/read-cache.js:
--------------------------------------------------------------------------------
1 | import { createLogger } from '@unly/utils-simple-logger';
2 | import map from 'lodash.map';
3 |
4 | import { extractMetadataFromItem } from '../utils/cache';
5 | import epsagon from '../utils/epsagon';
6 | import { handleGraphCMSCompatibleErrorResponse, makeQueryHumanFriendly } from '../utils/graphql';
7 | import { extractDataFromRedisKey, getClient } from '../utils/redis';
8 |
9 | const logger = createLogger({
10 | label: 'Read cache',
11 | });
12 | const redisClient = getClient(); // XXX Init redis connection from outside the lambda handler in order to share the connection - See https://www.jeremydaly.com/reuse-database-connections-aws-lambda/
13 |
/**
 * Read the whole redis cache and return every cached GraphCMS query with its metadata.
 * Mostly meant for debug purpose, to check that the redis cache contains what we expect.
 *
 * @param event
 * @param context
 * @return {Promise<{body: *, statusCode: number}>}
 */
export const readCache = async (event, context) => {
  logger.debug('Lambda "readCache" called.');
  const queriesData = [];
  let redisKeys;

  try {
    logger.debug('Fetching all GraphCMS queries (stored as keys in redis).');
    // XXX If fetching data from redis fails, we can't do anything about it since we're supposed to read them from the cache.
    // We could execute the queries again but there would be no interest in doing so
    // since this endpoint is mostly for debug purpose and meant to test if the redis cache works properly
    redisKeys = await redisClient.keys('*'); // TODO I/O blocking, not an immediate concern but could/should be improved - See https://github.com/luin/ioredis#streamify-scanning
    logger.debug('Redis keys (queries) were fetched successfully.');
  } catch (e) {
    logger.debug('An error occurred when fetching the redis keys, an error will now be sent to the client.', 'FATAL');
    logger.error(e);
    epsagon.setError(e);

    return {
      statusCode: 500,
      body: handleGraphCMSCompatibleErrorResponse(`Internal server error. See server logs for debug.`),
    };
  }

  // The cache is indexed by query, stored as strings - Each key contain a GraphCMS query
  logger.debug('Fetching the previously cached data (from GraphCMS API) of each GraphCMS query from redis...');
  const values = await Promise.all(
    map(redisKeys, async (redisKey) => {
      try {
        // XXX "await" is mandatory here: returning the promise without awaiting it would bypass
        // this try/catch entirely (a rejection would propagate to Promise.all instead of yielding null)
        return await redisClient.get(redisKey);
      } catch (e) {
        logger.error(e);
        epsagon.setError(e);
        return null;
      }
    }),
  );
  logger.debug('Fetched all data from redis successfully.');

  map(values, (value, index) => {
    const redisKey = redisKeys[index];
    const { body, headers } = extractDataFromRedisKey(redisKey);

    queriesData.push({
      ...extractMetadataFromItem(value),
      body: JSON.parse(body),
      headers,
    });
  });

  logger.debug('Results are now sent to the client.');
  return {
    statusCode: 200,
    body: JSON.stringify(map(queriesData, (queryData) => {
      return {
        ...queryData,
        body: {
          ...queryData.body,
          query: makeQueryHumanFriendly(queryData.body.query),
        },
      };
    })),
  };
};
77 |
--------------------------------------------------------------------------------
/src/functions/refresh-cache.js:
--------------------------------------------------------------------------------
1 | import { createLogger } from '@unly/utils-simple-logger';
2 | import { NetworkStatus } from 'apollo-client';
3 | import map from 'lodash.map';
4 |
5 | import { headerAuthentication, noTokenProvidedMessage } from '../utils/auth';
6 | import { updateItemInCache } from '../utils/cache';
7 | import epsagon from '../utils/epsagon';
8 | import { handleGraphCMSCompatibleErrorResponse, makeQueryHumanFriendly, runQuery } from '../utils/graphql';
9 | import { extractDataFromRedisKey, getClient } from '../utils/redis';
10 | import { wait } from '../utils/timers';
11 |
12 | const logger = createLogger({
13 | label: 'Refresh cache',
14 | });
15 |
const redisClient = getClient(); // XXX Init redis connection from outside the lambda handler in order to share the connection - See https://www.jeremydaly.com/reuse-database-connections-aws-lambda/
export const gcmsWebhookTokenKey = 'GraphCMS-WebhookToken'; // Name of the HTTP header carrying the webhook token checked by headerAuthentication
18 |
19 | /**
20 | * Refresh the cache (redis)
21 | * Executes all queries that are stored in the redis store in order to refresh all cached values
22 | * The cache must only be refreshed if there was no error during the query
23 | * If an error happens during a query, the cache must not be updated, because the goal of this caching mechanism is to ensure stability/robustness, even at the price of data integrity
24 | *
25 | * @type {Function}
26 | * @param event
27 | * @param context
28 | * @return {Promise<{body: *, statusCode: number}>}
29 | */
export const refreshCache = async (event, context) => {
  logger.debug('Lambda "refreshCache" called.');

  // Reject unauthenticated callers early (401) - prevents anonymous callers from triggering refresh loops against the GraphCMS API
  if (!headerAuthentication(event.headers)) {
    logger.debug('Caller is not authorized to connect, an error will now be sent to the client.', 'FATAL');

    return {
      statusCode: 401,
      body: handleGraphCMSCompatibleErrorResponse(noTokenProvidedMessage),
    };
  }

  let redisKeys;
  try {
    logger.debug('Fetching all GraphCMS queries (stored as keys in redis).');
    // XXX If fetching data from redis fails, we can't do anything about it since it's a requirement in order to update the data
    redisKeys = await redisClient.keys('*'); // TODO I/O blocking, not an immediate concern but could/should be improved - See https://github.com/luin/ioredis#streamify-scanning
    logger.debug('Redis keys (queries) were fetched successfully.');
  } catch (e) {
    logger.debug(`An exception occurred while fetching redis cache.`, 'FATAL');
    logger.error(e);
    epsagon.setError(e);

    // 500: without the redis keys there is nothing to refresh
    return {
      statusCode: 500,
      body: handleGraphCMSCompatibleErrorResponse(`Internal server error. See server logs for debug.`),
    };
  }

  // Naive debounce: keeps the lambda busy for 10s so that webhook bursts don't each trigger a full refresh
  const sleep = 10000;
  logger.debug(`Pausing script for ${sleep}ms before continuing (naive debounce-ish AKA cross-fingers)`);
  await wait(sleep);

  // XXX "require" (not "import") because apolloClient relies on ENV variables that may not be set at import time - see src/utils/apolloClient.js
  const { ApolloClientFactory } = require('../utils/apolloClient'); // eslint-disable-line global-require
  // Each redis key encodes the original query body + headers; replay every stored query against the GraphCMS API
  const queriesPromises = redisKeys.map(async (redisKey) => {
    const { body, headers } = extractDataFromRedisKey(redisKey);
    const client = ApolloClientFactory(headers);
    return runQuery(JSON.parse(body), client);
  });

  // Fetch all queries in parallel and await them to be all finished
  logger.debug('Running all GraphCMS queries against GraphCMS API...');
  // NOTE(review): if any promise rejects, Promise.all rejects and "queriesResults" stays undefined;
  //  assuming "map" below is lodash's (import not visible in this chunk), it tolerates undefined and
  //  the lambda then reports 0 updated / 0 failed with a 200 - confirm this is intended
  const queriesResults = await Promise.all(queriesPromises).catch((error) => {
    logger.error(error);
    epsagon.setError(error);
  });
  const updatedResults = [];
  const failedResults = [];

  map(queriesResults, (queryResult, index) => {
    // XXX If the query failed (whether completely or partially), we don't update the cache
    if (queryResult.networkStatus !== NetworkStatus.ready) { // See https://github.com/apollographql/apollo-client/blob/master/packages/apollo-client/src/core/networkStatus.ts
      // When a query returns an error, we don't update the cache
      epsagon.setError(Error(`Cache refresh failed with "${JSON.stringify(queryResult.errors)}"`));
      logger.error(JSON.stringify(queryResult.errors, null, 2), 'graphcms-query-error');
      failedResults.push(queryResult);
    } else {
      // "index" matches the source key because Promise.all preserves input order
      const redisKey = redisKeys[index];
      const { body, headers } = extractDataFromRedisKey(redisKey);
      logger.debug(`Updating redis cache at index ${index} for query "${makeQueryHumanFriendly(body)}" and headers "${JSON.stringify(headers)}".`);

      // Otherwise, update the existing entry with the new values
      // XXX Executed async, no need to wait for result to continue
      updateItemInCache(redisClient, body, headers, queryResult)
        .then((result) => {
          if (result !== 'OK') {
            throw new Error(result);
          } else {
            logger.debug(`Redis key at index ${index} with query "${makeQueryHumanFriendly(body)}" and headers "${JSON.stringify(headers)}" was successfully updated (new data size: ${JSON.stringify(queryResult).length}).`);
          }
        })
        .catch((error) => {
          epsagon.setError(error);
          logger.error(`Redis key at index ${index} with query "${makeQueryHumanFriendly(body)}" and headers "${JSON.stringify(headers)}" failed to update (key size: ${JSON.stringify(body).length}).`);
        });

      // XXX Pushed before the async redis update resolves - "updatedEntries" in the response means
      //  "update started", not "update confirmed"
      updatedResults.push(queryResult);
    }
  });

  logger.debug('Results are now sent to the client.');
  return {
    statusCode: 200,
    body: JSON.stringify({
      status: true,
      message: `${updatedResults.length} entries updated, ${failedResults.length} entries failed`,
      updatedEntries: updatedResults.length,
      failedEntries: failedResults.length,
    }),
  };
};
121 |
--------------------------------------------------------------------------------
/src/functions/refresh-cache.test.js:
--------------------------------------------------------------------------------
1 | import waitForExpect from 'wait-for-expect';
2 |
3 | import { eventExample } from '../constants';
4 | import { addItemToCache } from '../utils/cache';
5 | import { generateRedisKey, getClient } from '../utils/redis';
6 | import { refreshCache } from './refresh-cache';
7 |
8 | const fakeEvent = { headers: { 'GraphCMS-WebhookToken': process.env.REFRESH_CACHE_TOKEN } };
9 | let redisClient;
10 |
describe('functions/refresh-cache.js', () => {
  // Start every test from an empty redis DB
  beforeEach(async () => {
    const flushResult = await redisClient.flushdb();
    expect(flushResult).toEqual('OK'); // Check that redis flushed the DB correctly
  });

  beforeAll(() => {
    redisClient = getClient();
  });

  afterAll(async () => {
    await redisClient.quit();
  });

  test('should return "Unauthorized" (401) when not providing a token', async () => {
    const { statusCode } = await refreshCache({
      headers: {},
    });
    expect(statusCode).toBe(401);
  });

  test('should return "Unauthorized" (401) when providing a wrong token', async () => {
    const { statusCode } = await refreshCache({
      headers: { Authorization: 'i-am-wrong' },
    });
    expect(statusCode).toBe(401);
  });

  test('should be authorized (200) when providing a proper token', async () => {
    const { statusCode } = await refreshCache(fakeEvent);
    expect(statusCode).toBe(200);
  });

  describe('when the redis cache is empty, it', () => {
    test('should do nothing when refreshing the cache', async () => {
      await refreshCache(fakeEvent);
      const getResult = await redisClient.keys('*');
      expect(getResult.length).toEqual(0);
    });
  });

  describe('when the redis cache is filled with correct queries, it', () => {
    beforeEach(async () => {
      const setResult = await addItemToCache(redisClient, eventExample.body, eventExample.headers, 'test-value');
      expect(setResult).toEqual('OK'); // Check that redis set data correctly
    });

    test('should refresh all items in the cache', async () => {
      const refreshResult = await refreshCache(fakeEvent);
      expect(refreshResult.statusCode).toEqual(200);
      expect(JSON.parse(refreshResult.body).updatedEntries).toEqual(1);
      expect(JSON.parse(refreshResult.body).failedEntries).toEqual(0);

      // XXX A no-op "setTimeout(() => {}, 1000)" was removed here: its empty callback was never
      //  awaited, so it never delayed anything - "waitForExpect" below already polls until the
      //  asynchronous (fire-and-forget) redis update becomes visible
      await waitForExpect(async () => { // See https://github.com/TheBrainFamily/wait-for-expect#readme
        const redisKey = generateRedisKey(eventExample.body, eventExample.headers);
        const results = JSON.parse(await redisClient.get(redisKey));
        expect(results.queryResults).not.toEqual('test-value');
        expect(results.version).toEqual(1);
      });
    });
  });
});
77 |
--------------------------------------------------------------------------------
/src/functions/reset-cache.js:
--------------------------------------------------------------------------------
1 | import { createLogger } from '@unly/utils-simple-logger';
2 |
3 | import { headerAuthentication, noTokenProvidedMessage } from '../utils/auth';
4 | import epsagon from '../utils/epsagon';
5 | import { handleGraphCMSCompatibleErrorResponse } from '../utils/graphql';
6 | import { getClient } from '../utils/redis';
7 | import { wait } from '../utils/timers';
8 |
9 | const logger = createLogger({
10 | label: 'Reset cache',
11 | });
12 |
13 | const redisClient = getClient(); // XXX Init redis connection from outside the lambda handler in order to share the connection - See https://www.jeremydaly.com/reuse-database-connections-aws-lambda/
14 |
/**
 * Reset the cache (redis)
 * Completely wipes the whole cache (flush), then waits 10 seconds before answering, which
 * throttles how frequently the cache can be reset in a row.
 *
 * @type {Function}
 * @param event AWS Lambda event (headers are used for authentication)
 * @param context AWS Lambda context (unused)
 * @return {Promise<{body: *, statusCode: number}>}
 */
export const resetCache = async (event, context) => {
  logger.debug('Lambda "resetCache" called.');

  const isAuthenticated = headerAuthentication(event.headers);
  if (!isAuthenticated) {
    logger.debug('Caller is not authorized to connect, an error will now be sent to the client.', 'FATAL');

    return {
      statusCode: 401,
      body: handleGraphCMSCompatibleErrorResponse(noTokenProvidedMessage),
    };
  }

  // Flush the whole redis DB; any failure is reported through "status" rather than an HTTP error
  let status = false;
  try {
    status = (await redisClient.flushdb()) === 'OK';
  } catch (e) {
    logger.debug(`An exception occurred while flushing redis cache.`, 'FATAL');
    logger.error(e);
    epsagon.setError(e);
  }

  const sleep = 10000;
  logger.debug(`Pausing script for ${sleep}ms - Forces no more than one cache reset once in a little while`);
  await wait(sleep);

  logger.debug('Results are now sent to the client.');
  const message = status
    ? `The cache was reset successfully`
    : `An error happened during cache reset, see logs for more details`;

  return {
    statusCode: 200,
    body: JSON.stringify({ status, message }),
  };
};
59 |
--------------------------------------------------------------------------------
/src/functions/reset-cache.test.js:
--------------------------------------------------------------------------------
1 | import waitForExpect from 'wait-for-expect';
2 | import { eventExample } from '../constants';
3 | import { addItemToCache } from '../utils/cache';
4 | import { generateRedisKey, getClient } from '../utils/redis';
5 | import { resetCache } from './reset-cache';
6 |
7 | const fakeEvent = { headers: { 'GraphCMS-WebhookToken': process.env.REFRESH_CACHE_TOKEN } };
8 | let redisClient;
9 |
describe('functions/reset-cache.js', () => {
  // Start every test from an empty redis DB
  beforeEach(async () => {
    const flushResult = await redisClient.flushdb();
    expect(flushResult).toEqual('OK'); // Check that redis flushed the DB correctly
  });

  beforeAll(() => {
    redisClient = getClient();
  });

  afterAll(async () => {
    await redisClient.quit();
  });

  describe('should check authentication before performing any action, and', () => {
    test('should return "Unauthorized" (401) when not providing a token', async () => {
      const { statusCode } = await resetCache({
        headers: {},
      });
      expect(statusCode).toBe(401);
    });

    test('should return "Unauthorized" (401) when providing a wrong token', async () => {
      const { statusCode } = await resetCache({
        headers: { Authorization: 'i-am-wrong' },
      });
      expect(statusCode).toBe(401);
    });

    test('should be authorized (200) when providing a proper token', async () => {
      const { statusCode } = await resetCache(fakeEvent);
      expect(statusCode).toBe(200);
    });
  });

  describe('when the redis cache is empty, it', () => {
    test('should reset the cache (which does not change anything because there were no entries anyway)', async () => {
      await resetCache(fakeEvent);
      const getResult = await redisClient.keys('*');
      expect(getResult.length).toEqual(0);
    });
  });

  describe('when the redis cache is filled with correct queries, it', () => {
    beforeEach(async () => {
      const setResult = await addItemToCache(redisClient, eventExample.body, eventExample.headers, 'test-value');
      expect(setResult).toEqual('OK'); // Check that redis set data correctly
    });

    test('should remove all items in the cache', async () => {
      const redisKey = generateRedisKey(eventExample.body, eventExample.headers);
      const results = JSON.parse(await redisClient.get(redisKey));
      expect(results).toBeDefined(); // Check the value exists in the cache before reset
      expect(results.queryResults).toEqual('test-value');
      expect(results.version).toEqual(0);

      const refreshResult = await resetCache(fakeEvent);
      expect(refreshResult.statusCode).toEqual(200);
      expect(JSON.parse(refreshResult.body).status).toEqual(true);

      // XXX A no-op "setTimeout(() => {}, 1000)" was removed here: its empty callback was never
      //  awaited, so it never delayed anything - "waitForExpect" below already polls
      await waitForExpect(async () => { // See https://github.com/TheBrainFamily/wait-for-expect#readme
        const redisKeys = await redisClient.keys('*'); // Check that all the items have been removed from the cache
        expect(redisKeys).toBeArray();
        expect(redisKeys.length).toEqual(0);
      });
    });
  });
});
82 |
--------------------------------------------------------------------------------
/src/functions/status.js:
--------------------------------------------------------------------------------
1 | import moment from 'moment';
2 |
/**
 * Health-check endpoint.
 * Returns deployment/build metadata (read from environment variables) plus the current server time.
 *
 * @param event AWS Lambda event (unused)
 * @param context AWS Lambda context (unused)
 * @return {Promise<{body: string}>} JSON body with status and build information
 */
export const status = async (event, context) => {
  return {
    body: JSON.stringify({
      status: 'OK',
      processNodeEnv: process.env.NODE_ENV,
      // XXX Native Date produces the exact same ISO-8601/UTC output as moment().toISOString(),
      //  no need for the "moment" dependency here
      time: new Date().toISOString(),
      appName: process.env.APP_NAME,
      release: process.env.GIT_COMMIT_VERSION,
      branch: process.env.GIT_BRANCH,
      releasedAt: process.env.DEPLOY_TIME,
      nodejs: process.version,
    }),
  };
};
17 |
--------------------------------------------------------------------------------
/src/utils/apolloClient.js:
--------------------------------------------------------------------------------
1 | import { InMemoryCache } from 'apollo-cache-inmemory';
2 | import ApolloClient from 'apollo-client';
3 | import { createHttpLink } from 'apollo-link-http';
4 | import fetch from 'node-fetch';
5 |
6 | import epsagon from './epsagon';
7 |
8 | /*
9 | * XXX This file should be imported using "require" and not "import", because it relies on ENV variables that aren't necessarily set when using "import"
10 | * (especially for tests)
11 | * But that's also because we weren't returning a function but an object before, but now we do (ApolloClientFactory) so it may be refactored and use consistent imports
12 | */
13 |
// Human-readable misconfiguration message - evaluated at import time, so it reflects the env at startup (token value is redacted to a boolean)
const graphCMSError = `Unable to connect to GraphCMS due to misconfiguration. Endpoint: "${process.env.GRAPHCMS_ENDPOINT}", Token: "${!!process.env.GRAPHCMS_TOKEN}"`;

// XXX Fail fast at module load: without an endpoint + token, no query could ever succeed
if (!process.env.GRAPHCMS_ENDPOINT || !process.env.GRAPHCMS_TOKEN) {
  epsagon.setError(Error(graphCMSError));
  throw Error(graphCMSError);
}
20 |
/**
 * Creates a new instance of Apollo Client
 * Used to perform queries against a GraphCMS endpoint (URI)
 *
 * @param headers Custom headers to send to the GraphCMS API (take precedence over the Authorization header)
 * @param uri URI of the GraphCMS API
 * @param token Authentication token of the GraphCMS API, sent as a Bearer header
 * @return {ApolloClient}
 * @constructor
 */
export const ApolloClientFactory = (headers = {}, uri = process.env.GRAPHCMS_ENDPOINT, token = process.env.GRAPHCMS_TOKEN) => {
  // Authenticated HTTP transport towards the GraphCMS endpoint
  const link = createHttpLink({
    uri,
    headers: {
      Authorization: `Bearer ${token}`,
      ...headers,
    },
    fetch,
  });

  // "no-cache" everywhere: this service manages its own (redis) cache, Apollo must not add another layer
  const defaultOptions = {
    watchQuery: {
      fetchPolicy: 'no-cache',
      errorPolicy: 'ignore',
    },
    query: {
      fetchPolicy: 'no-cache',
      errorPolicy: 'all',
    },
  };

  return new ApolloClient({
    link,
    cache: new InMemoryCache(),
    defaultOptions,
  });
};

export default ApolloClientFactory;
58 |
--------------------------------------------------------------------------------
/src/utils/auth.js:
--------------------------------------------------------------------------------
1 | import { createLogger } from '@unly/utils-simple-logger';
2 | import get from 'lodash.get';
3 | import epsagon from './epsagon';
4 |
5 | const logger = createLogger({
6 | label: 'Authentication utils',
7 | });
8 |
9 | export const gcmsWebhookTokenKey = 'GraphCMS-WebhookToken';
10 | export const noTokenProvidedMessage = `Authentication failed`;
11 |
/**
 * Attempts to authenticate using a token in the headers
 *
 * XXX HTTP header names are case-insensitive, and API gateways/proxies may alter their casing -
 *  the token header is therefore looked up case-insensitively (previously only the exact-case
 *  and all-lowercase spellings were accepted).
 *
 * @param headers HTTP headers of the incoming request (may be undefined or empty)
 * @return {boolean} true when the provided token matches process.env.REFRESH_CACHE_TOKEN
 */
export const headerAuthentication = (headers) => {
  logger.debug('Validating authentication credentials...');
  const expectedKey = gcmsWebhookTokenKey.toLowerCase();
  const tokenEntry = Object.entries(headers || {}).find(([key]) => key.toLowerCase() === expectedKey);
  const GCMSWebhookToken = tokenEntry ? tokenEntry[1] : null;

  // Check first if a correct token was provided - Security to avoid unauthenticated callers to DDoS GCMS API by spawning a refresh loop
  // NOTE(review): "!==" is not a constant-time comparison - consider crypto.timingSafeEqual if timing attacks are a concern
  if (GCMSWebhookToken !== process.env.REFRESH_CACHE_TOKEN) {
    const errorMessage = `Attempt to refresh cache with wrong token: "${GCMSWebhookToken}" (access refused)`;
    epsagon.setError(Error(errorMessage));
    logger.error(errorMessage);

    return false;
  }

  logger.debug('Authentication successful.');
  return true;
};
34 |
--------------------------------------------------------------------------------
/src/utils/auth.test.js:
--------------------------------------------------------------------------------
1 | import { headerAuthentication } from './auth';
2 |
// Fixtures: the valid token is read from the same env var the implementation compares against
const authenticatedEvent = { headers: { 'GraphCMS-WebhookToken': process.env.REFRESH_CACHE_TOKEN } };
const authenticatedEventLC = { headers: { 'graphcms-webhooktoken': process.env.REFRESH_CACHE_TOKEN } }; // Same token, lower-cased header name (gateways may lower-case headers)
const unauthenticatedEvent = { headers: { 'GraphCMS-WebhookToken': 'i-am-wrong' } };
const unauthenticatedEventEmpty = { headers: {} };

describe('utils/auth.js', () => {
  describe('headerAuthentication', () => {
    test('should authenticate properly when providing a correct authentication header', () => {
      const result = headerAuthentication(authenticatedEvent.headers);
      expect(result).toEqual(true);
    });

    test('should authenticate properly when providing a correct authentication header, when the header has been lower cased', () => {
      const result = headerAuthentication(authenticatedEventLC.headers);
      expect(result).toEqual(true);
    });

    test('should not authenticate when providing an incorrect authentication header', async () => {
      const result = headerAuthentication(unauthenticatedEvent.headers);
      expect(result).toEqual(false);
    });

    test('should not authenticate when providing an empty authentication header', async () => {
      const result = headerAuthentication(unauthenticatedEventEmpty.headers);
      expect(result).toEqual(false);
    });
  });
});
31 |
--------------------------------------------------------------------------------
/src/utils/cache.js:
--------------------------------------------------------------------------------
1 | import { createLogger } from '@unly/utils-simple-logger';
2 | import deepmerge from 'deepmerge';
3 |
4 | import { generateRedisKey } from './redis';
5 |
6 | const logger = createLogger({
7 | label: 'Cache utils',
8 | });
9 |
/**
 * Build a brand new cache entry (metadata + query results), ready to be serialized into redis.
 *
 * @param queryResults Results of the GraphCMS query to cache
 * @return {{createdAt: *, queryResults: *, version: number, updatedAt: null}} Item with initial metadata (version 0, never updated)
 */
export const buildItem = (queryResults) => ({
  createdAt: Date.now(),
  updatedAt: null,
  version: 0,
  queryResults,
});
24 |
/**
 * Returns a usable (parsed) version of a cached item.
 * Items read from redis are JSON strings, items already in memory are objects - both are accepted.
 *
 * @param item Cached item, either a JSON string (as stored in redis) or an already-parsed value
 * @return {any} Parsed item
 */
export const extractCachedItem = (item) => {
  if (typeof item !== 'string') {
    // Already parsed, nothing to do
    return item;
  }

  try {
    return JSON.parse(item);
  } catch (e) {
    logger.error(`Failed to parse item "${item}".`);

    // XXX Item can't be parsed, so it's likely a string that was meant to be the query results
    // Returns the item as query results and cross fingers, this shouldn't happen,
    // but may happen if dealing with an item that was stored before the items became an object
    return {
      queryResults: item,
    };
  }
};
51 |
/**
 * Returns only the metadata contained in an item, doesn't return the actual data
 *
 * @param item Cached item (JSON string or object), or null
 * @return {{createdAt: *, version: *, updatedAt: *}|null} Metadata only, or null when no item was given
 */
export const extractMetadataFromItem = (item) => {
  if (item === null) {
    return null;
  }

  // Keep only the metadata fields, drop the (potentially large) query results
  const { createdAt, updatedAt, version } = extractCachedItem(item);
  return { createdAt, updatedAt, version };
};
70 |
/**
 * Adds an item to the cache
 * An item is composed of metadata and query results
 * Automatically add metadata
 *
 * @param redisClient ioredis client instance
 * @param query GraphQL query (used to build the redis key)
 * @param headers Headers associated to the query (used to build the redis key)
 * @param queryResults Results to store in the cache
 * @return {Promise} Result of the redis SET command ('OK' on success)
 */
export const addItemToCache = async (redisClient, query, headers, queryResults) => {
  const serializedItem = JSON.stringify(buildItem(queryResults));

  // Returning the promise directly is equivalent to the previous "return await" for callers
  return redisClient.set(generateRedisKey(query, headers), serializedItem);
};
87 |
/**
 * Updates an item that already exists in the cache
 * Automatically handles "version" (auto-increment) and "updatedAt" metadata
 *
 * XXX If the item doesn't exist anymore (e.g. it was flushed between the read and this update),
 *  it is (re)created with fresh metadata instead of crashing.
 *
 * @param redisClient ioredis client instance
 * @param query GraphQL query (used to build the redis key)
 * @param headers Headers associated to the query (used to build the redis key)
 * @param queryResults New query results to store
 * @return {Promise<*>} Result of the redis SET command ('OK' on success)
 */
export const updateItemInCache = async (redisClient, query, headers, queryResults) => {
  const redisKey = generateRedisKey(query, headers);
  const oldValue = await redisClient.get(redisKey);
  // XXX "extractMetadataFromItem" returns null when there is no previous value - fall back to
  //  fresh metadata, otherwise "metadata.version += 1" below would throw a TypeError on null
  //  (version -1 so the first stored version ends up at 0, consistent with buildItem)
  const metadata = extractMetadataFromItem(oldValue) || { createdAt: Date.now(), updatedAt: null, version: -1 };
  metadata.version += 1;
  metadata.updatedAt = Date.now(); // Override

  return await redisClient.set(redisKey, JSON.stringify({
    ...metadata,
    queryResults,
  }));
};
110 |
/**
 * Returns the query results object contained in a cached item
 *
 * @param item Cached item (JSON string or object), or null
 * @return {any.queryResults} Query results, or null when the item is null
 * @throws {Error} When the item is undefined (likely a bad key was passed to redis.get())
 */
export const extractQueryResultsFromItem = (item) => {
  if (item === null) {
    return null;
  }
  if (typeof item === 'undefined') {
    throw Error('"undefined" item was provided, this is likely due to providing a bad key to redis.get()');
  }

  return extractCachedItem(item).queryResults;
};
128 |
/**
 * Helper for debugging the item resolved from the Redis cache
 * Print the whole item by default, but can override keys to avoid leaking data in the logs, or just print metadata to avoid the noise
 * XXX Beware printing sensitive information in production environment
 *
 * @example Will print metadata and the whole item
 *  printCachedItem(cachedItem, false);
 * @example Will only print metadata
 *  printCachedItem(cachedItem, true);
 * @example Will print metadata and will "strip" both subQueryKey1 and subQueryKey2
 *  printCachedItem(cachedItem, { subQueryKey1: undefined, subQueryKey2: undefined });
 *
 * @param cachedItem Item to print
 * @param stripData Either boolean (show/hide data) or an object that may hide only particular data keys (sub queries results)
 */
export const printCachedItem = (cachedItem, stripData = false) => {
  let stripper;
  if (typeof stripData === 'object') {
    // Strip only the given sub-query keys
    stripper = { queryResults: { data: stripData } };
  } else if (stripData) {
    // Strip the whole data payload, keep metadata only
    stripper = { queryResults: { data: undefined } };
  } else {
    // Print everything
    stripper = {};
  }

  logger.debug(JSON.stringify(deepmerge(extractCachedItem(cachedItem), stripper), null, 2));
};
149 |
--------------------------------------------------------------------------------
/src/utils/cache.test.js:
--------------------------------------------------------------------------------
1 | import { extractCachedItem, extractMetadataFromItem, extractQueryResultsFromItem } from './cache';
2 |
describe('utils/cache.js', () => {
  describe('when we extract item from cache', () => {
    test('it should return the extracted item if the item is a string', () => {
      const testObject = { foo: 'bar' };
      expect(extractCachedItem(JSON.stringify(testObject))).toMatchObject(testObject);
    });

    test('return the main item if the item is not a string', () => {
      const testObject = { foo: 'bar' };
      expect(extractCachedItem(testObject)).toMatchObject(testObject);
    });
  });

  describe('when we extract query from extracted item cache', () => {
    test('return null if item is null', () => {
      expect(extractQueryResultsFromItem(null)).toBeNull();
    });

    test('throw an Error if item is undefined', () => {
      expect(() => {
        extractQueryResultsFromItem();
      }).toThrow(Error);
    });
  });

  describe('when we extract metadata from an item', () => {
    test('return null if item is null', () => {
      expect(extractMetadataFromItem(null)).toBeNull();
    });

    test('throw an Error if item is undefined', () => {
      // XXX Was a copy-paste of the "extractQueryResultsFromItem" test above - this block is
      //  about "extractMetadataFromItem", so that's the function that must be exercised here
      expect(() => {
        extractMetadataFromItem();
      }).toThrow(Error);
    });
  });
});
40 |
--------------------------------------------------------------------------------
/src/utils/epsagon.js:
--------------------------------------------------------------------------------
1 | import epsagon from 'epsagon';
2 |
// Epsagon (monitoring/tracing) is only initialized when an app token is configured
const token = process.env.EPSAGON_APP_TOKEN;

if (token) {
  epsagon.init({
    token,
    appName: process.env.EPSAGON_APP_NAME,
    metadataOnly: false, // Optional, send more trace data
  });
} else {
  // NOTE(review): env vars are strings - this stores the string "true", not a boolean;
  //  confirm consumers of DISABLE_EPSAGON check truthiness of the string
  process.env.DISABLE_EPSAGON = true;
}

export default epsagon;
16 |
--------------------------------------------------------------------------------
/src/utils/graphql.js:
--------------------------------------------------------------------------------
1 | import { createLogger } from '@unly/utils-simple-logger';
2 | import gql from 'graphql-tag';
3 | import filter from 'lodash.filter';
4 | import startsWith from 'lodash.startswith';
5 | import includes from 'lodash.includes';
6 |
7 | const logger = createLogger({ // eslint-disable-line no-unused-vars
8 | label: 'GraphQL utils',
9 | });
10 |
// Header forwarding rules, consumed by "extractHeadersToForward" (below)
export const GCMS_HEADER_PREFIX = 'gcms-'; // Headers matching this prefix are meant to be forwarded XXX Headers will be automatically forwarded, but if you're subject to CORS requests, then you'll need to manually allow them in serverless.yml for "cache-query" lambda
export const GCMS_HEADERS_WHITELIST = [ // Whitelist of specific headers meant to be forwarded
  'locale', // Keep "locale" header, for backward-compatibility with GCMS previous versions
];
15 |
/**
 * Executes a GraphQL query on the GraphCMS API endpoint.
 *
 * @param query Query to perform - a JS object with a "query" string property (e.g. the result of JSON.parse on a cached body)
 * @param client Apollo client instance to use to send the query, specifies extra configuration, such as headers (authentication, etc.)
 * @return {Promise} Resolves with the Apollo query result
 * @throws {Error} When no client is provided, or when the query is still a string (not parsed)
 */
export const runQuery = async (query, client) => {
  if (!client) {
    throw Error('An Apollo client is required to perform the query');
  }

  if (typeof query === 'string') {
    throw Error('The "query" parameter should be a JS object - Did you forget to JSON.parse() it?');
  }

  // XXX Don't mutate the caller's object (the previous implementation reassigned query.query in
  //  place) - build a shallow copy carrying the parsed (gql AST) query instead
  return client.query({
    ...query,
    query: gql`${query.query}`,
  });
};
35 |
/**
 * Build an error that is ISO with GraphCMS API
 * Errors must be of the same structure as GraphCMS errors, so clients can handle them the same way
 *
 * @param message Human-readable error message
 * @return {{message: *}} Error object, message namespaced with "[Cache]"
 */
export const buildGraphCMSError = (message) => ({
  message: `[Cache] ${message}`,
});
48 |
49 | /**
50 | * Build a HTTP response body that is ISO with GraphCMS API.
51 | * The HTTP response of the cache must be similar to GraphCMS API responses, so that clients handles them the same way.
52 | *
53 | * Errors must therefore follow the following structure:
54 | * errors: (array)
55 | * error: (object)
56 | * message (string)
57 | *
58 | * Other fields, such as "locations" are not required (debug purpose, optional)
59 | *
60 | * @example GraphCMS error example (fetching non-existing field)
61 | *
62 | {
63 | "loading": false,
64 | "networkStatus": 7,
65 | "stale": false,
66 | "errors": [
67 | {
68 | "message": "Cannot query field \"names\" on type \"Organisation\". Did you mean \"name\"?",
69 | "locations": [
70 | {
71 | "line": 3,
72 | "column": 5
73 | }
74 | ]
75 | }
76 | ]
77 | }
78 | *
79 | * @param error
80 | * @return {string}
81 | */
export const handleGraphCMSCompatibleErrorResponse = (error) => {
  // Mimic the GraphCMS response shape: empty "data" plus an "errors" array
  const responseBody = {
    data: {},
    errors: [buildGraphCMSError(error)],
  };

  return JSON.stringify(responseBody);
};
88 |
/**
 * Make a GraphQL query readable by removing all unnecessary information and only keep a limited set of characters.
 * Strips all \n and collapses runs of multiple spaces into a single space, then truncates.
 *
 * @param query Raw GraphQL query string (may contain newlines and indentation)
 * @param maxLength Maximum length of the returned string (before the suffix is appended)
 * @param truncatedSuffix Suffix appended when the query was truncated
 * @return {string} Human friendly (single line, size-limited) version of the query
 */
export const makeQueryHumanFriendly = (query, maxLength = 50, truncatedSuffix = ' ... (truncated)') => {
  // Remove newlines entirely, then collapse any run of 2+ spaces into a single space
  // XXX A regex handles runs of any length, unlike the previous chained split/join
  //  (".split('  ').join(' ')" applied twice) which left residual double spaces for longer runs
  let queryString = query.replace(/\n/g, '').replace(/ {2,}/g, ' ');

  if (queryString.length > maxLength) {
    queryString = queryString.substring(0, maxLength) + truncatedSuffix;
  }

  return queryString;
};
108 |
/**
 * Filter out all headers that aren't meant to be forwarded to a GraphCMS endpoint when running a query.
 * Keeps headers starting with GCMS_HEADER_PREFIX, plus the ones explicitly listed in GCMS_HEADERS_WHITELIST.
 *
 * @param headers Incoming HTTP headers (key/value object, may be null/undefined)
 * @return {Object} Subset of headers that are meant to be forwarded
 */
export const extractHeadersToForward = (headers) => {
  const forwardedHeaders = {};

  // XXX Native iteration replaces the previous lodash "filter" call, which was (mis)used purely
  //  for its side effects - its filtered return value was discarded
  Object.entries(headers || {}).forEach(([key, value]) => {
    if (key.startsWith(GCMS_HEADER_PREFIX) || GCMS_HEADERS_WHITELIST.includes(key)) {
      forwardedHeaders[key] = value;
    }
  });

  return forwardedHeaders;
};
127 |
--------------------------------------------------------------------------------
/src/utils/graphql.test.js:
--------------------------------------------------------------------------------
1 | import { eventExample } from '../constants';
2 | import { buildGraphCMSError, extractHeadersToForward, handleGraphCMSCompatibleErrorResponse, makeQueryHumanFriendly, runQuery } from './graphql';
3 |
describe('utils/graphql.js', () => {
  describe('runQuery', () => {
    test('should throw an error when the query is provided as a string instead of an object', async () => {
      // XXX "require" (not "import") because apolloClient throws at load time if GraphCMS env vars are missing - see src/utils/apolloClient.js
      const { ApolloClientFactory } = require('../utils/apolloClient'); // eslint-disable-line global-require
      const client = ApolloClientFactory({});
      await expect(runQuery('not-a-query', client)).rejects.toThrow(Error);
    });

    test('should throw an error when no apollo client is provided', async () => {
      await expect(runQuery('not-a-query')).rejects.toThrow(Error);
    });
  });

  describe('buildGraphCMSError', () => {
    test('should convert an error message to an error object that is ISO with GraphCMS API response', async () => {
      const error = buildGraphCMSError('some error message');
      expect(error.message).toContain('some error message');
    });

    test('should add a namespace to errors', async () => {
      const error = buildGraphCMSError('some error message');
      expect(error.message).toContain('[Cache]');
    });
  });

  describe('handleGraphCMSCompatibleErrorResponse', () => {
    test('should return a GraphCMS API compatible HTTP response body', async () => {
      const body = JSON.parse(handleGraphCMSCompatibleErrorResponse('some error message'));
      expect(body.data).toBeObject();
      expect(body.errors).toBeArray();
      expect(body.errors[0].message).toContain('some error message');
    });
  });

  describe('makeQueryHumanFriendly', () => {
    test('should strip unnecessary characters', async () => {
      const unfriendlyQuery = JSON.parse(eventExample.body).query;
      const friendlyQuery = makeQueryHumanFriendly(unfriendlyQuery, 9, '');
      expect(friendlyQuery).toEqual('{__schema');
    });
  });

  describe('extractHeadersToForward', () => {
    test('should remove non-whitelisted headers', async () => {
      // Mix of prefixed ("gcms-"), whitelisted ("locale") and arbitrary transport headers
      const headers = {
        'accept': '*/*',
        'content-type': 'application/json',
        'locale': 'EN',
        'gcms-locale': 'EN, FR',
        'gcms-locale-no-default': 'false',
        'Content-Length': '146',
        'User-Agent': 'node-fetch/1.0 (+https://github.com/bitinn/node-fetch)',
        'Accept-Encoding': 'gzip,deflate',
        'Connection': 'close',
        'Host': 'localhost:8085',
      };
      const forwardedHeaders = extractHeadersToForward(headers);
      expect(forwardedHeaders).toEqual({
        'locale': 'EN',
        'gcms-locale': 'EN, FR',
        'gcms-locale-no-default': 'false',
      });
    });
  });
});
69 |
--------------------------------------------------------------------------------
/src/utils/queries.test.js:
--------------------------------------------------------------------------------
1 | import { eventExample } from '../constants';
2 |
describe('utils/queries.test.js', () => {
  describe('values used for tests', () => {
    test('must be compliant and data properly formatted', async () => {
      // Pin the exact serialized form of the shared fixture, so any accidental
      // change to it is caught before other test suites start failing obscurely
      const expectedSerializedEvent = '{"body":"{\\"operationName\\":null,\\"variables\\":{},\\"query\\":\\"{__schema { mutationType { kind }}}\\"}","headers":{"gcms-locale":"EN"}}';
      expect(JSON.stringify(eventExample)).toBe(expectedSerializedEvent);
    });
  });
});
10 |
--------------------------------------------------------------------------------
/src/utils/redis.js:
--------------------------------------------------------------------------------
1 | import { createLogger } from '@unly/utils-simple-logger';
2 | import Redis from 'ioredis';
3 | import epsagon from './epsagon';
4 |
// Module-scoped logger, labelled so redis messages stand out in the logs
const logger = createLogger({ label: 'Redis client' });
8 |
/**
 * Builds a lazily-connecting ioredis client and wires up its lifecycle logging.
 *
 * @param url Redis host and port, e.g. "localhost:6379" (defaults to the REDIS_URL env var)
 * @param password Redis password (defaults to the REDIS_PASSWORD env var)
 * @param maxRetriesPerRequest Number of retry attempts (20 by default) after which all
 *        pending commands are flushed with an error, so commands don't wait forever
 *        while the connection is down. Pass null to disable and wait indefinitely.
 * @return {Redis}
 */
export const getClient = (url = process.env.REDIS_URL, password = process.env.REDIS_PASSWORD, maxRetriesPerRequest = 20) => {
  const redisClient = new Redis(`redis://${url}`, {
    password,
    showFriendlyErrorStack: true, // See https://github.com/luin/ioredis#error-handling
    lazyConnect: true, // XXX Don't attempt to connect when initializing the client, in order to properly handle connection failure on a use-case basis
    maxRetriesPerRequest,
  });

  redisClient
    .on('connect', () => logger.info('Connected to redis instance'))
    .on('ready', () => logger.info('Redis instance is ready (data loaded from disk)'))
    .on('error', (e) => {
      // Handles redis connection temporarily going down without crashing the app.
      // When an error lands here, redis retries the request based on maxRetriesPerRequest.
      logger.error(`Unexpected error from redis client: "${e}"`);
      logger.error(e);
      epsagon.setError(e);
    });

  return redisClient;
};
45 |
/**
 * Builds the redis key for a request: a JSON string combining the
 * request body and the forwarded headers.
 *
 * @param body
 * @param headers
 * @return {string}
 */
export const generateRedisKey = (body, headers) => JSON.stringify({ body, headers });
60 |
/**
 * Reads back the data stored within a redis key — the key itself is a JSON
 * string produced by generateRedisKey, so this is essentially a safe parse.
 *
 * @param redisKey
 * @return {any}
 */
export const extractDataFromRedisKey = (redisKey) => {
  let extracted;
  try {
    extracted = JSON.parse(redisKey);
  } catch (e) {
    logger.error(`Failed to parse redisKey "${redisKey}".`);

    // XXX Redis key can't be parsed, so it's likely a string that was meant to be the query body.
    // Return the redis key as body and cross fingers — this shouldn't happen, but may when
    // dealing with keys stored before the redis key became a JSON string.
    extracted = {
      body: redisKey,
    };
  }
  return extracted;
};
82 |
--------------------------------------------------------------------------------
/src/utils/redis.test.js:
--------------------------------------------------------------------------------
1 | import { buildItem } from './cache';
2 | import { extractDataFromRedisKey, generateRedisKey, getClient } from './redis';
3 |
// Shared across the whole suite: one client with valid connection info (from .env.test)
// and one pointing at a port with no redis server, used to exercise failure handling
let redisClient;
let redisClientFailure;

describe('utils/redis.js', () => {
  beforeAll(() => {
    redisClient = getClient();
    redisClientFailure = getClient('localhost:5555', null, 0); // XXX This shouldn't throw an error because we're using lazyConnect:true which doesn't automatically connect to redis
  });

  afterAll(async () => {
    await redisClient.quit();
    // NOTE(review): redisClientFailure.quit() is also awaited at the end of the
    // connection-failure test below — confirm a second quit() on an already-quit
    // client resolves instead of rejecting
    await redisClientFailure.quit();
  });

  describe('should successfully init the redis client', () => {
    test('when provided connection info are correct', async () => {
      // Environment variables are from the .env.test file - This tests a localhost connection only
      expect(redisClient.options.host).toEqual(process.env.REDIS_URL.split(':')[0]);
      expect(redisClient.options.port).toEqual(parseInt(process.env.REDIS_URL.split(':')[1], 10));
      expect(redisClient.options.password).toEqual(process.env.REDIS_PASSWORD);
    });

    test('when connection info are incorrect', async () => {
      // lazyConnect means even a bad target yields a client object with parsed options
      expect(redisClientFailure.options.host).toEqual('localhost');
      expect(redisClientFailure.options.port).toEqual(5555);
    });
  });

  describe('should successfully perform native operations (read/write/delete/update)', () => {
    test('when using async/await (using native node.js promises)', async () => {
      const redisKey = generateRedisKey('key-1', {});

      // Write, read back, then delete an item
      const item = buildItem('value-1');
      const setResult = await redisClient.set(redisKey, JSON.stringify(item));
      expect(setResult).toEqual('OK');

      const result = await redisClient.get(redisKey);
      expect(JSON.parse(result)).toMatchObject(item);

      const delResult = await redisClient.del(redisKey);
      expect(delResult).toEqual(1);

      // Re-create the key after deletion
      const itemB = buildItem('value-1b');
      const setResultB = await redisClient.set(redisKey, JSON.stringify(itemB));
      expect(setResultB).toEqual('OK');

      const resultB = await redisClient.get(redisKey);
      expect(JSON.parse(resultB)).toMatchObject(itemB);

      // Overwrite the existing key (update)
      const itemC = buildItem('value-1c');
      const setResultC = await redisClient.set(redisKey, JSON.stringify(itemC));
      expect(setResultC).toEqual('OK');

      const resultC = await redisClient.get(redisKey);
      expect(JSON.parse(resultC)).toMatchObject(itemC);
    });
  });

  describe('should allow to catch an error when failing to open a connection to redis, in order to gracefully handle the error instead of crashing the app', () => {
    test('when connection info are incorrect', async () => {
      expect(redisClientFailure.options.host).toEqual('localhost');
      expect(redisClientFailure.options.port).toEqual(5555);

      try {
        await redisClientFailure.set('key-1', 'value-1'); // This should throw an error, because the connection to redis will be made when executing the
        expect(true).toBe(false); // This shouldn't be called, or the test will fail
      } catch (e) {
        // maxRetriesPerRequest was set to 0 for this client, so the command fails immediately
        expect(e).toBeDefined();
        expect(e.message).toContain('Reached the max retries per request limit');
      }
      await redisClientFailure.quit();
    });
  });

  describe('generateRedisKey and extractDataFromRedisKey', () => {
    test('should generate and extract a redis key', async () => {
      // Round-trip: the key is a JSON string of { body, headers } and must parse back losslessly
      const body = 'some body';
      const headers = { locale: 'FR' };
      const expectedRedisKey = JSON.stringify({ body, headers });
      expect(generateRedisKey(body, headers)).toEqual(expectedRedisKey);

      expect(extractDataFromRedisKey(expectedRedisKey)).toEqual({ body, headers });
    });
  });
});
88 |
--------------------------------------------------------------------------------
/src/utils/timers.js:
--------------------------------------------------------------------------------
/**
 * Resolves after the given delay — an awaitable wrapper around setTimeout.
 *
 * @param {number} ms Delay in milliseconds
 * @return {Promise<void>}
 */
export function wait(ms) {
  return new Promise((done) => {
    setTimeout(done, ms);
  });
}
4 |
--------------------------------------------------------------------------------
/webpack.config.js:
--------------------------------------------------------------------------------
1 | const webpack = require('webpack');
2 | const slsw = require('serverless-webpack');
3 | const nodeExternals = require('webpack-node-externals');
4 | const GitRevisionPlugin = require('git-revision-webpack-plugin'); // XXX https://www.npmjs.com/package/git-revision-webpack-plugin
5 | const moment = require('moment');
6 |
7 | const gitRevisionPlugin = new GitRevisionPlugin();
8 |
9 | module.exports = (async () => {
10 | const accountId = await slsw.lib.serverless.providers.aws.getAccountId();
11 |
12 | return {
13 | entry: slsw.lib.entries,
14 | target: 'node',
15 | devtool: 'source-map',
16 | externals: [nodeExternals()],
17 | mode: slsw.lib.webpack.isLocal ? 'development' : 'production',
18 | performance: {
19 | hints: false, // Turn off size warnings for entry points
20 | },
21 | stats: 'minimal', // https://github.com/serverless-heaven/serverless-webpack#stats
22 | plugins: [
23 | new webpack.DefinePlugin({
24 | 'process.env.GIT_BRANCH': JSON.stringify(gitRevisionPlugin.branch()),
25 | 'process.env.GIT_COMMIT_VERSION': JSON.stringify(gitRevisionPlugin.version()),
26 | 'process.env.DEPLOY_TIME': JSON.stringify(moment().format('LLLL')),
27 | 'process.env.AWS_ACCOUNT_ID': `${accountId}`,
28 | }),
29 | ],
30 | optimization: {
31 | nodeEnv: false, // Avoids overriding NODE_ENV - See https://github.com/webpack/webpack/issues/7470#issuecomment-394259698
32 | },
33 | module: {
34 | rules: [
35 | {
36 | test: /\.js$/,
37 | use: ['babel-loader'],
38 | include: __dirname,
39 | exclude: /node_modules/,
40 | },
41 | ],
42 | },
43 | };
44 | })();
45 |
--------------------------------------------------------------------------------