├── .air.toml
├── .github
├── CODE_OF_CONDUCT.md
└── workflows
│ ├── cd-chart.yml
│ ├── cd.yml
│ ├── ci-chart.yml
│ └── ci.yml
├── .gitignore
├── .golangci.yml
├── .goreleaser.yml
├── Caddyfile.dist
├── Dockerfile
├── LICENSE
├── README.md
├── admin
├── generated
│ └── generated.go
├── gqlgen.yml
├── resolver.go
├── schema.graphqls
└── schema.resolvers.go
├── caching.go
├── caching_handler.go
├── caching_plan.go
├── caching_plan_test.go
├── caching_purger.go
├── caching_purger_test.go
├── caching_request.go
├── caching_responsewriter.go
├── caching_result.go
├── caching_rule.go
├── caching_rule_test.go
├── caching_store.go
├── caching_store_test.go
├── caching_swr.go
├── caching_tag.go
├── caching_tag_test.go
├── caching_test.go
├── caching_vary.go
├── caching_vary_test.go
├── caddyfile.go
├── caddyfile_caching.go
├── caddyfile_complexity.go
├── caddyfile_test.go
├── charts
└── gbox
│ ├── .helmignore
│ ├── Chart.lock
│ ├── Chart.yaml
│ ├── README.md
│ ├── README.md.gotmpl
│ ├── ci
│ └── ct-values.yaml
│ ├── templates
│ ├── NOTES.txt
│ ├── _helpers.tpl
│ ├── deployment.yaml
│ ├── hpa.yaml
│ ├── ingress.yaml
│ ├── secrets.yaml
│ ├── service.yaml
│ ├── serviceaccount.yaml
│ ├── servicemonitor.yaml
│ └── tests
│ │ └── test-connection.yaml
│ └── values.yaml
├── cmd
└── main.go
├── complexity.go
├── complexity_test.go
├── docker-compose.yml
├── gbox.go
├── go.mod
├── go.sum
├── handler.go
├── handler_test.go
├── internal
└── testserver
│ ├── generated
│ └── generated.go
│ ├── gqlgen.yml
│ ├── model
│ └── models_gen.go
│ ├── resolver.go
│ ├── schema.graphqls
│ └── schema.resolvers.go
├── metrics.go
├── router.go
├── schema_fetcher.go
├── schema_fetcher_test.go
├── utils.go
├── ws.go
└── ws_test.go
/.air.toml:
--------------------------------------------------------------------------------
1 | root = "."
2 | testdata_dir = "testdata"
3 | tmp_dir = "tmp"
4 |
5 | [build]
6 | bin = "./tmp/gbox run -config ./Caddyfile.dist -watch"
7 | cmd = "go build -o ./tmp/gbox ./cmd"
8 | delay = 1000
9 | exclude_dir = ["assets", "tmp", "vendor", "testdata", "internal/testserver", "charts", "dist"]
10 | exclude_file = []
11 | exclude_regex = ["_test.go"]
12 | exclude_unchanged = false
13 | follow_symlink = false
14 | full_bin = ""
15 | include_dir = []
16 | include_ext = ["go"]
17 | kill_delay = "0s"
18 | log = "build-errors.log"
19 | send_interrupt = false
20 | stop_on_error = true
21 |
22 | [color]
23 | app = ""
24 | build = "yellow"
25 | main = "magenta"
26 | runner = "green"
27 | watcher = "cyan"
28 |
29 | [log]
30 | time = false
31 |
32 | [misc]
33 | clean_on_exit = false
34 |
35 | [screen]
36 | clear_on_rebuild = false
37 |
--------------------------------------------------------------------------------
/.github/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Contributor Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | We as members, contributors, and leaders pledge to make participation in our
6 | community a harassment-free experience for everyone, regardless of age, body
7 | size, visible or invisible disability, ethnicity, sex characteristics, gender
8 | identity and expression, level of experience, education, socio-economic status,
9 | nationality, personal appearance, race, caste, color, religion, or sexual
10 | identity and orientation.
11 |
12 | We pledge to act and interact in ways that contribute to an open, welcoming,
13 | diverse, inclusive, and healthy community.
14 |
15 | ## Our Standards
16 |
17 | Examples of behavior that contributes to a positive environment for our
18 | community include:
19 |
20 | * Demonstrating empathy and kindness toward other people
21 | * Being respectful of differing opinions, viewpoints, and experiences
22 | * Giving and gracefully accepting constructive feedback
23 | * Accepting responsibility and apologizing to those affected by our mistakes,
24 | and learning from the experience
25 | * Focusing on what is best not just for us as individuals, but for the overall
26 | community
27 |
28 | Examples of unacceptable behavior include:
29 |
30 | * The use of sexualized language or imagery, and sexual attention or advances of
31 | any kind
32 | * Trolling, insulting or derogatory comments, and personal or political attacks
33 | * Public or private harassment
34 | * Publishing others' private information, such as a physical or email address,
35 | without their explicit permission
36 | * Other conduct which could reasonably be considered inappropriate in a
37 | professional setting
38 |
39 | ## Enforcement Responsibilities
40 |
41 | Community leaders are responsible for clarifying and enforcing our standards of
42 | acceptable behavior and will take appropriate and fair corrective action in
43 | response to any behavior that they deem inappropriate, threatening, offensive,
44 | or harmful.
45 |
46 | Community leaders have the right and responsibility to remove, edit, or reject
47 | comments, commits, code, wiki edits, issues, and other contributions that are
48 | not aligned to this Code of Conduct, and will communicate reasons for moderation
49 | decisions when appropriate.
50 |
51 | ## Scope
52 |
53 | This Code of Conduct applies within all community spaces, and also applies when
54 | an individual is officially representing the community in public spaces.
55 | Examples of representing our community include using an official e-mail address,
56 | posting via an official social media account, or acting as an appointed
57 | representative at an online or offline event.
58 |
59 | ## Enforcement
60 |
61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
62 | reported to the community leaders responsible for enforcement at
63 | vuongxuongminh@gmail.com.
64 | All complaints will be reviewed and investigated promptly and fairly.
65 |
66 | All community leaders are obligated to respect the privacy and security of the
67 | reporter of any incident.
68 |
69 | ## Enforcement Guidelines
70 |
71 | Community leaders will follow these Community Impact Guidelines in determining
72 | the consequences for any action they deem in violation of this Code of Conduct:
73 |
74 | ### 1. Correction
75 |
76 | **Community Impact**: Use of inappropriate language or other behavior deemed
77 | unprofessional or unwelcome in the community.
78 |
79 | **Consequence**: A private, written warning from community leaders, providing
80 | clarity around the nature of the violation and an explanation of why the
81 | behavior was inappropriate. A public apology may be requested.
82 |
83 | ### 2. Warning
84 |
85 | **Community Impact**: A violation through a single incident or series of
86 | actions.
87 |
88 | **Consequence**: A warning with consequences for continued behavior. No
89 | interaction with the people involved, including unsolicited interaction with
90 | those enforcing the Code of Conduct, for a specified period of time. This
91 | includes avoiding interactions in community spaces as well as external channels
92 | like social media. Violating these terms may lead to a temporary or permanent
93 | ban.
94 |
95 | ### 3. Temporary Ban
96 |
97 | **Community Impact**: A serious violation of community standards, including
98 | sustained inappropriate behavior.
99 |
100 | **Consequence**: A temporary ban from any sort of interaction or public
101 | communication with the community for a specified period of time. No public or
102 | private interaction with the people involved, including unsolicited interaction
103 | with those enforcing the Code of Conduct, is allowed during this period.
104 | Violating these terms may lead to a permanent ban.
105 |
106 | ### 4. Permanent Ban
107 |
108 | **Community Impact**: Demonstrating a pattern of violation of community
109 | standards, including sustained inappropriate behavior, harassment of an
110 | individual, or aggression toward or disparagement of classes of individuals.
111 |
112 | **Consequence**: A permanent ban from any sort of public interaction within the
113 | community.
114 |
115 | ## Attribution
116 |
117 | This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org/),
118 | version 2.1, available at https://www.contributor-covenant.org/version/2/1/code_of_conduct.html.
119 |
120 | For answers to common questions about this code of conduct, see the FAQ at
121 | https://www.contributor-covenant.org/faq
--------------------------------------------------------------------------------
/.github/workflows/cd-chart.yml:
--------------------------------------------------------------------------------
1 | name: Release Chart
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 |
8 | permissions:
9 | contents: write # needed to write releases
10 |
11 | jobs:
12 | release:
13 | runs-on: ubuntu-latest
14 | steps:
15 | - uses: actions/checkout@v2
16 | with:
17 | fetch-depth: 0
18 |
19 | - name: Configure Git
20 | run: |
21 | git config user.name "$GITHUB_ACTOR"
22 | git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
23 |
24 | - uses: azure/setup-helm@v2.1
25 | with:
26 | version: v3.8.1
27 |
28 | - run: helm repo add bitnami https://charts.bitnami.com/bitnami
29 |
30 | - uses: helm/chart-releaser-action@v1.4.0
31 | env:
32 | CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
33 | CR_RELEASE_NAME_TEMPLATE: "helm-chart-{{ .Version }}"
34 |
--------------------------------------------------------------------------------
/.github/workflows/cd.yml:
--------------------------------------------------------------------------------
1 | name: Release
2 |
3 | on:
4 | push:
5 | tags: [ 'v*' ]
6 |
7 | permissions:
8 | contents: write # needed to write releases
9 | id-token: write # needed for keyless signing
10 |
11 | jobs:
12 | goreleaser:
13 | runs-on: ubuntu-latest
14 | steps:
15 | - uses: actions/checkout@v3
16 |
17 | - run: git fetch --prune --unshallow
18 |
19 | - uses: actions/setup-go@v3
20 | with:
21 | go-version: 1.17
22 |
23 | - uses: docker/setup-qemu-action@v1
24 |
25 | - name: Setup Docker Buildx
26 | id: buildx
27 | uses: docker/setup-buildx-action@v1
28 |
29 | - uses: sigstore/cosign-installer@main
30 |
31 | - uses: docker/login-action@v1
32 | with:
33 | username: gboxproxy
34 | password: ${{ secrets.DOCKER_HUB_TOKEN }}
35 |
36 | - uses: goreleaser/goreleaser-action@v2
37 | with:
38 | version: latest
39 | args: release --rm-dist
40 | env:
41 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
--------------------------------------------------------------------------------
/.github/workflows/ci-chart.yml:
--------------------------------------------------------------------------------
1 | name: Lint and Test Chart
2 |
3 | on: pull_request
4 |
5 | jobs:
6 | lint-test:
7 | runs-on: ubuntu-latest
8 | env:
9 | CT_TARGET_BRANCH: main
10 | steps:
11 | - uses: actions/checkout@v2
12 | with:
13 | fetch-depth: 0
14 |
15 | - uses: azure/setup-helm@v2.1
16 | with:
17 | version: v3.8.1
18 |
19 | - uses: actions/setup-python@v2
20 | with:
21 | python-version: 3.7
22 |
23 | - uses: helm/chart-testing-action@v2.2.1
24 |
25 | - id: list-changed
26 | run: |
27 | changed=$(ct list-changed)
28 | if [[ -n "$changed" ]]; then
29 | echo "::set-output name=changed::true"
30 | fi
31 |
32 | - run: helm repo add bitnami https://charts.bitnami.com/bitnami
33 |
34 | - run: ct lint --check-version-increment=false
35 |
36 | - if: steps.list-changed.outputs.changed == 'true'
37 | uses: helm/kind-action@v1.2.0
38 |
39 | - run: ct install
40 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: Lint and Test
2 |
3 | on:
4 | push:
5 | pull_request:
6 |
7 | jobs:
8 | golangci:
9 | name: Lint
10 | runs-on: ubuntu-latest
11 | steps:
12 | - uses: actions/setup-go@v3
13 | with:
14 | go-version: '1.17'
15 |
16 | - uses: actions/checkout@v3
17 |
18 | - uses: golangci/golangci-lint-action@v3
19 | with:
20 | version: latest
21 | args: --timeout=3m
22 |
23 | test:
24 | name: Test
25 | runs-on: ubuntu-latest
26 | steps:
27 | - uses: actions/setup-go@v3
28 | with:
29 | go-version: '1.17'
30 |
31 | - uses: actions/checkout@v3
32 |
33 | - uses: actions/cache@v3
34 | with:
35 | path: | # caches the Go module download cache and the Linux build cache
36 | ~/go/pkg/mod
37 | ~/.cache/go-build
38 | key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
39 | restore-keys: |
40 | ${{ runner.os }}-go-
41 |
42 | - run: go get
43 |
44 | - run: go test -race -covermode atomic -coverprofile=coverage.txt -coverpkg=github.com/gbox-proxy/gbox,github.com/gbox-proxy/gbox/admin ./...
45 |
46 | - uses: codecov/codecov-action@v3
47 | with:
48 | files: coverage.txt
49 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | tmp
2 | docker-compose.override.yml
3 |
4 | # Release
5 | dist
6 | charts/gbox/charts
--------------------------------------------------------------------------------
/.golangci.yml:
--------------------------------------------------------------------------------
1 | run:
2 | tests: true
3 | skip-dirs:
4 | - admin/generated
5 | - internal/testserver/generated
6 |
7 | linters-settings:
8 | goconst:
9 | min-occurrences: 5
10 | golint:
11 | min-confidence: 0.1
12 | funlen:
13 | lines: 99
14 | statements: 50
15 |
16 | linters:
17 | enable-all: true
18 | disable:
19 | - gocognit
20 | - containedctx
21 | - errcheck
22 | - lll
23 | - wsl
24 | - gomnd
25 | - testpackage
26 | - exhaustivestruct
27 | - paralleltest
28 | - cyclop
29 | - forcetypeassert
30 | - tagliatelle
31 | - varnamelen
32 | - wrapcheck
33 | - goerr113
34 | - gochecknoglobals
35 | - execinquery
36 | - exhaustruct
37 | - nonamedreturns
38 |
39 | # deprecated
40 | - interfacer
41 | - maligned
42 | - scopelint
43 | - golint
44 |
45 | issues:
46 | exclude-rules:
47 | - path: _test\.go
48 | linters:
49 | - ireturn
50 | - noctx
51 | - dupl
52 | - funlen
53 | - errcheck
54 | - path: admin/schema.resolvers\.go
55 | linters:
56 | - ireturn
57 | - path: internal/testserver/schema.resolvers\.go
58 | linters:
59 | - ireturn
--------------------------------------------------------------------------------
/.goreleaser.yml:
--------------------------------------------------------------------------------
1 | before:
2 | hooks:
3 | - go mod download
4 |
5 | snapshot:
6 | name_template: "{{ .Tag }}-next"
7 |
8 | changelog:
9 | sort: asc
10 | filters:
11 | exclude:
12 | - '^docs:'
13 | - '^test:'
14 | - '^ci:'
15 | - '^build:'
16 | - '^chore:'
17 |
18 | release:
19 | prerelease: auto
20 |
21 | env:
22 | - CGO_ENABLED=0
23 |
24 | builds:
25 | - id: main
26 | dir: cmd
27 | binary: gbox
28 | goos:
29 | - linux
30 | - darwin
31 | - windows
32 | goarch:
33 | - 386
34 | - amd64
35 | - arm
36 | - arm64
37 | goarm:
38 | - 5
39 | - 6
40 | - 7
41 |
42 | archives:
43 | - builds:
44 | - main
45 | replacements:
46 | darwin: Darwin
47 | linux: Linux
48 | windows: Windows
49 | 386: i386
50 | amd64: x86_64
51 | files:
52 | - LICENSE
53 | - Caddyfile.dist
54 | format_overrides:
55 | - goos: windows
56 | format: zip
57 |
58 | dockers:
59 | - ids:
60 | - main
61 | goos: linux
62 | goarch: amd64
63 | image_templates:
64 | - 'gboxproxy/gbox:{{ .Tag }}-amd64'
65 | - 'gboxproxy/gbox:v{{ .Major }}-amd64'
66 | - 'gboxproxy/gbox:v{{ .Major }}.{{ .Minor }}-amd64'
67 | - 'gboxproxy/gbox:latest-amd64'
68 | use: buildx
69 | build_flag_templates:
70 | - "--platform=linux/amd64"
71 | extra_files:
72 | - Caddyfile.dist
73 | - ids:
74 | - main
75 | goos: linux
76 | goarch: arm64
77 | image_templates:
78 | - 'gboxproxy/gbox:{{ .Tag }}-arm64v8'
79 | - 'gboxproxy/gbox:v{{ .Major }}-arm64v8'
80 | - 'gboxproxy/gbox:v{{ .Major }}.{{ .Minor }}-arm64v8'
81 | - 'gboxproxy/gbox:latest-arm64v8'
82 | use: buildx
83 | build_flag_templates:
84 | - "--platform=linux/arm64/v8"
85 | extra_files:
86 | - Caddyfile.dist
87 |
88 | docker_manifests:
89 | - name_template: gboxproxy/gbox:{{ .Tag }}
90 | image_templates:
91 | - gboxproxy/gbox:{{ .Tag }}-amd64
92 | - gboxproxy/gbox:{{ .Tag }}-arm64v8
93 | - name_template: gboxproxy/gbox:v{{ .Major }}
94 | image_templates:
95 | - gboxproxy/gbox:v{{ .Major }}-amd64
96 | - gboxproxy/gbox:v{{ .Major }}-arm64v8
97 | - name_template: gboxproxy/gbox:v{{ .Major }}.{{ .Minor }}
98 | image_templates:
99 | - gboxproxy/gbox:v{{ .Major }}.{{ .Minor }}-amd64
100 | - gboxproxy/gbox:v{{ .Major }}.{{ .Minor }}-arm64v8
101 | - name_template: gboxproxy/gbox:latest
102 | image_templates:
103 | - gboxproxy/gbox:latest-amd64
104 | - gboxproxy/gbox:latest-arm64v8
105 |
106 | docker_signs:
107 | - cmd: cosign
108 | env:
109 | - COSIGN_EXPERIMENTAL=1
110 | args:
111 | - sign
112 | - '${artifact}'
113 | artifacts: all
114 | output: true
115 |
116 | signs:
117 | - cmd: cosign
118 | env:
119 | - COSIGN_EXPERIMENTAL=1
120 | certificate: '${artifact}.pem'
121 | args:
122 | - sign-blob
123 | - '--output-certificate=${certificate}'
124 | - '--output-signature=${signature}'
125 | - '${artifact}'
126 | artifacts: checksum
127 | output: true
--------------------------------------------------------------------------------
/Caddyfile.dist:
--------------------------------------------------------------------------------
1 | {
2 | {$GBOX_GLOBAL_DIRECTIVES}
3 | }
4 |
5 | (gbox_default_caching_rules) {
6 | # The default caching rule matches all types and marks results as public.
7 | default {
8 | max_age 30m
9 | swr 30m
10 | }
11 | }
12 |
13 | {$GBOX_SERVER_NAME:localhost}
14 |
15 | log
16 |
17 | @admin_auth {
18 | path /admin/graphql
19 | method POST
20 | expression `{$GBOX_ENABLED_CACHING:true} == true && {$GBOX_ENABLED_ADMIN_AUTH:false} == true`
21 | }
22 |
23 | @metrics {
24 | path {$GBOX_METRICS_PATH:/metrics}
25 | method GET
26 | expression `{$GBOX_ENABLED_METRICS:false} == true`
27 | }
28 |
29 | route {
30 | encode gzip
31 |
32 | respond /healthz "OK"
33 |
34 | metrics @metrics
35 |
36 | basicauth @admin_auth bcrypt {
37 | {$GBOX_ADMIN_USERNAME:gbox} {$GBOX_ADMIN_PASSWORD:JDJhJDE0JHBXUk5YMjVRYlRtTjN3SERiU0Rrei4uMW4ub0FhaHZlY0hnbWtBMUNSLnhmeEUuUDVOOW5l}
38 | }
39 |
40 | gbox {
41 | upstream {$GBOX_UPSTREAM} {
42 | {$GBOX_UPSTREAM_REVERSE_PROXY_DIRECTIVES}
43 | }
44 | fetch_schema_interval {$GBOX_FETCH_SCHEMA_INTERVAL:10m}
45 | complexity {
46 | enabled {$GBOX_ENABLED_COMPLEXITY:true}
47 | node_count_limit {$GBOX_NODE_COUNT_LIMIT:60}
48 | max_complexity {$GBOX_MAX_COMPLEXITY:60}
49 | max_depth {$GBOX_MAX_DEPTH:15}
50 | }
51 | disabled_introspection {$GBOX_DISABLED_INTROSPECTION:false}
52 | disabled_playgrounds {$GBOX_DISABLED_PLAYGROUNDS:false}
53 | caching {
54 | enabled {$GBOX_ENABLED_CACHING:true}
55 | store_dsn {$GBOX_STORE_DSN:freecache://?cache_size=5368709120}
56 | rules {
57 | {$GBOX_CACHING_RULES:import gbox_default_caching_rules}
58 | }
59 | varies {
60 | {$GBOX_CACHING_VARIES}
61 | }
62 | type_keys {
63 | {$GBOX_CACHING_TYPE_KEYS}
64 | }
65 | auto_invalidate_cache {$GBOX_AUTO_INVALIDATE_CACHE:true}
66 | debug_headers {$GBOX_CACHING_DEBUG_HEADERS:true}
67 | }
68 | {$GBOX_EXTRA_DIRECTIVES}
69 | }
70 | }
71 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM caddy:2-alpine
2 |
3 | COPY gbox /usr/bin/caddy
4 | COPY Caddyfile.dist /etc/caddy/Caddyfile
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | Apache License
3 | Version 2.0, January 2004
4 | http://www.apache.org/licenses/
5 |
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 |
8 | 1. Definitions.
9 |
10 | "License" shall mean the terms and conditions for use, reproduction,
11 | and distribution as defined by Sections 1 through 9 of this document.
12 |
13 | "Licensor" shall mean the copyright owner or entity authorized by
14 | the copyright owner that is granting the License.
15 |
16 | "Legal Entity" shall mean the union of the acting entity and all
17 | other entities that control, are controlled by, or are under common
18 | control with that entity. For the purposes of this definition,
19 | "control" means (i) the power, direct or indirect, to cause the
20 | direction or management of such entity, whether by contract or
21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 | outstanding shares, or (iii) beneficial ownership of such entity.
23 |
24 | "You" (or "Your") shall mean an individual or Legal Entity
25 | exercising permissions granted by this License.
26 |
27 | "Source" form shall mean the preferred form for making modifications,
28 | including but not limited to software source code, documentation
29 | source, and configuration files.
30 |
31 | "Object" form shall mean any form resulting from mechanical
32 | transformation or translation of a Source form, including but
33 | not limited to compiled object code, generated documentation,
34 | and conversions to other media types.
35 |
36 | "Work" shall mean the work of authorship, whether in Source or
37 | Object form, made available under the License, as indicated by a
38 | copyright notice that is included in or attached to the work
39 | (an example is provided in the Appendix below).
40 |
41 | "Derivative Works" shall mean any work, whether in Source or Object
42 | form, that is based on (or derived from) the Work and for which the
43 | editorial revisions, annotations, elaborations, or other modifications
44 | represent, as a whole, an original work of authorship. For the purposes
45 | of this License, Derivative Works shall not include works that remain
46 | separable from, or merely link (or bind by name) to the interfaces of,
47 | the Work and Derivative Works thereof.
48 |
49 | "Contribution" shall mean any work of authorship, including
50 | the original version of the Work and any modifications or additions
51 | to that Work or Derivative Works thereof, that is intentionally
52 | submitted to Licensor for inclusion in the Work by the copyright owner
53 | or by an individual or Legal Entity authorized to submit on behalf of
54 | the copyright owner. For the purposes of this definition, "submitted"
55 | means any form of electronic, verbal, or written communication sent
56 | to the Licensor or its representatives, including but not limited to
57 | communication on electronic mailing lists, source code control systems,
58 | and issue tracking systems that are managed by, or on behalf of, the
59 | Licensor for the purpose of discussing and improving the Work, but
60 | excluding communication that is conspicuously marked or otherwise
61 | designated in writing by the copyright owner as "Not a Contribution."
62 |
63 | "Contributor" shall mean Licensor and any individual or Legal Entity
64 | on behalf of whom a Contribution has been received by Licensor and
65 | subsequently incorporated within the Work.
66 |
67 | 2. Grant of Copyright License. Subject to the terms and conditions of
68 | this License, each Contributor hereby grants to You a perpetual,
69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 | copyright license to reproduce, prepare Derivative Works of,
71 | publicly display, publicly perform, sublicense, and distribute the
72 | Work and such Derivative Works in Source or Object form.
73 |
74 | 3. Grant of Patent License. Subject to the terms and conditions of
75 | this License, each Contributor hereby grants to You a perpetual,
76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 | (except as stated in this section) patent license to make, have made,
78 | use, offer to sell, sell, import, and otherwise transfer the Work,
79 | where such license applies only to those patent claims licensable
80 | by such Contributor that are necessarily infringed by their
81 | Contribution(s) alone or by combination of their Contribution(s)
82 | with the Work to which such Contribution(s) was submitted. If You
83 | institute patent litigation against any entity (including a
84 | cross-claim or counterclaim in a lawsuit) alleging that the Work
85 | or a Contribution incorporated within the Work constitutes direct
86 | or contributory patent infringement, then any patent licenses
87 | granted to You under this License for that Work shall terminate
88 | as of the date such litigation is filed.
89 |
90 | 4. Redistribution. You may reproduce and distribute copies of the
91 | Work or Derivative Works thereof in any medium, with or without
92 | modifications, and in Source or Object form, provided that You
93 | meet the following conditions:
94 |
95 | (a) You must give any other recipients of the Work or
96 | Derivative Works a copy of this License; and
97 |
98 | (b) You must cause any modified files to carry prominent notices
99 | stating that You changed the files; and
100 |
101 | (c) You must retain, in the Source form of any Derivative Works
102 | that You distribute, all copyright, patent, trademark, and
103 | attribution notices from the Source form of the Work,
104 | excluding those notices that do not pertain to any part of
105 | the Derivative Works; and
106 |
107 | (d) If the Work includes a "NOTICE" text file as part of its
108 | distribution, then any Derivative Works that You distribute must
109 | include a readable copy of the attribution notices contained
110 | within such NOTICE file, excluding those notices that do not
111 | pertain to any part of the Derivative Works, in at least one
112 | of the following places: within a NOTICE text file distributed
113 | as part of the Derivative Works; within the Source form or
114 | documentation, if provided along with the Derivative Works; or,
115 | within a display generated by the Derivative Works, if and
116 | wherever such third-party notices normally appear. The contents
117 | of the NOTICE file are for informational purposes only and
118 | do not modify the License. You may add Your own attribution
119 | notices within Derivative Works that You distribute, alongside
120 | or as an addendum to the NOTICE text from the Work, provided
121 | that such additional attribution notices cannot be construed
122 | as modifying the License.
123 |
124 | You may add Your own copyright statement to Your modifications and
125 | may provide additional or different license terms and conditions
126 | for use, reproduction, or distribution of Your modifications, or
127 | for any such Derivative Works as a whole, provided Your use,
128 | reproduction, and distribution of the Work otherwise complies with
129 | the conditions stated in this License.
130 |
131 | 5. Submission of Contributions. Unless You explicitly state otherwise,
132 | any Contribution intentionally submitted for inclusion in the Work
133 | by You to the Licensor shall be under the terms and conditions of
134 | this License, without any additional terms or conditions.
135 | Notwithstanding the above, nothing herein shall supersede or modify
136 | the terms of any separate license agreement you may have executed
137 | with Licensor regarding such Contributions.
138 |
139 | 6. Trademarks. This License does not grant permission to use the trade
140 | names, trademarks, service marks, or product names of the Licensor,
141 | except as required for reasonable and customary use in describing the
142 | origin of the Work and reproducing the content of the NOTICE file.
143 |
144 | 7. Disclaimer of Warranty. Unless required by applicable law or
145 | agreed to in writing, Licensor provides the Work (and each
146 | Contributor provides its Contributions) on an "AS IS" BASIS,
147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 | implied, including, without limitation, any warranties or conditions
149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 | PARTICULAR PURPOSE. You are solely responsible for determining the
151 | appropriateness of using or redistributing the Work and assume any
152 | risks associated with Your exercise of permissions under this License.
153 |
154 | 8. Limitation of Liability. In no event and under no legal theory,
155 | whether in tort (including negligence), contract, or otherwise,
156 | unless required by applicable law (such as deliberate and grossly
157 | negligent acts) or agreed to in writing, shall any Contributor be
158 | liable to You for damages, including any direct, indirect, special,
159 | incidental, or consequential damages of any character arising as a
160 | result of this License or out of the use or inability to use the
161 | Work (including but not limited to damages for loss of goodwill,
162 | work stoppage, computer failure or malfunction, or any and all
163 | other commercial damages or losses), even if such Contributor
164 | has been advised of the possibility of such damages.
165 |
166 | 9. Accepting Warranty or Additional Liability. While redistributing
167 | the Work or Derivative Works thereof, You may choose to offer,
168 | and charge a fee for, acceptance of support, warranty, indemnity,
169 | or other liability obligations and/or rights consistent with this
170 | License. However, in accepting such obligations, You may act only
171 | on Your own behalf and on Your sole responsibility, not on behalf
172 | of any other Contributor, and only if You agree to indemnify,
173 | defend, and hold each Contributor harmless for any liability
174 | incurred by, or claims asserted against, such Contributor by reason
175 | of your accepting any such warranty or additional liability.
176 |
177 | END OF TERMS AND CONDITIONS
178 |
179 | APPENDIX: How to apply the Apache License to your work.
180 |
181 | To apply the Apache License to your work, attach the following
182 | boilerplate notice, with the fields enclosed by brackets "[]"
183 | replaced with your own identifying information. (Don't include
184 | the brackets!) The text should be enclosed in the appropriate
185 | comment syntax for the file format. We also recommend that a
186 | file or class name and description of purpose be included on the
187 | same "printed page" as the copyright notice for easier
188 | identification within third-party archives.
189 |
190 | Copyright 2022 GBox Authors
191 |
192 | Licensed under the Apache License, Version 2.0 (the "License");
193 | you may not use this file except in compliance with the License.
194 | You may obtain a copy of the License at
195 |
196 | http://www.apache.org/licenses/LICENSE-2.0
197 |
198 | Unless required by applicable law or agreed to in writing, software
199 | distributed under the License is distributed on an "AS IS" BASIS,
200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 | See the License for the specific language governing permissions and
202 | limitations under the License.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |

2 |
3 | Fast :zap: reverse proxy in front of any GraphQL server for caching, securing and monitoring.
4 |
5 | [](https://github.com/gbox-proxy/gbox/actions/workflows/ci.yml)
6 | [](https://codecov.io/gh/gbox-proxy/gbox)
7 | [](https://goreportcard.com/report/github.com/gbox-proxy/gbox)
8 | [](https://pkg.go.dev/github.com/gbox-proxy/gbox)
9 | [](https://artifacthub.io/packages/search?repo=gbox)
10 |
11 | Features
12 | --------
13 |
14 | + :floppy_disk: Caching
15 | + [RFC7234](https://httpwg.org/specs/rfc7234.html) compliant HTTP Cache.
16 | + Cache query operations results through types.
17 | + Auto invalidate cache through mutation operations.
18 | + [SWR](https://web.dev/stale-while-revalidate/): revalidate stale query results in the background.
19 | + Vary cached query results by specific headers or cookies (varies).
20 | + :closed_lock_with_key: Securing
21 | + Disable introspection.
22 | + Limit operations depth, nodes and complexity.
23 | + :chart_with_upwards_trend: Monitoring ([Prometheus](https://prometheus.io/) metrics)
24 | + Operations in flight.
25 | + Operations count.
26 | + Operations request durations.
27 | + Operations caching statuses.
28 |
29 | How it works
30 | ------------
31 |
 32 | Every single request sent by your clients will be served by GBox. The GBox reverse proxy will cache, validate, and collect metrics before passing requests through to your GraphQL server.
33 |
34 | 
35 |
36 | Documentation
37 | -------------
38 |
39 | The GBox documentation can be browsed on [Github page](https://gbox-proxy.github.io/).
40 |
41 | Inspiration
42 | -----------
43 |
44 | The GBox has been inspired by many others related work including:
45 |
46 | + [Mercure](https://github.com/dunglas/mercure)
47 | + [graphql-go-tools](https://github.com/jensneuse/graphql-go-tools)
48 | + [GraphCDN](https://graphcdn.io)
49 |
50 | Thanks to all the great people who created these projects!
--------------------------------------------------------------------------------
/admin/gqlgen.yml:
--------------------------------------------------------------------------------
1 | # Where are all the schema files located? globs are supported eg src/**/*.graphqls
2 | schema:
3 | - './*.graphqls'
4 |
5 | # Where should the generated server code go?
6 | exec:
7 | filename: generated/generated.go
8 | package: generated
9 |
10 | #
11 | ## Where should any generated models go?
12 | #model:
13 | # filename: admin/model/models_gen.go
14 | # package: model
15 |
16 | # Where should the resolver implementations go?
17 | resolver:
18 | layout: follow-schema
19 | dir: .
20 | package: admin
21 |
22 | # Optional: turn on use `gqlgen:"fieldName"` tags in your models
23 | # struct_tag: json
24 |
25 | # Optional: turn on to use []Thing instead of []*Thing
26 | # omit_slice_element_pointers: false
27 |
28 | # Optional: set to speed up generation time by not performing a final validation pass.
29 | # skip_validation: true
30 |
31 | # gqlgen will search for any type names in the schema in these go packages
32 | # if they match it will use them, otherwise it will generate them.
33 | autobind:
34 | # - "github.com/gbox-proxy/gbox/admin/model"
35 |
36 | # This section declares type mapping between the GraphQL and go type systems
37 | #
38 | # The first line in each type will be used as defaults for resolver arguments and
39 | # modelgen, the others will be allowed when binding to fields. Configure them to
40 | # your liking
41 | models:
42 | ID:
43 | model:
44 | - github.com/99designs/gqlgen/graphql.ID
45 | - github.com/99designs/gqlgen/graphql.Int
46 | - github.com/99designs/gqlgen/graphql.Int64
47 | - github.com/99designs/gqlgen/graphql.Int32
48 | Int:
49 | model:
50 | - github.com/99designs/gqlgen/graphql.Int
51 | - github.com/99designs/gqlgen/graphql.Int64
52 | - github.com/99designs/gqlgen/graphql.Int32
53 |
--------------------------------------------------------------------------------
/admin/resolver.go:
--------------------------------------------------------------------------------
1 | package admin
2 |
3 | //go:generate go run -mod=mod github.com/99designs/gqlgen generate
4 |
5 | import (
6 | "context"
7 |
8 | "github.com/jensneuse/graphql-go-tools/pkg/ast"
9 | "github.com/jensneuse/graphql-go-tools/pkg/graphql"
10 | "go.uber.org/zap"
11 | )
12 |
// QueryResultCachePurger abstracts the cache-invalidation operations the admin
// GraphQL resolvers need. The gbox Caching type provides the implementation
// (see caching_purger.go); purges are keyed by schema hash, operation name,
// type name, type field, or type/field/value tags.
type QueryResultCachePurger interface {
	PurgeQueryResultBySchema(context.Context, *graphql.Schema) error
	PurgeQueryResultByOperationName(context.Context, string) error
	PurgeQueryResultByTypeName(context.Context, string) error
	PurgeQueryResultByTypeField(ctx context.Context, typeName, fieldName string) error
	PurgeQueryResultByTypeKey(ctx context.Context, typeName, fieldName string, value interface{}) error
}
20 |
// Resolver is the root resolver of the admin GraphQL endpoint. It carries the
// upstream schema (used for schema-wide purges) and the purger that performs
// cache invalidation.
type Resolver struct {
	upstreamSchema *graphql.Schema
	upstreamSchemaDefinition *ast.Document
	purger QueryResultCachePurger
	logger *zap.Logger
}

// NewResolver wires an admin Resolver with its dependencies: the upstream
// schema and its parsed definition, a logger, and the cache purger.
func NewResolver(s *graphql.Schema, d *ast.Document, l *zap.Logger, p QueryResultCachePurger) *Resolver {
	return &Resolver{
		upstreamSchema: s,
		upstreamSchemaDefinition: d,
		logger: l,
		purger: p,
	}
}
36 |
--------------------------------------------------------------------------------
/admin/schema.graphqls:
--------------------------------------------------------------------------------
1 | type Query {
2 | dummy: String!
3 | }
4 |
5 | type Mutation {
6 | purgeAll: Boolean!
7 | purgeOperation(name: String!): Boolean!
8 | purgeTypeKey(type: String!, field: String!, key: ID!): Boolean!
9 | purgeQueryRootField(field: String!): Boolean!
10 | purgeType(type: String!): Boolean!
11 | }
--------------------------------------------------------------------------------
/admin/schema.resolvers.go:
--------------------------------------------------------------------------------
1 | package admin
2 |
3 | // This file will be automatically regenerated based on the schema, any resolver implementations
4 | // will be copied through when generating and any unknown code will be moved to the end.
5 |
6 | import (
7 | "context"
8 |
9 | "github.com/gbox-proxy/gbox/admin/generated"
10 | "go.uber.org/zap"
11 | )
12 |
13 | func (r *mutationResolver) PurgeAll(ctx context.Context) (bool, error) {
14 | if err := r.purger.PurgeQueryResultBySchema(ctx, r.upstreamSchema); err != nil {
15 | r.logger.Warn("fail to purge query result by operation name", zap.Error(err))
16 |
17 | return false, nil
18 | }
19 |
20 | return true, nil
21 | }
22 |
23 | func (r *mutationResolver) PurgeOperation(ctx context.Context, name string) (bool, error) {
24 | if err := r.purger.PurgeQueryResultByOperationName(ctx, name); err != nil {
25 | r.logger.Warn("fail to purge query result by operation name", zap.Error(err))
26 |
27 | return false, nil
28 | }
29 |
30 | return true, nil
31 | }
32 |
// PurgeTypeKey invalidates cached results tagged with a specific
// type/field/value triple (e.g. User/id/1). The GraphQL ID argument arrives
// here as a string. Failure is logged and reported as `false`, not as an error.
func (r *mutationResolver) PurgeTypeKey(ctx context.Context, typeArg string, field string, key string) (bool, error) {
	if err := r.purger.PurgeQueryResultByTypeKey(ctx, typeArg, field, key); err != nil {
		r.logger.Warn("fail to purge query result by type key", zap.Error(err))

		return false, nil
	}

	return true, nil
}
42 |
// PurgeQueryRootField invalidates cached results that selected the given
// field on the schema's query root type. Failure is logged and reported as
// `false`, not as an error.
func (r *mutationResolver) PurgeQueryRootField(ctx context.Context, field string) (bool, error) {
	if err := r.purger.PurgeQueryResultByTypeField(ctx, r.upstreamSchema.QueryTypeName(), field); err != nil {
		r.logger.Warn("fail to purge query result by root field", zap.Error(err))

		return false, nil
	}

	return true, nil
}
52 |
53 | func (r *mutationResolver) PurgeType(ctx context.Context, typeArg string) (bool, error) {
54 | if err := r.purger.PurgeQueryResultByTypeName(ctx, typeArg); err != nil {
55 | r.logger.Warn("fail to purge query result by type", zap.Error(err))
56 |
57 | return false, err
58 | }
59 |
60 | return true, nil
61 | }
62 |
// Dummy exists only because the GraphQL spec requires the Query type to have
// at least one field; the admin API is effectively mutation-only.
func (r *queryResolver) Dummy(ctx context.Context) (string, error) {
	return "no query fields exists", nil
}

// Mutation returns generated.MutationResolver implementation.
func (r *Resolver) Mutation() generated.MutationResolver { return &mutationResolver{r} }

// Query returns generated.QueryResolver implementation.
func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} }

// Per-operation resolver types; both embed *Resolver to share its dependencies.
type (
	mutationResolver struct{ *Resolver }
	queryResolver struct{ *Resolver }
)
77 |
--------------------------------------------------------------------------------
/caching.go:
--------------------------------------------------------------------------------
1 | package gbox
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "net/url"
7 |
8 | "github.com/caddyserver/caddy/v2"
9 | "github.com/jensneuse/graphql-go-tools/pkg/graphql"
10 | "go.uber.org/zap"
11 | )
12 |
// cachingStores pools CachingStore instances keyed by DSN so that module
// instances (e.g. across config reloads) share one store; a store is
// destructed only when its last user is deleted from the pool (see Cleanup).
var cachingStores = caddy.NewUsagePool()

// CachingStatus is the caching outcome reported in the x-cache response header.
type (
	CachingStatus string
)

const (
	// CachingStatusPass: the request is not cacheable under the resolved plan.
	CachingStatusPass CachingStatus = "PASS"
	// CachingStatusHit: a cached result was served.
	CachingStatusHit CachingStatus = "HIT"
	// CachingStatusMiss: no usable cached result; the upstream response may be stored.
	CachingStatusMiss CachingStatus = "MISS"
)
24 |
// Caching is the query-result caching module of gbox: it stores GraphQL query
// results in a configurable backend, serves them according to rules/varies,
// refreshes stale results in the background, and invalidates them on mutations.
type Caching struct {
	// Storage DSN; currently supports redis and freecache only.
	// Redis example:
	//  redis://username:password@localhost:6379?db=0&max_retries=3
	// more dsn options see at https://github.com/go-redis/redis/blob/v8.11.5/options.go#L31
	// Freecache example:
	//  freecache://?cache_size=104857600
	// If not set it will be freecache://?cache_size=104857600 (cache size 100MB)
	StoreDsn string `json:"store_dsn,omitempty"`

	// Caching rules: which request types/fields are cacheable and for how long.
	Rules CachingRules `json:"rules,omitempty"`

	// Caching varies: header/cookie sets that partition cached results.
	Varies CachingVaries `json:"varies,omitempty"`

	// GraphQL type fields will be used to detect change of cached query results when user execute mutation query.
	// Example when execute mutation query below:
	//  mutation { updateUser { id } }
	// if `updateUser` field have type User and id returning in example above is 1, all cache results of user id 1 will be purged.
	// If not set default value of it will be `id` for all types.
	TypeKeys graphql.RequestTypes `json:"type_keys,omitempty"`

	// Auto invalidate query result cached by mutation result type keys.
	// Example: if you had cached query result of User type, when you make mutation query and result
	// of this query have type User with id's 3, all cached query result related with id 3 of User type will be purged.
	// NOTE(review): no json tag — the JSON config key is the Go field name; confirm intended.
	AutoInvalidate bool

	// Add debug headers like query result cache key,
	// plan cache key and query result had types keys or not...
	// NOTE(review): no json tag — the JSON config key is the Go field name; confirm intended.
	DebugHeaders bool

	logger *zap.Logger       // injected via withLogger
	store *CachingStore      // shared store resolved from StoreDsn in Provision
	ctxBackground context.Context // context for background SWR/purge work; cancelled in Cleanup
	ctxBackgroundCancel func()
	cachingMetrics // embedded metrics recorder, injected via withMetrics
}
63 |
// cachingStoreDestructor adapts a CachingStore to caddy.Destructor so the
// usage pool can close the store once no module instance references it.
type cachingStoreDestructor struct {
	store *CachingStore
}

// Destruct closes the wrapped store; invoked by the usage pool on eviction.
func (c *cachingStoreDestructor) Destruct() error {
	return c.store.close()
}
71 |
// withLogger injects the logger this module reports through.
func (c *Caching) withLogger(l *zap.Logger) {
	c.logger = l
}

// withMetrics injects the recorder used to report caching statuses (see
// addMetricsCaching calls in the request handlers).
func (c *Caching) withMetrics(m cachingMetrics) {
	c.cachingMetrics = m
}
79 |
80 | func (c *Caching) Provision(ctx caddy.Context) error {
81 | repl := caddy.NewReplacer()
82 | c.StoreDsn = repl.ReplaceKnown(c.StoreDsn, "")
83 | c.ctxBackground, c.ctxBackgroundCancel = context.WithCancel(context.Background())
84 |
85 | if c.StoreDsn == "" {
86 | c.StoreDsn = "freecache://?cache_size=104857600"
87 | }
88 |
89 | destructor, _, err := cachingStores.LoadOrNew(c.StoreDsn, func() (caddy.Destructor, error) {
90 | var u *url.URL
91 | var err error
92 | var store *CachingStore
93 | u, err = url.Parse(c.StoreDsn)
94 |
95 | if err != nil {
96 | return nil, err
97 | }
98 |
99 | store, err = NewCachingStore(u)
100 |
101 | if err != nil {
102 | return nil, err
103 | }
104 |
105 | return &cachingStoreDestructor{
106 | store: store,
107 | }, nil
108 | })
109 | if err != nil {
110 | return err
111 | }
112 |
113 | c.store = destructor.(*cachingStoreDestructor).store
114 |
115 | return nil
116 | }
117 |
118 | func (c *Caching) Validate() error {
119 | for ruleName, rule := range c.Rules {
120 | for _, vary := range rule.Varies {
121 | if _, ok := c.Varies[vary]; !ok {
122 | return fmt.Errorf("caching rule %s, configured vary: %s does not exist", ruleName, vary)
123 | }
124 | }
125 |
126 | if rule.MaxAge <= 0 {
127 | return fmt.Errorf("caching rule %s, max age must greater than zero", ruleName)
128 | }
129 | }
130 |
131 | return nil
132 | }
133 |
// Cleanup stops background work (SWR refreshes, purges) by cancelling the
// background context, then releases this instance's reference to the shared
// store; the pool destructs (closes) the store only when no user remains.
func (c *Caching) Cleanup() error {
	c.ctxBackgroundCancel()
	_, err := cachingStores.Delete(c.StoreDsn)

	return err
}
140 |
// Interface guards: compile-time assertions that Caching implements the
// caddy lifecycle interfaces it participates in.
var (
	_ caddy.Provisioner = (*Caching)(nil)
	_ caddy.Validator = (*Caching)(nil)
	_ caddy.CleanerUpper = (*Caching)(nil)
)
147 |
--------------------------------------------------------------------------------
/caching_handler.go:
--------------------------------------------------------------------------------
1 | package gbox
2 |
3 | import (
4 | "bytes"
5 | "errors"
6 | "fmt"
7 | "mime"
8 | "net/http"
9 | "strings"
10 | "time"
11 |
12 | "github.com/caddyserver/caddy/v2/modules/caddyhttp"
13 | "github.com/jensneuse/graphql-go-tools/pkg/graphql"
14 | "github.com/jensneuse/graphql-go-tools/pkg/operationreport"
15 | "go.uber.org/zap"
16 | )
17 |
// ErrHandleUnknownOperationTypeError is returned by HandleRequest when the
// operation is neither a query nor a mutation (e.g. a subscription).
var ErrHandleUnknownOperationTypeError = errors.New("unknown operation type")
19 |
// HandleRequest serves a GraphQL request through the cache using the
// configured rules and varies: query operations are answered from / recorded
// into the cache, mutation operations may auto-invalidate cached results,
// and any other operation type is rejected.
func (c *Caching) HandleRequest(w http.ResponseWriter, r *cachingRequest, h caddyhttp.HandlerFunc) error {
	// Remove `accept-encoding` header to prevent response body encoded when forward request to upstream;
	// the encode directive had already read this header, so it is safe to delete it here.
	r.httpRequest.Header.Del("accept-encoding")
	operationType, _ := r.gqlRequest.OperationType()

	// nolint:exhaustive
	switch operationType {
	case graphql.OperationTypeQuery:
		return c.handleQueryRequest(w, r, h)
	case graphql.OperationTypeMutation:
		return c.handleMutationRequest(w, r, h)
	}

	return ErrHandleUnknownOperationTypeError
}
37 |
// handleQueryRequest resolves the caching plan for a query and serves it by status:
//   - MISS: forward upstream into a buffering writer, replay the response, and
//     store it when cacheable (HTTP 200, JSON, and no `no-store` directive).
//   - HIT: replay the cached result; a stale result additionally triggers a
//     background stale-while-revalidate refresh.
//   - PASS: forward upstream without touching the cache.
//
// Note: the named return `err` is deliberately overwritten inside deferred
// closures (WriteResponse must run after the header/caching logic).
func (c *Caching) handleQueryRequest(w http.ResponseWriter, r *cachingRequest, h caddyhttp.HandlerFunc) (err error) {
	var plan *cachingPlan
	report := &operationreport.Report{}
	plan, err = c.getCachingPlan(r)

	if err != nil {
		report.AddInternalError(err)

		// operationreport.Report is returned as the error value, so the
		// planning failure surfaces as a GraphQL-style report.
		return report
	}

	status, result := c.resolvePlan(r, plan)
	defer c.addMetricsCaching(r.gqlRequest, status)

	switch status {
	case CachingStatusMiss:
		// Buffer the upstream response so it can be both cached and replayed.
		bodyBuff := bufferPool.Get().(*bytes.Buffer)
		defer bufferPool.Put(bodyBuff)
		bodyBuff.Reset()

		crw := newCachingResponseWriter(bodyBuff)

		if err = h(crw, r.httpRequest); err != nil {
			return err
		}

		// Replay to the real writer after caching; overwrites err on failure.
		defer func() {
			c.addCachingResponseHeaders(status, result, plan, w.Header())
			err = crw.WriteResponse(w)
		}()

		shouldCache := false
		mt, _, _ := mime.ParseMediaType(crw.header.Get("content-type"))

		if crw.Status() == http.StatusOK && mt == "application/json" {
			// respect no-store directive
			// https://datatracker.ietf.org/doc/html/rfc7234#section-5.2.1.5
			shouldCache = r.cacheControl == nil || !r.cacheControl.NoStore
		}

		if !shouldCache {
			return err
		}

		err = c.cachingQueryResult(r.httpRequest.Context(), r, plan, crw.buffer.Bytes(), crw.Header().Clone())

		if err == nil {
			c.logger.Info("caching query result successful", zap.String("cache_key", plan.queryResultCacheKey))
		}
	case CachingStatusHit:
		// Replay the cached headers and body.
		for header, values := range result.Header {
			w.Header()[header] = values
		}

		c.addCachingResponseHeaders(status, result, plan, w.Header())
		w.WriteHeader(http.StatusOK)
		_, err = w.Write(result.Body)

		if err != nil || result.Status() != CachingQueryResultStale {
			return err
		}

		// Stale hit: refresh the result in the background (SWR) using a
		// request detached from the client's context.
		r.httpRequest = prepareSwrHTTPRequest(c.ctxBackground, r.httpRequest, w)

		go func() {
			if err := c.swrQueryResult(c.ctxBackground, result, r, h); err != nil {
				c.logger.Error("swr failed, can not update query result", zap.String("cache_key", plan.queryResultCacheKey), zap.Error(err))
			} else {
				c.logger.Info("swr query result successful", zap.String("cache_key", plan.queryResultCacheKey))
			}
		}()
	case CachingStatusPass:
		c.addCachingResponseHeaders(status, result, plan, w.Header())
		err = h(w, r.httpRequest)
	}

	return err
}
116 |
117 | func (c *Caching) resolvePlan(r *cachingRequest, p *cachingPlan) (CachingStatus, *cachingQueryResult) {
118 | if p.Passthrough {
119 | return CachingStatusPass, nil
120 | }
121 |
122 | result, _ := c.getCachingQueryResult(r.httpRequest.Context(), p)
123 |
124 | if result != nil && (r.cacheControl == nil || result.ValidFor(r.cacheControl)) {
125 | err := c.increaseQueryResultHitTimes(r.httpRequest.Context(), result)
126 | if err != nil {
127 | c.logger.Error("increase query result hit times failed", zap.String("cache_key", p.queryResultCacheKey), zap.Error(err))
128 | }
129 |
130 | return CachingStatusHit, result
131 | }
132 |
133 | return CachingStatusMiss, nil
134 | }
135 |
136 | func (c *Caching) addCachingResponseHeaders(s CachingStatus, r *cachingQueryResult, p *cachingPlan, h http.Header) {
137 | h.Set("x-cache", string(s))
138 |
139 | if s == CachingStatusPass {
140 | return
141 | }
142 |
143 | for _, name := range p.VaryNames {
144 | for _, v := range c.Varies[name].Headers {
145 | h.Add("vary", v)
146 | }
147 |
148 | for _, v := range c.Varies[name].Cookies {
149 | h.Add("vary", fmt.Sprintf("cookie:%s", v))
150 | }
151 | }
152 |
153 | if s == CachingStatusHit {
154 | age := int64(r.Age().Seconds())
155 | maxAge := int64(time.Duration(r.MaxAge).Seconds())
156 | cacheControl := []string{"public", fmt.Sprintf("s-maxage=%d", maxAge)}
157 |
158 | if r.Swr > 0 {
159 | swr := int64(time.Duration(r.Swr).Seconds())
160 | cacheControl = append(cacheControl, fmt.Sprintf("stale-while-revalidate=%d", swr))
161 | }
162 |
163 | h.Set("age", fmt.Sprintf("%d", age))
164 | h.Set("cache-control", strings.Join(cacheControl, "; "))
165 | h.Set("x-cache-hits", fmt.Sprintf("%d", r.HitTime))
166 | }
167 |
168 | if c.DebugHeaders {
169 | h.Set("x-debug-result-cache-key", p.queryResultCacheKey)
170 |
171 | if r == nil {
172 | return
173 | }
174 |
175 | if len(r.Tags.TypeKeys()) == 0 {
176 | h.Set("x-debug-result-missing-type-keys", "")
177 | }
178 |
179 | h.Set("x-debug-result-tags", strings.Join(r.Tags.ToSlice(), ", "))
180 | }
181 | }
182 |
// handleMutationRequest forwards a mutation upstream. When AutoInvalidate is
// enabled it buffers the upstream response, analyzes the JSON result for type
// keys, and purges all cached query results carrying those tags. The buffered
// response is always replayed to the client by the deferred WriteResponse,
// which deliberately overwrites the named return `err`.
func (c *Caching) handleMutationRequest(w http.ResponseWriter, r *cachingRequest, h caddyhttp.HandlerFunc) (err error) {
	if !c.AutoInvalidate {
		// Nothing to invalidate: forward straight through.
		return h(w, r.httpRequest)
	}

	bodyBuff := bufferPool.Get().(*bytes.Buffer)
	defer bufferPool.Put(bodyBuff)
	bodyBuff.Reset()

	crw := newCachingResponseWriter(bodyBuff)
	err = h(crw, r.httpRequest)

	if err != nil {
		return err
	}

	// Replay the buffered upstream response to the real writer on all paths below.
	defer func() {
		err = crw.WriteResponse(w)
	}()

	mt, _, _ := mime.ParseMediaType(crw.Header().Get("content-type"))

	// Only a successful JSON response can carry invalidation data.
	if crw.Status() != http.StatusOK || mt != "application/json" {
		return err
	}

	foundTags := make(cachingTags)
	tagAnalyzer := newCachingTagAnalyzer(r, c.TypeKeys)

	// Analysis failure is logged but does not fail the request.
	if aErr := tagAnalyzer.AnalyzeResult(crw.buffer.Bytes(), nil, foundTags); aErr != nil {
		c.logger.Info("fail to analyze result tags", zap.Error(aErr))

		return err
	}

	purgeTags := foundTags.TypeKeys().ToSlice()

	if len(purgeTags) == 0 {
		return err
	}

	if c.DebugHeaders {
		w.Header().Set("x-debug-purged-tags", strings.Join(purgeTags, "; "))
	}

	// Purge in the background context; failures are logged, not returned.
	if err = c.purgeQueryResultByTags(c.ctxBackground, purgeTags); err != nil {
		c.logger.Error("fail to purge query result by tags", zap.Error(err))
	}

	return err
}
234 |
--------------------------------------------------------------------------------
/caching_plan.go:
--------------------------------------------------------------------------------
1 | package gbox
2 |
3 | import (
4 | "bytes"
5 | "context"
6 | "encoding/json"
7 | "errors"
8 | "fmt"
9 |
10 | "github.com/caddyserver/caddy/v2"
11 | "github.com/eko/gocache/v2/store"
12 | "github.com/jensneuse/graphql-go-tools/pkg/astparser"
13 | "github.com/jensneuse/graphql-go-tools/pkg/astprinter"
14 | "github.com/jensneuse/graphql-go-tools/pkg/graphql"
15 | "github.com/jensneuse/graphql-go-tools/pkg/operationreport"
16 | "github.com/jensneuse/graphql-go-tools/pkg/pool"
17 | )
18 |
// Cache key patterns: "cp" = caching plan, "cqr" = caching query result.
// The %d is a 64-bit hash (schema + normalized request, plus varies for results).
var (
	cachingPlanCacheKeyPattern = "gbox_cp_%d"
	cachingQueryResultKeyPattern = "gbox_cqr_%d"
)

// cachingPlan describes how a specific query should be cached. It is itself
// stored in the cache; RulesHash/VariesHash fingerprint the configuration it
// was computed from so stale plans are rejected (see getCached).
type cachingPlan struct {
	MaxAge caddy.Duration // smallest max age among matching rules
	Swr caddy.Duration // smallest positive stale-while-revalidate window
	VaryNames []string // varies that partition the result cache key
	Types map[string]struct{} // types covered by the plan; nil means all types
	RulesHash uint64
	VariesHash uint64
	Passthrough bool // true when no rule matched: do not cache

	// queryResultCacheKey is derived per request from the plan key plus vary
	// values, so it is not serialized with the plan.
	queryResultCacheKey string
}
35 |
// cachingPlanner computes (or loads) the cachingPlan for a single request.
type cachingPlanner struct {
	caching *Caching
	request *cachingRequest
	ctxBackground context.Context
	cacheKey string // plan cache key (gbox_cp_<hash>), set by newCachingPlanner
}
42 |
// newCachingPlanner builds a planner whose cache key is a 64-bit hash of the
// schema hash plus the request normalized by a parse/re-print round trip, so
// formatting-only differences between equivalent queries share one plan.
func newCachingPlanner(r *cachingRequest, c *Caching) (*cachingPlanner, error) {
	hash := pool.Hash64.Get()
	defer pool.Hash64.Put(hash)
	hash.Reset()
	schemaHash, err := r.schema.Hash()
	if err != nil {
		return nil, err
	}

	hash.Write([]byte(fmt.Sprintf("schema=%d; ", schemaHash)))

	// Work on a shallow copy so the original request's Query is untouched.
	gqlRequestClone := *r.gqlRequest
	documentBuffer := bufferPool.Get().(*bytes.Buffer)
	defer bufferPool.Put(documentBuffer)
	documentBuffer.Reset()

	if _, err = gqlRequestClone.Print(documentBuffer); err != nil {
		return nil, err
	}

	// Normalize formatting: parse the printed request and re-print it.
	document, _ := astparser.ParseGraphqlDocumentBytes(documentBuffer.Bytes())
	gqlRequestClone.Query, _ = astprinter.PrintString(&document, nil)

	if err = json.NewEncoder(hash).Encode(gqlRequestClone); err != nil {
		return nil, err
	}

	return &cachingPlanner{
		caching: c,
		request: r,
		ctxBackground: c.ctxBackground,
		cacheKey: fmt.Sprintf(cachingPlanCacheKeyPattern, hash.Sum64()),
	}, nil
}
77 |
78 | func (c *Caching) getCachingPlan(r *cachingRequest) (plan *cachingPlan, err error) {
79 | var planner *cachingPlanner
80 | planner, err = newCachingPlanner(r, c)
81 |
82 | if err != nil {
83 | return nil, err
84 | }
85 |
86 | plan, err = planner.getPlan()
87 |
88 | if err != nil {
89 | return nil, err
90 | }
91 |
92 | return plan, nil
93 | }
94 |
// getPlan returns the cached plan when its configuration checksums still
// match, otherwise computes and persists a fresh one. The deferred block runs
// on both paths: it derives the vary-aware query result cache key for the
// plan and, on failure, nils out the plan — it mutates the named returns.
func (p *cachingPlanner) getPlan() (plan *cachingPlan, err error) {
	defer func() {
		if plan == nil {
			return
		}

		var queryResultCacheKey string
		queryResultCacheKey, err = p.calcQueryResultCacheKey(plan)

		if err == nil {
			plan.queryResultCacheKey = queryResultCacheKey
		} else {
			plan = nil
		}
	}()

	// A cache hit with matching rule/vary checksums short-circuits computation.
	if plan, err = p.getCached(); err == nil {
		return plan, nil
	}

	plan, err = p.computePlan()

	if err != nil {
		return nil, err
	}

	if err = p.savePlan(plan); err != nil {
		return nil, err
	}

	return plan, err
}
127 |
// getCached loads a previously stored plan and verifies it was computed from
// the current rules/varies configuration; any checksum mismatch rejects the
// cached entry so it will be recomputed.
func (p *cachingPlanner) getCached() (*cachingPlan, error) {
	ctx := p.request.httpRequest.Context()
	cachedPlan := new(cachingPlan)

	if _, err := p.caching.store.Get(ctx, p.cacheKey, cachedPlan); err != nil {
		return nil, err
	}

	if rulesHash, err := p.caching.Rules.hash(); err != nil {
		return nil, err
	} else if rulesHash != cachedPlan.RulesHash {
		return nil, errors.New("invalid checksum rules")
	}

	if variesHash, err := p.caching.Varies.hash(); err != nil {
		return nil, err
	} else if variesHash != cachedPlan.VariesHash {
		return nil, errors.New("invalid checksum varies")
	}

	return cachedPlan, nil
}
150 |
// savePlan stores the plan tagged with the upstream schema's hash, so a
// schema-wide purge (PurgeQueryResultBySchema) also drops cached plans.
func (p *cachingPlanner) savePlan(plan *cachingPlan) error {
	ctx := p.request.httpRequest.Context()
	// Hash error ignored: newCachingPlanner already validated schema.Hash().
	sh, _ := p.request.schema.Hash()
	tag := fmt.Sprintf(cachingTagSchemaHashPattern, sh)

	return p.caching.store.Set(ctx, p.cacheKey, plan, &store.Options{Tags: []string{tag}})
}
158 |
// computePlan builds a fresh plan by matching the request's extracted
// types/fields against the configured rules. No matching rule yields a
// passthrough plan. When several rules match: the smallest MaxAge and the
// smallest positive Swr win, vary names accumulate, and rule type sets are
// unioned — a rule with nil Types collapses the set to "all types".
func (p *cachingPlanner) computePlan() (*cachingPlan, error) {
	types := make(map[string]struct{})
	var varyNames []string
	plan := &cachingPlan{
		Passthrough: true,
	}
	rulesHash, err := p.caching.Rules.hash()
	if err != nil {
		return nil, err
	}

	plan.RulesHash = rulesHash
	variesHash, err := p.caching.Varies.hash()
	if err != nil {
		return nil, err
	}

	plan.VariesHash = variesHash
	requestFieldTypes := make(graphql.RequestTypes)
	extractor := graphql.NewExtractor()
	extractor.ExtractFieldsFromRequest(p.request.gqlRequest, p.request.schema, &operationreport.Report{}, requestFieldTypes)

	for _, rule := range p.caching.Rules {
		if !p.matchRule(requestFieldTypes, rule) {
			continue
		}

		// Keep the most conservative (smallest) max age across matching rules.
		if plan.MaxAge == 0 || plan.MaxAge > rule.MaxAge {
			plan.MaxAge = rule.MaxAge
		}

		// Keep the smallest positive stale-while-revalidate window.
		if plan.Swr == 0 || (plan.Swr > rule.Swr && rule.Swr > 0) {
			plan.Swr = rule.Swr
		}

		varyNames = append(varyNames, rule.Varies...)

		// nil Types on any matching rule means the plan covers all types.
		if rule.Types == nil {
			types = nil
		} else if types != nil {
			for typeName := range rule.Types {
				types[typeName] = struct{}{}
			}
		}

		plan.Passthrough = false
	}

	plan.VaryNames = varyNames
	plan.Types = types

	return plan, nil
}
212 |
// matchRule reports whether a rule applies to the request's extracted types.
// A rule with nil Types matches every request. Otherwise the rule matches
// when at least one of its declared types appears in the request with all of
// that type's declared fields selected.
func (p *cachingPlanner) matchRule(requestTypes graphql.RequestTypes, rule *CachingRule) bool {
mainLoop:
	for name, fields := range rule.Types {
		compareFields, typeExist := requestTypes[name]

		if !typeExist {
			continue mainLoop
		}

		for field := range fields {
			// NOTE(review): a missing field moves on to the rule's next type
			// instead of failing the whole rule — i.e. rule types are OR-ed
			// while fields within a type are AND-ed. Confirm this is intended.
			if _, fieldExist := compareFields[field]; !fieldExist {
				continue mainLoop
			}
		}

		return true
	}

	// Empty/nil Types: nil matches everything, empty map matches nothing.
	return rule.Types == nil
}
233 |
234 | func (p *cachingPlanner) calcQueryResultCacheKey(plan *cachingPlan) (string, error) {
235 | hash := pool.Hash64.Get()
236 | defer pool.Hash64.Put(hash)
237 | hash.Reset()
238 |
239 | hash.Write([]byte(fmt.Sprintf("%s;", p.cacheKey)))
240 |
241 | r := p.request.httpRequest
242 |
243 | for _, name := range plan.VaryNames {
244 | vary, ok := p.caching.Varies[name]
245 |
246 | if !ok {
247 | return "", fmt.Errorf("setting of vary %s does not exist in varies list given", vary)
248 | }
249 |
250 | for _, name := range vary.Headers {
251 | buffString := fmt.Sprintf("header:%s=%s;", name, r.Header.Get(name))
252 |
253 | if _, err := hash.Write([]byte(buffString)); err != nil {
254 | return "", err
255 | }
256 | }
257 |
258 | for _, name := range vary.Cookies {
259 | var value string
260 | cookie, err := r.Cookie(name)
261 |
262 | if err == nil {
263 | value = cookie.Value
264 | }
265 |
266 | buffString := fmt.Sprintf("cookie:%s=%s;", cookie, value)
267 |
268 | if _, err := hash.Write([]byte(buffString)); err != nil {
269 | return "", err
270 | }
271 | }
272 | }
273 |
274 | return fmt.Sprintf(cachingQueryResultKeyPattern, hash.Sum64()), nil
275 | }
276 |
--------------------------------------------------------------------------------
/caching_plan_test.go:
--------------------------------------------------------------------------------
1 | package gbox
2 |
3 | import (
4 | "net/http"
5 | "net/url"
6 | "strings"
7 | "testing"
8 |
9 | "github.com/caddyserver/caddy/v2"
10 | "github.com/jensneuse/graphql-go-tools/pkg/astparser"
11 | "github.com/jensneuse/graphql-go-tools/pkg/graphql"
12 | "github.com/stretchr/testify/require"
13 | )
14 |
// TestComputeCachingPlan verifies rule aggregation: rule3 targets type Book
// and does not match the users query, so the plan combines rule1 and rule2 —
// taking the smallest MaxAge (3, from rule1) and the smallest positive Swr
// (3, from rule2).
func TestComputeCachingPlan(t *testing.T) {
	u, _ := url.Parse("freecache://?cache_size=1000000")
	s, _ := NewCachingStore(u)
	c := &Caching{
		store: s,
		Rules: map[string]*CachingRule{
			"rule1": {
				MaxAge: 3,
				Swr: 10,
			},
			"rule2": {
				MaxAge: 10,
				Swr: 3,
			},
			"rule3": {
				// Book is not part of the test schema, so this rule never matches.
				Types: map[string]graphql.RequestFields{
					"Book": {},
				},
				MaxAge: 1,
				Swr: 1,
			},
		},
	}
	cr := newTestCachingRequest()
	planner, _ := newCachingPlanner(cr, c)
	require.NotNil(t, planner)

	p, pErr := planner.getPlan()
	require.NoError(t, pErr)
	require.Equal(t, p.MaxAge, caddy.Duration(3))
	require.Equal(t, p.Swr, caddy.Duration(3))
}
47 |
// newTestCachingRequest builds a cachingRequest fixture: a normalized schema
// exposing a `users` query, a matching HTTP POST, and a normalized GraphQL
// request. Errors are ignored because the inputs are static and known-good.
func newTestCachingRequest() *cachingRequest {
	s, _ := graphql.NewSchemaFromString(`
type Query {
	users: [User!]!
}

type User {
	name: String!
}
`)
	s.Normalize()

	d, _ := astparser.ParseGraphqlDocumentBytes(s.Document())
	r, _ := http.NewRequest(
		"POST",
		"http://localhost:9090/graphql",
		strings.NewReader(`{"query": "query { users { name } }"}`),
	)
	gqlRequest := &graphql.Request{
		Query: `query GetUsers { users { name } }`,
	}
	gqlRequest.Normalize(s)

	cr := newCachingRequest(r, &d, s, gqlRequest)

	return cr
}
75 |
--------------------------------------------------------------------------------
/caching_purger.go:
--------------------------------------------------------------------------------
1 | package gbox
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "strconv"
7 |
8 | "github.com/eko/gocache/v2/store"
9 | "github.com/jensneuse/graphql-go-tools/pkg/graphql"
10 | "github.com/pkg/errors"
11 | "go.uber.org/zap"
12 | )
13 |
// PurgeQueryResultBySchema invalidates everything cached under the schema's
// hash tag (both query results and cached plans carry this tag).
func (c *Caching) PurgeQueryResultBySchema(ctx context.Context, schema *graphql.Schema) error {
	// NOTE(review): hash error ignored; a failed hash would purge the tag for
	// hash 0 instead — confirm this is acceptable for callers.
	hash, _ := schema.Hash()
	tag := fmt.Sprintf(cachingTagSchemaHashPattern, hash)

	return c.purgeQueryResultByTags(ctx, []string{tag})
}
20 |
// PurgeQueryResultByOperationName invalidates results cached for the named query operation.
func (c *Caching) PurgeQueryResultByOperationName(ctx context.Context, name string) error {
	return c.purgeQueryResultByTags(ctx, []string{fmt.Sprintf(cachingTagOperationPattern, name)})
}

// PurgeQueryResultByTypeName invalidates results whose payload contained the given type.
func (c *Caching) PurgeQueryResultByTypeName(ctx context.Context, name string) error {
	return c.purgeQueryResultByTags(ctx, []string{fmt.Sprintf(cachingTagTypePattern, name)})
}

// PurgeQueryResultByTypeField invalidates results that selected the given field of a type.
func (c *Caching) PurgeQueryResultByTypeField(ctx context.Context, typeName, fieldName string) error {
	return c.purgeQueryResultByTags(ctx, []string{fmt.Sprintf(cachingTagTypeFieldPattern, typeName, fieldName)})
}
32 |
33 | func (c *Caching) PurgeQueryResultByTypeKey(ctx context.Context, typeName, fieldName string, value interface{}) error {
34 | var cacheKey string
35 |
36 | switch v := value.(type) {
37 | case string:
38 | cacheKey = fmt.Sprintf(cachingTagTypeKeyPattern, typeName, fieldName, v)
39 |
40 | return c.purgeQueryResultByTags(ctx, []string{cacheKey})
41 | case int:
42 | cacheKey = fmt.Sprintf(cachingTagTypeKeyPattern, typeName, fieldName, strconv.Itoa(v))
43 |
44 | return c.purgeQueryResultByTags(ctx, []string{cacheKey})
45 | default:
46 |
47 | return fmt.Errorf("only support purging type key value int or string, got %T", v)
48 | }
49 | }
50 |
// purgeQueryResultByTags invalidates cache entries tag by tag. The store's
// Invalidate stops at the first error when given multiple tags, so each tag
// is invalidated separately — every tag gets a purge attempt — and errors are
// chained via errors.WithMessage into the single returned error.
func (c *Caching) purgeQueryResultByTags(ctx context.Context, tags []string) error {
	var err error

	c.logger.Debug("purging query result by tags", zap.Strings("tags", tags))

	for _, t := range tags {
		// because store invalidate method will be stopped on first error,
		// so we need to invalidate tag by tag.
		if e := c.store.Invalidate(ctx, store.InvalidateOptions{Tags: []string{t}}); e != nil {
			if err == nil {
				err = e
			} else {
				err = errors.WithMessage(err, e.Error())
			}
		}
	}

	return err
}
70 |
--------------------------------------------------------------------------------
/caching_purger_test.go:
--------------------------------------------------------------------------------
1 | package gbox
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "net/url"
7 | "testing"
8 |
9 | "github.com/eko/gocache/v2/store"
10 | "github.com/jensneuse/graphql-go-tools/pkg/graphql"
11 | "github.com/stretchr/testify/require"
12 | "go.uber.org/zap"
13 | )
14 |
// TestCaching_PurgeQueryResultByOperationName stores an entry tagged with an
// operation name and checks it is readable before, and gone after, the purge.
func TestCaching_PurgeQueryResultByOperationName(t *testing.T) {
	u, _ := url.Parse("freecache://?cache_size=1000000")
	s, _ := NewCachingStore(u)
	c := &Caching{
		store: s,
		logger: zap.NewNop(),
	}
	v := &struct{}{}

	// Sanity: the key must not exist before Set.
	_, err := c.store.Get(context.Background(), "test", v)
	require.Error(t, err)

	c.store.Set(context.Background(), "test", v, &store.Options{
		Tags: []string{fmt.Sprintf(cachingTagOperationPattern, "test")},
	})

	_, err = c.store.Get(context.Background(), "test", v)

	require.NoError(t, err)
	require.NoError(t, c.PurgeQueryResultByOperationName(context.Background(), "test"))

	// The purge must have removed the tagged entry.
	_, err = c.store.Get(context.Background(), "test", v)
	require.Error(t, err)
}
39 |
40 | func TestCaching_PurgeQueryResultBySchema(t *testing.T) {
41 | u, _ := url.Parse("freecache://?cache_size=1000000")
42 | s, _ := NewCachingStore(u)
43 | c := &Caching{
44 | store: s,
45 | logger: zap.NewNop(),
46 | }
47 | schema, _ := graphql.NewSchemaFromString(`
48 | type Query {
49 | test: String!
50 | }
51 | `)
52 | schema.Normalize()
53 | schemaHash, _ := schema.Hash()
54 | v := &struct{}{}
55 |
56 | _, err := c.store.Get(context.Background(), "test", v)
57 | require.Error(t, err)
58 |
59 | c.store.Set(context.Background(), "test", v, &store.Options{
60 | Tags: []string{fmt.Sprintf(cachingTagSchemaHashPattern, schemaHash)},
61 | })
62 |
63 | _, err = c.store.Get(context.Background(), "test", v)
64 |
65 | require.NoError(t, err)
66 | require.NoError(t, c.PurgeQueryResultBySchema(context.Background(), schema))
67 |
68 | _, err = c.store.Get(context.Background(), "test", v)
69 | require.Error(t, err)
70 | }
71 |
72 | func TestCaching_PurgeQueryResultByTypeKey(t *testing.T) {
73 | u, _ := url.Parse("freecache://?cache_size=1000000")
74 | s, _ := NewCachingStore(u)
75 | c := &Caching{
76 | store: s,
77 | logger: zap.NewNop(),
78 | }
79 | v := &struct{}{}
80 |
81 | _, err := c.store.Get(context.Background(), "test", v)
82 | require.Error(t, err)
83 |
84 | c.store.Set(context.Background(), "test", v, &store.Options{
85 | Tags: []string{fmt.Sprintf(cachingTagTypeKeyPattern, "a", "b", "c")},
86 | })
87 |
88 | _, err = c.store.Get(context.Background(), "test", v)
89 |
90 | require.NoError(t, err)
91 | require.NoError(t, c.PurgeQueryResultByTypeKey(context.Background(), "a", "b", "c"))
92 |
93 | _, err = c.store.Get(context.Background(), "test", v)
94 | require.Error(t, err)
95 | }
96 |
97 | func TestCaching_PurgeQueryResultByTypeField(t *testing.T) {
98 | u, _ := url.Parse("freecache://?cache_size=1000000")
99 | s, _ := NewCachingStore(u)
100 | c := &Caching{
101 | store: s,
102 | logger: zap.NewNop(),
103 | }
104 | v := &struct{}{}
105 |
106 | _, err := c.store.Get(context.Background(), "test", v)
107 | require.Error(t, err)
108 |
109 | c.store.Set(context.Background(), "test", v, &store.Options{
110 | Tags: []string{fmt.Sprintf(cachingTagTypeFieldPattern, "a", "b")},
111 | })
112 |
113 | _, err = c.store.Get(context.Background(), "test", v)
114 |
115 | require.NoError(t, err)
116 | require.NoError(t, c.PurgeQueryResultByTypeField(context.Background(), "a", "b"))
117 |
118 | _, err = c.store.Get(context.Background(), "test", v)
119 | require.Error(t, err)
120 | }
121 |
122 | func TestCaching_PurgeQueryResultByTypeName(t *testing.T) {
123 | u, _ := url.Parse("freecache://?cache_size=1000000")
124 | s, _ := NewCachingStore(u)
125 | c := &Caching{
126 | store: s,
127 | logger: zap.NewNop(),
128 | }
129 | v := &struct{}{}
130 |
131 | _, err := c.store.Get(context.Background(), "test", v)
132 | require.Error(t, err)
133 |
134 | c.store.Set(context.Background(), "test", v, &store.Options{
135 | Tags: []string{fmt.Sprintf(cachingTagTypePattern, "a")},
136 | })
137 |
138 | _, err = c.store.Get(context.Background(), "test", v)
139 |
140 | require.NoError(t, err)
141 | require.NoError(t, c.PurgeQueryResultByTypeName(context.Background(), "a"))
142 |
143 | _, err = c.store.Get(context.Background(), "test", v)
144 | require.Error(t, err)
145 | }
146 |
--------------------------------------------------------------------------------
/caching_request.go:
--------------------------------------------------------------------------------
1 | package gbox
2 |
3 | import (
4 | "net/http"
5 |
6 | "github.com/jensneuse/graphql-go-tools/pkg/ast"
7 | "github.com/jensneuse/graphql-go-tools/pkg/astnormalization"
8 | "github.com/jensneuse/graphql-go-tools/pkg/astparser"
9 | "github.com/jensneuse/graphql-go-tools/pkg/graphql"
10 | "github.com/pquerna/cachecontrol/cacheobject"
11 | )
12 |
// cachingRequest bundles everything needed to cache one GraphQL request: the
// originating HTTP request, the schema and its definition AST, the parsed
// GraphQL request, and the client's request cache-control directives.
type cachingRequest struct {
	httpRequest *http.Request
	schema      *graphql.Schema
	gqlRequest  *graphql.Request
	// definition is the schema AST; operation is the normalized operation AST,
	// populated lazily by initOperation.
	definition, operation *ast.Document
	// cacheControl holds the parsed request cache-control directives; may be
	// nil when the header failed to parse — TODO confirm callers handle nil.
	cacheControl *cacheobject.RequestCacheDirectives
}
20 |
// newCachingRequest builds a cachingRequest from the incoming HTTP request r,
// the schema definition document d, the schema s and the parsed GraphQL
// request gr. The operation AST stays nil until initOperation is called.
func newCachingRequest(r *http.Request, d *ast.Document, s *graphql.Schema, gr *graphql.Request) *cachingRequest {
	cr := &cachingRequest{
		httpRequest: r,
		schema:      s,
		definition:  d,
		gqlRequest:  gr,
	}

	// Parse the client's cache-control header. The parse error is deliberately
	// discarded, so cr.cacheControl may be nil for a malformed header —
	// NOTE(review): confirm downstream code tolerates a nil cacheControl.
	cacheControlString := r.Header.Get("cache-control")
	cr.cacheControl, _ = cacheobject.ParseRequestCacheControl(cacheControlString)

	return cr
}
34 |
// initOperation lazily parses and normalizes the GraphQL operation of this
// request. It is idempotent: once r.operation is set, further calls return
// nil immediately. Normalization extracts variables, strips fragment
// definitions and removes unused variables so equivalent queries produce the
// same AST.
func (r *cachingRequest) initOperation() error {
	if r.operation != nil {
		return nil
	}

	operation, report := astparser.ParseGraphqlDocumentString(r.gqlRequest.Query)

	if report.HasErrors() {
		return &report
	}

	// Attach the request variables before normalization so variable
	// extraction/removal can take them into account.
	operation.Input.Variables = r.gqlRequest.Variables
	normalizer := astnormalization.NewWithOpts(
		astnormalization.WithExtractVariables(),
		astnormalization.WithRemoveFragmentDefinitions(),
		astnormalization.WithRemoveUnusedVariables(),
	)

	// A named operation must be normalized against that specific name.
	if r.gqlRequest.OperationName != "" {
		normalizer.NormalizeNamedOperation(&operation, r.definition, []byte(r.gqlRequest.OperationName), &report)
	} else {
		normalizer.NormalizeOperation(&operation, r.definition, &report)
	}

	if report.HasErrors() {
		return &report
	}

	r.operation = &operation

	return nil
}
67 |
--------------------------------------------------------------------------------
/caching_responsewriter.go:
--------------------------------------------------------------------------------
1 | package gbox
2 |
3 | import (
4 | "bytes"
5 | "io"
6 | "net/http"
7 | )
8 |
9 | type cachingResponseWriter struct {
10 | header http.Header
11 | status int
12 | buffer *bytes.Buffer
13 | }
14 |
15 | func newCachingResponseWriter(buffer *bytes.Buffer) *cachingResponseWriter {
16 | return &cachingResponseWriter{
17 | header: make(http.Header),
18 | buffer: buffer,
19 | }
20 | }
21 |
22 | func (c *cachingResponseWriter) Status() int {
23 | return c.status
24 | }
25 |
26 | func (c *cachingResponseWriter) Header() http.Header {
27 | return c.header
28 | }
29 |
30 | func (c *cachingResponseWriter) Write(i []byte) (int, error) {
31 | return c.buffer.Write(i)
32 | }
33 |
34 | func (c *cachingResponseWriter) WriteHeader(statusCode int) {
35 | c.status = statusCode
36 | }
37 |
38 | func (c *cachingResponseWriter) WriteResponse(dst http.ResponseWriter) (err error) {
39 | for h, v := range c.Header() {
40 | dst.Header()[h] = v
41 | }
42 |
43 | dst.WriteHeader(c.Status())
44 |
45 | _, err = io.Copy(dst, c.buffer)
46 |
47 | return
48 | }
49 |
--------------------------------------------------------------------------------
/caching_result.go:
--------------------------------------------------------------------------------
1 | package gbox
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "net/http"
7 | "time"
8 |
9 | "github.com/caddyserver/caddy/v2"
10 | "github.com/eko/gocache/v2/store"
11 | "github.com/pquerna/cachecontrol/cacheobject"
12 | )
13 |
const (
	// CachingQueryResultStale marks a result older than its max age but still
	// inside its stale-while-revalidate window.
	CachingQueryResultStale cachingQueryResultStatus = "STALE"
	// CachingQueryResultValid marks a result still within its max age.
	CachingQueryResultValid cachingQueryResultStatus = "VALID"
)

// cachingQueryResultStatus is the freshness state of a cached query result.
type cachingQueryResultStatus string

// cachingQueryResult is the value stored in the cache for one query result.
type cachingQueryResult struct {
	Header    http.Header
	Body      json.RawMessage
	HitTime   uint64 // number of cache hits served from this entry
	CreatedAt time.Time
	// Expiration is the total store TTL: max age plus the swr window.
	Expiration time.Duration
	MaxAge     caddy.Duration
	Swr        caddy.Duration
	Tags       cachingTags

	// plan is the caching plan this result belongs to; not serialized.
	plan *cachingPlan
}
33 |
34 | func (c *Caching) getCachingQueryResult(ctx context.Context, plan *cachingPlan) (*cachingQueryResult, error) {
35 | result := &cachingQueryResult{
36 | plan: plan,
37 | }
38 |
39 | if _, err := c.store.Get(ctx, plan.queryResultCacheKey, result); err != nil {
40 | return nil, err
41 | }
42 |
43 | return result, nil
44 | }
45 |
// cachingQueryResult analyzes the upstream response body for cache tags and
// stores it under the plan's cache key. The entry's store TTL spans both the
// fresh window (MaxAge) and the stale-while-revalidate window (Swr).
func (c *Caching) cachingQueryResult(ctx context.Context, request *cachingRequest, plan *cachingPlan, body []byte, header http.Header) (err error) {
	tags := make(cachingTags)
	tagAnalyzer := newCachingTagAnalyzer(request, c.TypeKeys)

	// Derive purge tags (type/field/key/schema/operation) from the body.
	if err = tagAnalyzer.AnalyzeResult(body, plan.Types, tags); err != nil {
		return err
	}

	result := &cachingQueryResult{
		Body:      body,
		Header:    header,
		CreatedAt: time.Now(),
		MaxAge:    plan.MaxAge,
		Swr:       plan.Swr,
		Tags:      tags,
		// Keep the entry alive through both the fresh and the stale window.
		Expiration: time.Duration(plan.MaxAge) + time.Duration(plan.Swr),
	}

	// Strip origin-specific headers (date, server) before storing.
	result.normalizeHeader()

	return c.store.Set(ctx, plan.queryResultCacheKey, result, &store.Options{
		Tags:       tags.ToSlice(),
		Expiration: result.Expiration,
	})
}
71 |
// increaseQueryResultHitTimes bumps the hit counter of a cached result and
// writes it back with the remaining TTL (original expiration minus elapsed
// age). NOTE(review): if the entry has already outlived its expiration, the
// remaining duration is negative — confirm the store treats that as
// immediate expiry.
func (c *Caching) increaseQueryResultHitTimes(ctx context.Context, r *cachingQueryResult) error {
	r.HitTime++

	return c.store.Set(ctx, r.plan.queryResultCacheKey, r, &store.Options{
		Expiration: r.Expiration - time.Since(r.CreatedAt),
	})
}
79 |
80 | func (r *cachingQueryResult) Status() cachingQueryResultStatus {
81 | if time.Duration(r.MaxAge) >= r.Age() {
82 | return CachingQueryResultValid
83 | }
84 |
85 | return CachingQueryResultStale
86 | }
87 |
// ValidFor check caching result still valid with cache control directives
// https://datatracker.ietf.org/doc/html/rfc7234#section-5.2.1
//
// It evaluates the client's no-cache, min-fresh, max-age and max-stale
// request directives against this result's age and freshness status.
func (r *cachingQueryResult) ValidFor(cc *cacheobject.RequestCacheDirectives) bool {
	status := r.Status()
	age := r.Age()

	// no-cache: a stale stored response must not be served without
	// revalidation.
	if cc.NoCache && status == CachingQueryResultStale {
		return false
	}

	// min-fresh: the response must still be fresh for at least MinFresh
	// seconds from now.
	if cc.MinFresh != -1 {
		maxAge := time.Duration(r.MaxAge)
		d := age + time.Duration(cc.MinFresh)*time.Second

		if d > maxAge {
			return false
		}
	}

	// max-age request
	if cc.MaxAge != -1 {
		d := time.Duration(cc.MaxAge) * time.Second

		// Fresh and younger than the requested max-age: acceptable.
		if d >= age && status == CachingQueryResultValid {
			return true
		}

		// max-age with max-stale
		if (cc.MaxStaleSet || cc.MaxStale != -1) && status == CachingQueryResultStale {
			// client is willing to accept a stale response of any age.
			if cc.MaxStale == -1 {
				return true
			}

			// Allow staleness up to max-age + max-stale seconds.
			d += time.Duration(cc.MaxStale) * time.Second

			return d >= age
		}

		return false
	}

	// max-stale only
	if cc.MaxStaleSet || cc.MaxStale != -1 {
		// max-stale without a value, or a still-fresh result: acceptable.
		if cc.MaxStale == -1 || status == CachingQueryResultValid {
			return true
		}

		// Stale result: acceptable while age <= MaxAge + max-stale seconds.
		d := time.Duration(r.MaxAge) + time.Duration(cc.MaxStale)*time.Second

		return d >= age
	}

	// No constraining directives: the stored response is acceptable here.
	return true
}
143 |
144 | func (r *cachingQueryResult) Age() time.Duration {
145 | return time.Since(r.CreatedAt)
146 | }
147 |
148 | func (r *cachingQueryResult) normalizeHeader() {
149 | r.Header.Del("date")
150 | r.Header.Del("server")
151 | }
152 |
--------------------------------------------------------------------------------
/caching_rule.go:
--------------------------------------------------------------------------------
1 | package gbox
2 |
3 | import (
4 | "encoding/json"
5 |
6 | "github.com/caddyserver/caddy/v2"
7 | "github.com/jensneuse/graphql-go-tools/pkg/graphql"
8 | "github.com/jensneuse/graphql-go-tools/pkg/pool"
9 | )
10 |
// CachingRule describes which GraphQL result types to cache and for how long.
type CachingRule struct {
	// GraphQL type to cache.
	// ex: `User` will cache all query results that have type User.
	// ex: `User { is_admin }` will cache all query results that have type User
	// with field `is_admin`.
	// If not set, this rule matches all types.
	Types graphql.RequestTypes `json:"types,omitempty"`

	// How long query results matching the rule types should be stored.
	MaxAge caddy.Duration `json:"max_age,omitempty"`

	// How long stale query results matching the rule types may be served while
	// fresh data is being fetched in the background (stale-while-revalidate).
	Swr caddy.Duration `json:"swr,omitempty"`

	// Vary names applied to query results matching the rule types.
	// If not set, query results are cached publicly.
	Varies []string `json:"varies,omitempty"`
}

// CachingRules maps rule names to their configuration.
type CachingRules map[string]*CachingRule
30 |
31 | func (rules CachingRules) hash() (uint64, error) {
32 | if rules == nil {
33 | return 0, nil
34 | }
35 |
36 | hash := pool.Hash64.Get()
37 | hash.Reset()
38 | defer pool.Hash64.Put(hash)
39 |
40 | if err := json.NewEncoder(hash).Encode(rules); err != nil {
41 | return 0, err
42 | }
43 |
44 | return hash.Sum64(), nil
45 | }
46 |
--------------------------------------------------------------------------------
/caching_rule_test.go:
--------------------------------------------------------------------------------
1 | package gbox
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/stretchr/testify/require"
7 | )
8 |
9 | func TestCachingRulesHash(t *testing.T) {
10 | var rules CachingRules
11 | hash, err := rules.hash()
12 |
13 | require.NoError(t, err)
14 | require.Equal(t, uint64(0), hash)
15 |
16 | rules = CachingRules{
17 | "default": &CachingRule{
18 | MaxAge: 1,
19 | Swr: 1,
20 | },
21 | }
22 |
23 | hash, err = rules.hash()
24 |
25 | require.NoError(t, err)
26 | require.Greater(t, hash, uint64(0))
27 | }
28 |
--------------------------------------------------------------------------------
/caching_store.go:
--------------------------------------------------------------------------------
1 | package gbox
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "net/url"
7 | "strconv"
8 | "sync"
9 |
10 | "github.com/coocood/freecache"
11 | "github.com/eko/gocache/v2/marshaler"
12 | "github.com/eko/gocache/v2/store"
13 | "github.com/go-redis/redis/v8"
14 | )
15 |
// Registry of caching store factories keyed by URL scheme (e.g. "redis",
// "freecache"), guarded by cachingStoreFactoriesMu.
var (
	cachingStoreFactories   = make(map[string]CachingStoreFactory)
	cachingStoreFactoriesMu sync.RWMutex
)

// init registers the built-in store backends.
func init() { // nolint:gochecknoinits
	RegisterCachingStoreFactory("redis", RedisCachingStoreFactory)
	RegisterCachingStoreFactory("freecache", FreeCacheStoreFactory)
}
25 |
// CachingStore wraps a gocache marshaler together with a backend-specific
// close function.
type CachingStore struct {
	*marshaler.Marshaler
	// close releases backend resources (clears freecache / closes redis).
	close func() error
}

// CachingStoreFactory builds a CachingStore from a configuration URL.
type CachingStoreFactory = func(u *url.URL) (*CachingStore, error)

// RegisterCachingStoreFactory registers factory under the given URL scheme,
// replacing any previous registration. Safe for concurrent use.
func RegisterCachingStoreFactory(schema string, factory CachingStoreFactory) {
	cachingStoreFactoriesMu.Lock()
	defer cachingStoreFactoriesMu.Unlock()

	cachingStoreFactories[schema] = factory
}

// NewCachingStore creates a store from u, dispatching on u.Scheme to a
// registered factory.
func NewCachingStore(u *url.URL) (*CachingStore, error) {
	cachingStoreFactoriesMu.RLock()
	defer cachingStoreFactoriesMu.RUnlock()
	factory, ok := cachingStoreFactories[u.Scheme]

	if !ok {
		return nil, fmt.Errorf("caching store schema: %s is not support", u.Scheme)
	}

	return factory(u)
}
51 |
52 | func FreeCacheStoreFactory(u *url.URL) (*CachingStore, error) {
53 | q := u.Query()
54 | cacheSize := q.Get("cache_size")
55 |
56 | if cacheSize == "" {
57 | return nil, errors.New("cache_size must be set explicit")
58 | }
59 |
60 | cacheSizeInt, err := strconv.Atoi(cacheSize)
61 | if err != nil {
62 | return nil, fmt.Errorf("`cache_size` param should be numeric string, %s given", cacheSize)
63 | }
64 |
65 | client := freecache.NewCache(cacheSizeInt)
66 | freeCacheStore := store.NewFreecache(client, nil)
67 |
68 | return &CachingStore{
69 | Marshaler: marshaler.New(freeCacheStore),
70 | close: func() error {
71 | client.Clear()
72 |
73 | return nil
74 | },
75 | }, nil
76 | }
77 |
// RedisCachingStoreFactory creates a redis-backed store from a URL of the form
// redis://[user[:password]@]host[:port][?db=N]. The client is constructed
// eagerly; connectivity is not verified here.
func RedisCachingStoreFactory(u *url.URL) (*CachingStore, error) {
	q := u.Query()
	opts := &redis.Options{
		Addr: u.Host,
	}

	// Optional numeric `db` query param selects the redis database.
	if v := q.Get("db"); v != "" {
		db, err := strconv.Atoi(v)
		if err != nil {
			return nil, fmt.Errorf("`db` param should be numeric string, %s given", v)
		}

		opts.DB = db
	}

	user := u.User.Username()
	password, hasPassword := u.User.Password()

	// redis://secret@host carries a password only (no username); when both
	// parts are present the userinfo is user:password.
	if !hasPassword {
		opts.Password = user
	} else {
		opts.Username = user
		opts.Password = password
	}

	client := redis.NewClient(opts)
	redisStore := store.NewRedis(client, nil)

	return &CachingStore{
		Marshaler: marshaler.New(redisStore),
		close: func() error {
			return client.Close()
		},
	}, nil
}
113 |
--------------------------------------------------------------------------------
/caching_store_test.go:
--------------------------------------------------------------------------------
1 | package gbox
2 |
3 | import (
4 | "net/url"
5 | "testing"
6 |
7 | "github.com/stretchr/testify/require"
8 | )
9 |
10 | func TestNewCachingStore(t *testing.T) {
11 | testCases := map[string]struct {
12 | url string
13 | expectedErrorMsg string
14 | }{
15 | "redis": {
16 | url: "redis://redis",
17 | },
18 | "freecache": {
19 | url: "freecache://?cache_size=1024",
20 | },
21 | "unknown": {
22 | url: "unknown://unknown",
23 | expectedErrorMsg: "caching store schema: unknown is not support",
24 | },
25 | }
26 |
27 | for name, testCase := range testCases {
28 | u, _ := url.Parse(testCase.url)
29 | _, err := NewCachingStore(u)
30 |
31 | if testCase.expectedErrorMsg == "" {
32 | require.NoErrorf(t, err, "case %s: unexpected error", name)
33 | } else {
34 | require.Errorf(t, err, "case %s: should be error", name)
35 | require.Equalf(t, testCase.expectedErrorMsg, err.Error(), "case %s: unexpected error message", name)
36 | }
37 | }
38 | }
39 |
40 | func TestFreeCacheStoreFactory(t *testing.T) {
41 | u, _ := url.Parse("freecache://?cache_size=1024")
42 | _, e := FreeCacheStoreFactory(u)
43 |
44 | require.NoError(t, e)
45 |
46 | u, _ = url.Parse("freecache://") // missing cache size
47 |
48 | _, e = NewCachingStore(u)
49 |
50 | require.Error(t, e)
51 | require.Equal(t, "cache_size must be set explicit", e.Error())
52 | }
53 |
54 | func TestRedisCachingStoreFactory(t *testing.T) {
55 | u, _ := url.Parse("redis://redis")
56 | _, e := RedisCachingStoreFactory(u)
57 |
58 | require.NoError(t, e)
59 |
60 | u, _ = url.Parse("redis://redis?db=xyz")
61 | _, e = RedisCachingStoreFactory(u)
62 |
63 | require.Error(t, e)
64 | require.Equal(t, "`db` param should be numeric string, xyz given", e.Error())
65 | }
66 |
--------------------------------------------------------------------------------
/caching_swr.go:
--------------------------------------------------------------------------------
1 | package gbox
2 |
3 | import (
4 | "bytes"
5 | "context"
6 | "fmt"
7 | "mime"
8 | "net/http"
9 |
10 | "github.com/caddyserver/caddy/v2"
11 | "github.com/caddyserver/caddy/v2/modules/caddyhttp"
12 | )
13 |
14 | func (c *Caching) swrQueryResult(ctx context.Context, result *cachingQueryResult, request *cachingRequest, handler caddyhttp.HandlerFunc) error {
15 | buff := bufferPool.Get().(*bytes.Buffer)
16 | defer bufferPool.Put(buff)
17 | buff.Reset()
18 | rw := newCachingResponseWriter(buff)
19 |
20 | if err := handler(rw, request.httpRequest); err != nil {
21 | return err
22 | }
23 |
24 | ct := rw.Header().Get("content-type")
25 | mt, _, _ := mime.ParseMediaType(ct)
26 |
27 | if rw.Status() != http.StatusOK || mt != "application/json" {
28 | return fmt.Errorf("getting invalid response from upstream, status: %d, content-type: %s", rw.Status(), ct)
29 | }
30 |
31 | if err := c.cachingQueryResult(ctx, request, result.plan, buff.Bytes(), rw.Header()); err != nil {
32 | return err
33 | }
34 |
35 | return nil
36 | }
37 |
// prepareSwrHTTPRequest clones r for background revalidation and re-runs
// Caddy's request preparation so the replacer and server context are attached
// to the clone. The type assertion assumes the request came through a Caddy
// server and will panic otherwise — NOTE(review): confirm all callers do.
func prepareSwrHTTPRequest(ctx context.Context, r *http.Request, w http.ResponseWriter) *http.Request {
	s := r.Context().Value(caddyhttp.ServerCtxKey).(*caddyhttp.Server)

	return caddyhttp.PrepareRequest(r.Clone(ctx), caddy.NewReplacer(), w, s)
}
43 |
--------------------------------------------------------------------------------
/caching_tag.go:
--------------------------------------------------------------------------------
1 | package gbox
2 |
3 | import (
4 | "encoding/json"
5 | "errors"
6 | "fmt"
7 | "sort"
8 | "strconv"
9 | "strings"
10 |
11 | "github.com/jensneuse/graphql-go-tools/pkg/astvisitor"
12 | "github.com/jensneuse/graphql-go-tools/pkg/graphql"
13 | "github.com/jensneuse/graphql-go-tools/pkg/operationreport"
14 | )
15 |
// Cache tag prefixes and fmt patterns. Tags attached to each cached query
// result enable selective purging by schema hash, type, type field, type key
// value, or operation name.
const (
	cachingTagSchemaHashPrefix  = "schema:"
	cachingTagSchemaHashPattern = cachingTagSchemaHashPrefix + "%d"
	cachingTagTypeFieldPrefix   = "field:"
	cachingTagTypeFieldPattern  = cachingTagTypeFieldPrefix + "%s:%s" // type:field
	cachingTagTypePrefix        = "type:"
	cachingTagTypePattern       = cachingTagTypePrefix + "%s"
	cachingTagTypeKeyPrefix     = "key:"
	cachingTagTypeKeyPattern    = cachingTagTypeKeyPrefix + "%s:%s:%s" // type:field:value
	cachingTagOperationPrefix   = "operation:"
	cachingTagOperationPattern  = cachingTagOperationPrefix + "%s"
)
28 |
// cachingTagVisitor walks the normalized operation AST alongside the decoded
// response data, accumulating cache tags for every visited type and field.
type cachingTagVisitor struct {
	*cachingTagAnalyzer
	*astvisitor.Walker
	// data is the decoded `data` object of the query result.
	data map[string]interface{}
	// tags is the output set filled during the walk.
	tags cachingTags
	// onlyTypes, when non-nil, restricts tagging to these type names.
	onlyTypes map[string]struct{}
}
36 |
// EnterField is invoked by the AST walker for each field of the normalized
// operation. It records type and field tags for the enclosing type and, when
// the field is one of the type's key fields, collects key-value tags from the
// response data found at the field's path.
func (c *cachingTagVisitor) EnterField(ref int) {
	operation, definition := c.request.operation, c.request.definition
	fieldName := operation.FieldNameString(ref)
	typeName := definition.NodeNameString(c.EnclosingTypeDefinition)

	// When restricted to specific types, skip all others — except the root
	// query/mutation types, whose fields must still be walked to reach the
	// nested types.
	if c.onlyTypes != nil && typeName != c.request.schema.QueryTypeName() && typeName != c.request.schema.MutationTypeName() {
		if _, exist := c.onlyTypes[typeName]; !exist {
			return
		}
	}

	c.addTagForType(typeName)
	c.addTagForTypeField(typeName, fieldName)

	// Key fields configured for this type; default to `id` when none are set.
	keys, ok := c.typeKeys[typeName]

	if !ok {
		keys = graphql.RequestFields{
			"id": struct{}{},
		}
	}

	// Build the JSON path from the root to this field (the root segment is
	// skipped; the final segment honors the field alias when present) so the
	// value can be looked up in the response data.
	path := make([]string, 0)

	for _, p := range c.Path[1:] {
		path = append(path, p.FieldName.String())
	}

	path = append(path, operation.FieldAliasOrNameString(ref))

	for key := range keys {
		if key == fieldName {
			c.collectTypeKeyTags(path, c.data, typeName)

			break
		}
	}
}
75 |
76 | func (c *cachingTagVisitor) addTagForTypeField(typeName, fieldName string) {
77 | c.tags[fmt.Sprintf(cachingTagTypeFieldPattern, typeName, fieldName)] = struct{}{}
78 | }
79 |
80 | func (c *cachingTagVisitor) addTagForType(typeName string) {
81 | c.tags[fmt.Sprintf(cachingTagTypePattern, typeName)] = struct{}{}
82 | }
83 |
// addTagForTypeKey records a "key:<type>:<field>:<value>" tag for a type key
// value found in the result data. Any other value type aborts the walk with
// an internal error.
func (c *cachingTagVisitor) addTagForTypeKey(field string, value interface{}, typeName string) {
	switch v := value.(type) {
	case string:
		c.tags[fmt.Sprintf(cachingTagTypeKeyPattern, typeName, field, v)] = struct{}{}
	case float64:
		// json.Unmarshal decodes all JSON numbers as float64; the value is
		// truncated to its integer part — assumes key values are integral,
		// fractional keys would collide — TODO confirm.
		c.tags[fmt.Sprintf(cachingTagTypeKeyPattern, typeName, field, strconv.FormatInt(int64(v), 10))] = struct{}{}
	default:
		c.Walker.StopWithInternalErr(fmt.Errorf("invalid type key of %s.%s only accept string or numeric but got: %T", typeName, field, v))
	}
}
94 |
// collectTypeKeyTags walks the response data along path and records a type
// key tag for each value found at the final path segment. Arrays are
// traversed element-by-element (the same path applies to every element);
// absent or null intermediate values are skipped silently.
func (c *cachingTagVisitor) collectTypeKeyTags(path []string, data interface{}, typeName string) {
	at := path[0]

	if next := path[1:]; len(next) > 0 {
		switch v := data.(type) {
		case []interface{}:
			// Apply the same (unshortened) path to every array element.
			for _, item := range v {
				c.collectTypeKeyTags(path, item, typeName)
			}
		case map[string]interface{}:
			if item, ok := v[at]; ok {
				c.collectTypeKeyTags(next, item, typeName)
			}
		default:
			// skip in cases field value's null
		}

		return
	}

	// Last path segment: read the key value(s) and tag them.
	switch v := data.(type) {
	case []interface{}:
		for _, item := range v {
			c.collectTypeKeyTags(path, item, typeName)
		}
	case map[string]interface{}:
		c.addTagForTypeKey(at, v[at], typeName)
	default:
		c.Walker.StopWithInternalErr(fmt.Errorf("invalid data type expected map or array map but got %T", v))
	}
}
126 |
// cachingTags is a set of cache tag strings attached to a stored result.
type cachingTags map[string]struct{}

// cachingTagAnalyzer derives cache tags for one request, using the configured
// per-type key fields (EnterField defaults to `id` when a type has none).
type cachingTagAnalyzer struct {
	request  *cachingRequest
	typeKeys graphql.RequestTypes
}

// newCachingTagAnalyzer creates an analyzer for request r with type keys t.
func newCachingTagAnalyzer(r *cachingRequest, t graphql.RequestTypes) *cachingTagAnalyzer {
	return &cachingTagAnalyzer{r, t}
}
137 |
138 | func (c *cachingTagAnalyzer) AnalyzeResult(result []byte, onlyTypes map[string]struct{}, tags cachingTags) (err error) {
139 | normalizedQueryResult := &struct {
140 | Data map[string]interface{} `json:"data,omitempty"`
141 | }{}
142 |
143 | if err = json.Unmarshal(result, normalizedQueryResult); err != nil {
144 | return err
145 | }
146 |
147 | if normalizedQueryResult.Data == nil || len(normalizedQueryResult.Data) == 0 {
148 | return errors.New("query result: `data` field missing")
149 | }
150 |
151 | if err = c.request.initOperation(); err != nil {
152 | return err
153 | }
154 |
155 | report := &operationreport.Report{}
156 | walker := astvisitor.NewWalker(48)
157 | visitor := &cachingTagVisitor{
158 | cachingTagAnalyzer: c,
159 | Walker: &walker,
160 | data: normalizedQueryResult.Data,
161 | tags: tags,
162 | onlyTypes: onlyTypes,
163 | }
164 |
165 | walker.RegisterEnterFieldVisitor(visitor)
166 | walker.Walk(c.request.operation, c.request.definition, report)
167 |
168 | if report.HasErrors() {
169 | return report
170 | }
171 |
172 | schemaHash, _ := c.request.schema.Hash()
173 | schemaHashTag := fmt.Sprintf(cachingTagSchemaHashPattern, schemaHash)
174 | operationTag := fmt.Sprintf(cachingTagOperationPattern, c.request.gqlRequest.OperationName)
175 | tags[schemaHashTag] = struct{}{}
176 | tags[operationTag] = struct{}{}
177 |
178 | return nil
179 | }
180 |
181 | func (t cachingTags) ToSlice() []string {
182 | s := make([]string, 0)
183 |
184 | for item := range t {
185 | s = append(s, item)
186 | }
187 |
188 | sort.Strings(s)
189 |
190 | return s
191 | }
192 |
// TypeKeys returns only the "key:" tags.
func (t cachingTags) TypeKeys() cachingTags {
	return t.filterWithPrefix(cachingTagTypeKeyPrefix)
}

// Types returns only the "type:" tags.
func (t cachingTags) Types() cachingTags {
	return t.filterWithPrefix(cachingTagTypePrefix)
}

// TypeFields returns only the "field:" tags.
func (t cachingTags) TypeFields() cachingTags {
	return t.filterWithPrefix(cachingTagTypeFieldPrefix)
}

// SchemaHash returns only the "schema:" tags.
func (t cachingTags) SchemaHash() cachingTags {
	return t.filterWithPrefix(cachingTagSchemaHashPrefix)
}

// Operation returns only the "operation:" tags.
func (t cachingTags) Operation() cachingTags {
	return t.filterWithPrefix(cachingTagOperationPrefix)
}
212 |
213 | func (t cachingTags) filterWithPrefix(prefix string) cachingTags {
214 | keys := make(cachingTags)
215 |
216 | for tag := range t {
217 | if strings.HasPrefix(tag, prefix) {
218 | keys[tag] = struct{}{}
219 | }
220 | }
221 |
222 | return keys
223 | }
224 |
--------------------------------------------------------------------------------
/caching_tag_test.go:
--------------------------------------------------------------------------------
1 | package gbox
2 |
3 | import (
4 | "fmt"
5 | "testing"
6 |
7 | "github.com/jensneuse/graphql-go-tools/pkg/graphql"
8 | "github.com/stretchr/testify/require"
9 | )
10 |
11 | func TestCachingTagAnalyzer_AnalyzeResult_WithoutTypeKeys(t *testing.T) {
12 | cr := newTestCachingRequest()
13 | tags := make(cachingTags)
14 | analyzer := newCachingTagAnalyzer(cr, nil)
15 | err := analyzer.AnalyzeResult([]byte(`{"data": {"users":[{"name":"A"}]}}`), nil, tags)
16 | sh, _ := cr.schema.Hash()
17 |
18 | require.NoError(t, err)
19 | require.Equal(t, tags.Types().ToSlice(), []string{"type:Query", "type:User"})
20 | require.Equal(t, []string{fmt.Sprintf(cachingTagSchemaHashPattern, sh)}, tags.SchemaHash().ToSlice())
21 | require.Equal(t, tags.TypeFields().ToSlice(), []string{"field:Query:users", "field:User:name"})
22 | require.Equal(t, tags.TypeKeys().ToSlice(), []string{})
23 | require.Equal(t, tags.Operation().ToSlice(), []string{fmt.Sprintf(cachingTagOperationPattern, cr.gqlRequest.OperationName)})
24 | }
25 |
26 | func TestCachingTagAnalyzer_AnalyzeResult_WithTypeKeys(t *testing.T) {
27 | cr := newTestCachingRequest()
28 | tags := make(cachingTags)
29 | analyzer := newCachingTagAnalyzer(cr, graphql.RequestTypes{
30 | "User": graphql.RequestFields{
31 | "name": struct{}{},
32 | },
33 | })
34 | err := analyzer.AnalyzeResult([]byte(`{"data": {"users":[{"name":"A"}]}}`), nil, tags)
35 | sh, _ := cr.schema.Hash()
36 |
37 | require.NoError(t, err)
38 | require.Equal(t, tags.Types().ToSlice(), []string{"type:Query", "type:User"})
39 | require.Equal(t, []string{fmt.Sprintf(cachingTagSchemaHashPattern, sh)}, tags.SchemaHash().ToSlice())
40 | require.Equal(t, tags.TypeFields().ToSlice(), []string{"field:Query:users", "field:User:name"})
41 | require.Equal(t, tags.TypeKeys().ToSlice(), []string{"key:User:name:A"})
42 | require.Equal(t, tags.Operation().ToSlice(), []string{fmt.Sprintf(cachingTagOperationPattern, cr.gqlRequest.OperationName)})
43 | }
44 |
45 | func TestCachingTagAnalyzer_AnalyzeResult_OnlyTypes(t *testing.T) {
46 | cr := newTestCachingRequest()
47 | tags := make(cachingTags)
48 | analyzer := newCachingTagAnalyzer(cr, graphql.RequestTypes{
49 | "User": graphql.RequestFields{
50 | "name": struct{}{},
51 | },
52 | })
53 | err := analyzer.AnalyzeResult(
54 | []byte(`{"data": {"users":[{"name":"A"}]}}`),
55 | map[string]struct{}{"Unknown": {}},
56 | tags,
57 | )
58 | sh, _ := cr.schema.Hash()
59 |
60 | require.NoError(t, err)
61 | require.Equal(t, tags.Types().ToSlice(), []string{"type:Query"})
62 | require.Equal(t, []string{fmt.Sprintf(cachingTagSchemaHashPattern, sh)}, tags.SchemaHash().ToSlice())
63 | require.Equal(t, tags.TypeFields().ToSlice(), []string{"field:Query:users"})
64 | require.Equal(t, tags.TypeKeys().ToSlice(), []string{})
65 | require.Equal(t, tags.Operation().ToSlice(), []string{fmt.Sprintf(cachingTagOperationPattern, cr.gqlRequest.OperationName)})
66 | }
67 |
--------------------------------------------------------------------------------
/caching_test.go:
--------------------------------------------------------------------------------
1 | package gbox
2 |
3 | import (
4 | "context"
5 | "testing"
6 |
7 | "github.com/caddyserver/caddy/v2"
8 | "github.com/stretchr/testify/require"
9 | "go.uber.org/zap"
10 | )
11 |
// TestCaching_Cleanup verifies that Cleanup cancels the background context and
// removes the store entry for the instance's DSN from the shared registry.
func TestCaching_Cleanup(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	c := &Caching{
		ctxBackground:       ctx,
		ctxBackgroundCancel: cancel,
		StoreDsn:            "test",
		logger:              zap.NewNop(),
	}

	// First LoadOrStore inserts: the DSN is not yet registered.
	_, loaded := cachingStores.LoadOrStore(c.StoreDsn, "b")
	require.False(t, loaded)

	require.NoError(t, ctx.Err())
	require.NoError(t, c.Cleanup())
	// Cleanup must have canceled the background context...
	require.Error(t, ctx.Err())

	// ...and dropped the DSN from the registry, so LoadOrStore inserts again.
	_, loaded = cachingStores.LoadOrStore(c.StoreDsn, "b")
	require.False(t, loaded)
}
31 |
32 | func TestCaching_Validate(t *testing.T) {
33 | testCases := map[string]struct {
34 | caching *Caching
35 | expectedErrorMsg string
36 | }{
37 | "valid_rules_without_varies": {
38 | caching: &Caching{
39 | Rules: CachingRules{
40 | "default": &CachingRule{
41 | MaxAge: 1,
42 | },
43 | },
44 | },
45 | },
46 | "valid_rules_with_varies": {
47 | caching: &Caching{
48 | Varies: map[string]*CachingVary{
49 | "test": {},
50 | },
51 | Rules: CachingRules{
52 | "default": &CachingRule{
53 | MaxAge: 1,
54 | Varies: []string{"test"},
55 | },
56 | },
57 | },
58 | },
59 | "invalid_rules_max_age": {
60 | expectedErrorMsg: "caching rule default, max age must greater than zero",
61 | caching: &Caching{
62 | Rules: CachingRules{
63 | "default": &CachingRule{},
64 | },
65 | },
66 | },
67 | "rules_vary_name_not_exist": {
68 | expectedErrorMsg: "caching rule default, configured vary: test does not exist",
69 | caching: &Caching{
70 | Rules: CachingRules{
71 | "default": &CachingRule{
72 | MaxAge: 1,
73 | Varies: []string{"test"},
74 | },
75 | },
76 | },
77 | },
78 | }
79 |
80 | for name, testCase := range testCases {
81 | err := testCase.caching.Validate()
82 |
83 | if testCase.expectedErrorMsg != "" {
84 | require.Errorf(t, err, "case %s: expected error but not", name)
85 | require.Equalf(t, testCase.expectedErrorMsg, err.Error(), "case %s: unexpected error message", name)
86 | } else {
87 | require.NoErrorf(t, err, "case %s: should not error", name)
88 | }
89 | }
90 | }
91 |
92 | func TestCaching_Provision(t *testing.T) {
93 | c := &Caching{
94 | StoreDsn: "redis://test",
95 | }
96 |
97 | require.NoError(t, c.Provision(caddy.Context{}))
98 | require.NotNil(t, c.store)
99 | require.NotNil(t, c.ctxBackground)
100 | require.NotNil(t, c.ctxBackgroundCancel)
101 | }
102 |
--------------------------------------------------------------------------------
/caching_vary.go:
--------------------------------------------------------------------------------
1 | package gbox
2 |
3 | import (
4 | "encoding/json"
5 |
6 | "github.com/jensneuse/graphql-go-tools/pkg/pool"
7 | )
8 |
// CachingVary describes which parts of an HTTP request (headers and cookies)
// participate in computing a query result cache key, so that responses can be
// cached per-user or per-client where needed.
type CachingVary struct {
	// Headers lists request header names whose values become part of the
	// query result cache key.
	Headers []string `json:"headers,omitempty"`

	// Cookies lists request cookie names whose values become part of the
	// query result cache key.
	Cookies []string `json:"cookies,omitempty"`
}

// CachingVaries maps a vary name (as referenced by caching rules) to its
// definition.
type CachingVaries map[string]*CachingVary
19 |
20 | func (varies CachingVaries) hash() (uint64, error) {
21 | if varies == nil {
22 | return 0, nil
23 | }
24 |
25 | hash := pool.Hash64.Get()
26 | hash.Reset()
27 | defer pool.Hash64.Put(hash)
28 |
29 | if err := json.NewEncoder(hash).Encode(varies); err != nil {
30 | return 0, err
31 | }
32 |
33 | return hash.Sum64(), nil
34 | }
35 |
--------------------------------------------------------------------------------
/caching_vary_test.go:
--------------------------------------------------------------------------------
1 | package gbox
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/stretchr/testify/require"
7 | )
8 |
9 | func TestCachingVariesHash(t *testing.T) {
10 | var varies CachingVaries
11 | hash, err := varies.hash()
12 |
13 | require.NoError(t, err)
14 | require.Equal(t, uint64(0), hash)
15 |
16 | varies = CachingVaries{
17 | "default": &CachingVary{
18 | Cookies: []string{"session"},
19 | },
20 | }
21 |
22 | hash, err = varies.hash()
23 |
24 | require.NoError(t, err)
25 | require.Greater(t, hash, uint64(0))
26 | }
27 |
--------------------------------------------------------------------------------
/caddyfile.go:
--------------------------------------------------------------------------------
1 | package gbox
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "net/url"
7 | "strconv"
8 | "time"
9 |
10 | "github.com/caddyserver/caddy/v2"
11 | "github.com/caddyserver/caddy/v2/caddyconfig"
12 | "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
13 | "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
14 | "github.com/caddyserver/caddy/v2/modules/caddyhttp"
15 | "github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy"
16 | "github.com/caddyserver/caddy/v2/modules/caddyhttp/rewrite"
17 | )
18 |
// init registers the "gbox" Caddyfile handler directive so it can be used
// inside site blocks; parsing is delegated to parseCaddyfile.
func init() { // nolint:gochecknoinits
	httpcaddyfile.RegisterHandlerDirective("gbox", parseCaddyfile)
}
22 |
23 | func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) { // nolint:ireturn
24 | m := new(Handler).CaddyModule().New().(*Handler)
25 |
26 | if err := m.UnmarshalCaddyfile(h.Dispenser); err != nil {
27 | return nil, err
28 | }
29 |
30 | if m.Upstream == "" {
31 | return nil, errors.New("upstream url must be set")
32 | }
33 |
34 | return m, nil
35 | }
36 |
// UnmarshalCaddyfile configures the Handler from the "gbox" Caddyfile
// directive. Recognized subdirectives: upstream, disabled_introspection,
// fetch_schema_interval, fetch_schema_timeout, fetch_schema_header,
// complexity, caching, disabled_playgrounds, cors_origins and
// cors_allowed_headers. "upstream" may appear at most once; parseCaddyfile
// enforces that it is present.
// nolint:funlen,gocyclo
func (h *Handler) UnmarshalCaddyfile(d *caddyfile.Dispenser) (err error) {
	for d.Next() {
		for d.NextBlock(0) {
			switch d.Val() {
			case "upstream":
				if h.Upstream != "" {
					return d.Err("upstream already specified")
				}

				if !d.NextArg() {
					return d.ArgErr()
				}

				val := d.Val()
				var u *url.URL
				var tokens []caddyfile.Token
				u, err = url.Parse(val)

				if err != nil {
					return err
				}

				// Derive two sub-handlers from the upstream url: a rewrite to
				// the upstream's request URI and a reverse proxy to its host,
				// with the Host header pinned to the upstream host:port.
				r := &rewrite.Rewrite{URI: u.RequestURI()}
				rp := &reverseproxy.Handler{}
				rpPattern := `
reverse_proxy {
	to %s://%s
	header_up Host {upstream_hostport}
}
`
				rpConfig := fmt.Sprintf(rpPattern, u.Scheme, u.Host)
				tokens, err = caddyfile.Tokenize([]byte(rpConfig), "")

				if err != nil {
					return err
				}

				// First pass: load the generated base reverse proxy config.
				err = rp.UnmarshalCaddyfile(caddyfile.NewDispenser(tokens))

				if err != nil {
					return err
				}

				// unmarshal again to add extra reverse proxy config
				// (any nested block the user wrote under "upstream").
				err = rp.UnmarshalCaddyfile(d.NewFromNextSegment())

				if err != nil {
					return err
				}

				// Store the raw JSON forms; they are provisioned later.
				h.Upstream = val
				h.RewriteRaw = caddyconfig.JSONModuleObject(r, "rewrite", "rewrite", nil)
				h.ReverseProxyRaw = caddyconfig.JSONModuleObject(rp, "reverse_proxy", "reverse_proxy", nil)
			case "disabled_introspection":
				if !d.NextArg() {
					return d.ArgErr()
				}

				var disabled bool
				disabled, err = strconv.ParseBool(d.Val())

				if err != nil {
					return err
				}

				h.DisabledIntrospection = disabled
			case "fetch_schema_interval":
				if !d.NextArg() {
					return d.ArgErr()
				}

				var dt time.Duration
				dt, err = caddy.ParseDuration(d.Val())

				if err != nil {
					return err
				}

				h.FetchSchemaInterval = caddy.Duration(dt)
			case "fetch_schema_timeout":
				if !d.NextArg() {
					return d.ArgErr()
				}

				var dt time.Duration
				dt, err = caddy.ParseDuration(d.Val())

				if err != nil {
					return err
				}

				h.FetchSchemaTimeout = caddy.Duration(dt)
			case "fetch_schema_header":
				// Takes a header name followed by a value; may repeat to
				// accumulate multiple headers.
				if !d.NextArg() {
					return d.ArgErr()
				}

				name := d.Val()

				if !d.NextArg() {
					return d.ArgErr()
				}

				h.FetchSchemaHeader.Add(name, d.Val())
			case "complexity":
				if h.Complexity != nil {
					return d.Err("complexity already specified")
				}

				if err = h.unmarshalCaddyfileComplexity(d.NewFromNextSegment()); err != nil {
					return err
				}
			case "caching":
				if h.Caching != nil {
					return d.Err("caching already specified")
				}

				if err = h.unmarshalCaddyfileCaching(d.NewFromNextSegment()); err != nil {
					return err
				}
			case "disabled_playgrounds":
				if !d.NextArg() {
					return d.ArgErr()
				}

				var disabled bool
				disabled, err = strconv.ParseBool(d.Val())

				if err != nil {
					return err
				}

				h.DisabledPlaygrounds = disabled
			case "cors_origins":
				origins := d.RemainingArgs()

				if len(origins) == 0 {
					return d.ArgErr()
				}

				h.CORSOrigins = origins
			case "cors_allowed_headers":
				headers := d.RemainingArgs()

				if len(headers) == 0 {
					return d.ArgErr()
				}

				h.CORSAllowedHeaders = headers
			default:
				return d.Errf("unrecognized subdirective %s", d.Val())
			}
		}
	}

	return err
}
195 |
// Interface guards: fail the build if Handler stops implementing
// caddyfile.Unmarshaler.
var (
	_ caddyfile.Unmarshaler = (*Handler)(nil)
)
200 |
--------------------------------------------------------------------------------
/caddyfile_caching.go:
--------------------------------------------------------------------------------
1 | package gbox
2 |
3 | import (
4 | "fmt"
5 | "net/url"
6 | "strconv"
7 |
8 | "github.com/caddyserver/caddy/v2"
9 | "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
10 | "github.com/jensneuse/graphql-go-tools/pkg/graphql"
11 | )
12 |
// unmarshalCaddyfileCaching parses the "caching" sub-block of the gbox
// directive into h.Caching. The block is always parsed in full; when it sets
// "enabled false" the result is simply discarded and h.Caching stays nil.
func (h *Handler) unmarshalCaddyfileCaching(d *caddyfile.Dispenser) error {
	var disabled bool
	caching := new(Caching)

	for d.Next() {
		for d.NextBlock(0) {
			switch d.Val() {
			case "enabled":
				if !d.NextArg() {
					return d.ArgErr()
				}

				val, err := strconv.ParseBool(d.Val())
				if err != nil {
					return err
				}

				// Stored inverted; checked after the whole block is parsed.
				disabled = !val
			case "store_dsn":
				if !d.NextArg() {
					return d.ArgErr()
				}

				// Validate that the DSN is a parsable URL before keeping it.
				_, err := url.Parse(d.Val())
				if err != nil {
					return err
				}

				caching.StoreDsn = d.Val()
			case "rules":
				if err := caching.unmarshalCaddyfileRules(d.NewFromNextSegment()); err != nil {
					return err
				}
			case "varies":
				if err := caching.unmarshalCaddyfileVaries(d.NewFromNextSegment()); err != nil {
					return err
				}
			case "type_keys":
				if err := caching.unmarshalCaddyfileTypeKeys(d.NewFromNextSegment()); err != nil {
					return err
				}
			case "auto_invalidate_cache":
				if !d.NextArg() {
					return d.ArgErr()
				}

				val, err := strconv.ParseBool(d.Val())
				if err != nil {
					return err
				}

				caching.AutoInvalidate = val
			case "debug_headers":
				if !d.NextArg() {
					return d.ArgErr()
				}

				val, err := strconv.ParseBool(d.Val())
				if err != nil {
					return err
				}

				caching.DebugHeaders = val
			default:
				return d.Errf("unrecognized subdirective %s", d.Val())
			}
		}
	}

	if !disabled {
		h.Caching = caching
	}

	return nil
}
88 |
// unmarshalCaddyfileRules parses the "rules" sub-block of the caching
// directive into c.Rules. Each nested block is keyed by rule name and may
// configure types, max_age, swr and varies.
func (c *Caching) unmarshalCaddyfileRules(d *caddyfile.Dispenser) error {
	rules := make(CachingRules)

	for d.Next() {
		for d.NextBlock(0) {
			rule := new(CachingRule)
			// The current token is the rule name (e.g. "default").
			desc := d.Val()

			for subNesting := d.Nesting(); d.NextBlock(subNesting); {
				switch d.Val() {
				case "types":
					if err := rule.unmarshalCaddyfileTypes(d.NewFromNextSegment()); err != nil {
						return err
					}
				case "max_age":
					if !d.NextArg() {
						return d.ArgErr()
					}

					v, err := caddy.ParseDuration(d.Val())
					if err != nil {
						return err
					}

					rule.MaxAge = caddy.Duration(v)
				case "swr":
					if !d.NextArg() {
						return d.ArgErr()
					}

					v, err := caddy.ParseDuration(d.Val())
					if err != nil {
						return err
					}

					rule.Swr = caddy.Duration(v)
				case "varies":
					args := d.RemainingArgs()

					if len(args) == 0 {
						return d.ArgErr()
					}

					rule.Varies = args
				default:
					return d.Errf("unrecognized subdirective %s", d.Val())
				}
			}

			rules[desc] = rule
		}
	}

	c.Rules = rules

	return nil
}
146 |
147 | func (r *CachingRule) unmarshalCaddyfileTypes(d *caddyfile.Dispenser) error {
148 | types := make(graphql.RequestTypes)
149 |
150 | for d.Next() {
151 | for d.NextBlock(0) {
152 | val := d.Val()
153 |
154 | if _, ok := types[val]; ok {
155 | return fmt.Errorf("%s already specific", d.Val())
156 | }
157 |
158 | fields := map[string]struct{}{}
159 | args := d.RemainingArgs()
160 |
161 | for _, arg := range args {
162 | fields[arg] = struct{}{}
163 | }
164 |
165 | types[val] = fields
166 | }
167 | }
168 |
169 | r.Types = types
170 |
171 | return nil
172 | }
173 |
// unmarshalCaddyfileVaries parses the "varies" sub-block of the caching
// directive into c.Varies. Each nested block is keyed by vary name and lists
// the request headers and cookies that participate in cache keys.
func (c *Caching) unmarshalCaddyfileVaries(d *caddyfile.Dispenser) error {
	varies := make(CachingVaries)

	for d.Next() {
		for d.NextBlock(0) {
			// The current token is the vary name referenced by rules.
			name := d.Val()
			vary := &CachingVary{
				Headers: []string{},
				Cookies: []string{},
			}

			for subNesting := d.Nesting(); d.NextBlock(subNesting); {
				switch d.Val() {
				case "headers":
					args := d.RemainingArgs()

					if len(args) == 0 {
						return d.ArgErr()
					}

					vary.Headers = args
				case "cookies":
					args := d.RemainingArgs()

					if len(args) == 0 {
						return d.ArgErr()
					}

					vary.Cookies = args
				default:
					return d.Errf("unrecognized subdirective %s", d.Val())
				}
			}

			varies[name] = vary
		}
	}

	c.Varies = varies

	return nil
}
216 |
217 | func (c *Caching) unmarshalCaddyfileTypeKeys(d *caddyfile.Dispenser) error {
218 | fields := make(map[string]struct{})
219 | typeKeys := make(graphql.RequestTypes)
220 |
221 | for d.Next() {
222 | for d.NextBlock(0) {
223 | typeName := d.Val()
224 | args := d.RemainingArgs()
225 |
226 | if len(args) == 0 {
227 | return d.ArgErr()
228 | }
229 |
230 | for _, field := range args {
231 | fields[field] = struct{}{}
232 | }
233 |
234 | typeKeys[typeName] = fields
235 | }
236 | }
237 |
238 | c.TypeKeys = typeKeys
239 |
240 | return nil
241 | }
242 |
--------------------------------------------------------------------------------
/caddyfile_complexity.go:
--------------------------------------------------------------------------------
1 | package gbox
2 |
3 | import (
4 | "strconv"
5 |
6 | "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
7 | )
8 |
// unmarshalCaddyfileComplexity parses the "complexity" sub-block of the gbox
// directive into h.Complexity. The block is always parsed in full; when it
// sets "enabled false" the result is discarded and h.Complexity stays nil.
func (h *Handler) unmarshalCaddyfileComplexity(d *caddyfile.Dispenser) error {
	var disabled bool
	complexity := new(Complexity)

	for d.Next() {
		for d.NextBlock(0) {
			switch d.Val() {
			case "enabled":
				if !d.NextArg() {
					return d.ArgErr()
				}

				v, err := strconv.ParseBool(d.Val())
				if err != nil {
					return err
				}

				// Stored inverted; checked after the whole block is parsed.
				disabled = !v
			case "max_depth":
				if !d.NextArg() {
					return d.ArgErr()
				}

				v, err := strconv.ParseInt(d.Val(), 10, 32)
				if err != nil {
					return err
				}

				complexity.MaxDepth = int(v)
			case "node_count_limit":
				if !d.NextArg() {
					return d.ArgErr()
				}

				v, err := strconv.ParseInt(d.Val(), 10, 32)
				if err != nil {
					return err
				}

				complexity.NodeCountLimit = int(v)
			case "max_complexity":
				if !d.NextArg() {
					return d.ArgErr()
				}

				v, err := strconv.ParseInt(d.Val(), 10, 32)
				if err != nil {
					return err
				}

				complexity.MaxComplexity = int(v)
			default:
				return d.Errf("unrecognized subdirective %s", d.Val())
			}
		}
	}

	if !disabled {
		h.Complexity = complexity
	}

	return nil
}
72 |
--------------------------------------------------------------------------------
/caddyfile_test.go:
--------------------------------------------------------------------------------
1 | package gbox
2 |
3 | import (
4 | "fmt"
5 | "strconv"
6 | "testing"
7 | "time"
8 |
9 | "github.com/caddyserver/caddy/v2"
10 | "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
11 | "github.com/stretchr/testify/require"
12 | )
13 |
14 | func TestCaddyfile(t *testing.T) {
15 | testCases := map[string]struct {
16 | name string
17 | disabledIntrospection string
18 | disabledPlaygrounds string
19 | enabledCaching string
20 | enabledComplexity string
21 | enabledCachingAutoInvalidate string
22 | }{
23 | "enabled_all_features": {
24 | enabledCaching: "true",
25 | enabledCachingAutoInvalidate: "true",
26 | enabledComplexity: "true",
27 | disabledIntrospection: "false",
28 | disabledPlaygrounds: "false",
29 | },
30 | "disabled_all_features": {
31 | enabledCaching: "false",
32 | enabledCachingAutoInvalidate: "false",
33 | enabledComplexity: "false",
34 | disabledIntrospection: "true",
35 | disabledPlaygrounds: "true",
36 | },
37 | "enabled_caching_and_disabled_caching_auto_invalidate": {
38 | enabledCaching: "true",
39 | enabledCachingAutoInvalidate: "false",
40 | enabledComplexity: "false",
41 | disabledIntrospection: "true",
42 | disabledPlaygrounds: "true",
43 | },
44 | }
45 |
46 | for name, testCase := range testCases {
47 | h := &Handler{}
48 | d := caddyfile.NewTestDispenser(fmt.Sprintf(`
49 | gbox {
50 | upstream http://localhost:9091
51 | complexity {
52 | enabled %s
53 | max_depth 3
54 | max_complexity 2
55 | node_count_limit 1
56 | }
57 | disabled_playgrounds %s
58 | disabled_introspection %s
59 | caching {
60 | enabled %s
61 | auto_invalidate_cache %s
62 | varies {
63 | authorization {
64 | headers Authorization
65 | cookies session_id
66 | }
67 | }
68 | rules {
69 | rule1 {
70 | max_age 10m
71 | }
72 | rule2 {
73 | max_age 5m
74 | }
75 | }
76 | }
77 | }
78 | `, testCase.enabledComplexity, testCase.disabledPlaygrounds, testCase.disabledIntrospection, testCase.enabledCaching, testCase.enabledCachingAutoInvalidate))
79 | require.NoErrorf(t, h.UnmarshalCaddyfile(d), "case %s: unmarshal caddy file error", name)
80 | require.Equalf(t, h.Upstream, "http://localhost:9091", "case %s: invalid upstream", name)
81 | require.NotNilf(t, h.RewriteRaw, "case %s: rewrite raw should be set", name)
82 | require.NotNilf(t, h.ReverseProxyRaw, "case %s: reverse proxy raw should be set", name)
83 |
84 | enabledComplexity, _ := strconv.ParseBool(testCase.enabledComplexity)
85 | enabledCaching, _ := strconv.ParseBool(testCase.enabledCaching)
86 | disabledPlaygrounds, _ := strconv.ParseBool(testCase.disabledPlaygrounds)
87 | disabledIntrospection, _ := strconv.ParseBool(testCase.disabledIntrospection)
88 | enabledCachingAutoInvalidate, _ := strconv.ParseBool(testCase.enabledCachingAutoInvalidate)
89 |
90 | if enabledCaching {
91 | rule1, rule1Exist := h.Caching.Rules["rule1"]
92 | rule2, rule2Exist := h.Caching.Rules["rule2"]
93 |
94 | require.Equalf(t, enabledCachingAutoInvalidate, h.Caching.AutoInvalidate, "case %s: unexpected caching auto invalidate", name)
95 | require.Truef(t, rule1Exist, "case %s: rule1 should be exist", name)
96 | require.Truef(t, rule2Exist, "case %s: rule2 should be exist", name)
97 | require.Equal(t, caddy.Duration(time.Minute*10), rule1.MaxAge, "case %s: unexpected rule1 max age", name)
98 | require.Equal(t, caddy.Duration(time.Minute*5), rule2.MaxAge, "case %s: unexpected rule2 max age", name)
99 | } else {
100 | require.Nilf(t, h.Caching, "case %s: caching should be nil if not enabled", name)
101 | }
102 |
103 | if enabledComplexity {
104 | require.Equalf(t, 3, h.Complexity.MaxDepth, "case %s: max depth should be 3", name)
105 | require.Equalf(t, 2, h.Complexity.MaxComplexity, "case %s: max complexity should be 2", name)
106 | require.Equalf(t, 1, h.Complexity.NodeCountLimit, "case %s: node count limit should be 1", name)
107 | } else {
108 | require.Nilf(t, h.Complexity, "case %s: complexity should be nil if not enabled", name)
109 | }
110 |
111 | require.Equalf(t, disabledIntrospection, h.DisabledIntrospection, "case %s: unexpected disabled introspection", name)
112 | require.Equalf(t, disabledPlaygrounds, h.DisabledPlaygrounds, "case %s: unexpected disabled playgrounds", name)
113 | }
114 | }
115 |
// TestCaddyfileErrors checks that malformed "gbox" directives surface the
// expected parse error from each subdirective handler. Each case's config is
// spliced into a gbox block and must fail with a message containing errorMsg.
func TestCaddyfileErrors(t *testing.T) {
	testCases := map[string]struct {
		config   string
		errorMsg string
	}{
		"unexpected_gbox_subdirective": {
			config:   `unknown`,
			errorMsg: `unrecognized subdirective unknown`,
		},
		"blank_gbox_disabled_introspection": {
			config: `
disabled_introspection
`,
			errorMsg: `Wrong argument count`,
		},
		"invalid_syntax_gbox_disabled_introspection": {
			config: `
disabled_introspection invalid
`,
			errorMsg: `invalid syntax`,
		},
		"blank_gbox_complexity_enabled": {
			config: `
complexity {
	enabled
}
`,
			errorMsg: `Wrong argument count`,
		},
		"invalid_syntax_gbox_complexity_enabled": {
			config: `
complexity {
	enabled invalid
}
`,
			errorMsg: `invalid syntax`,
		},
		"blank_gbox_complexity_max_complexity": {
			config: `
complexity {
	max_complexity
}
`,
			errorMsg: `Wrong argument count`,
		},
		"invalid_syntax_gbox_complexity_max_complexity": {
			config: `
complexity {
	max_complexity invalid
}
`,
			errorMsg: `invalid syntax`,
		},
		"blank_gbox_complexity_max_depth": {
			config: `
complexity {
	max_depth
}
`,
			errorMsg: `Wrong argument count`,
		},
		"invalid_syntax_gbox_complexity_max_depth": {
			config: `
complexity {
	max_depth invalid
}
`,
			errorMsg: `invalid syntax`,
		},
		"blank_gbox_complexity_node_count_limit": {
			config: `
complexity {
	node_count_limit
}
`,
			errorMsg: `Wrong argument count`,
		},
		// NOTE(review): this case exercises max_depth, not node_count_limit —
		// looks like a copy-paste from the case above; verify intent.
		"invalid_syntax_gbox_complexity_node_count_limit": {
			config: `
complexity {
	max_depth invalid
}
`,
			errorMsg: `invalid syntax`,
		},
		"unexpected_gbox_complexity_subdirective": {
			config: `
complexity {
	unknown
}
`,
			errorMsg: `unrecognized subdirective unknown`,
		},
		"unexpected_gbox_caching_subdirective": {
			config: `
caching {
	unknown
}
`,
			errorMsg: `unrecognized subdirective unknown`,
		},
		"blank_gbox_caching_enabled": {
			config: `
caching {
	enabled
}
`,
			errorMsg: `Wrong argument count`,
		},
		"invalid_syntax_gbox_caching_enabled": {
			config: `
caching {
	enabled invalid
}
`,
			errorMsg: `invalid syntax`,
		},
		"blank_gbox_caching_auto_invalidate_cache": {
			config: `
caching {
	auto_invalidate_cache
}
`,
			errorMsg: `Wrong argument count`,
		},
		"invalid_syntax_gbox_caching_auto_invalidate_cache": {
			config: `
caching {
	auto_invalidate_cache invalid
}
`,
			errorMsg: `invalid syntax`,
		},
		"blank_gbox_caching_store_dsn": {
			config: `
caching {
	store_dsn
}
`,
			errorMsg: `Wrong argument count`,
		},
		"invalid_syntax_gbox_caching_store_dsn": {
			config: `
caching {
	store_dsn !://a
}
`,
			errorMsg: `first path segment in URL cannot contain colon`,
		},
		"unexpected_gbox_caching_rules_subdirective": {
			config: `
caching {
	rules {
		a {
			unknown
		}
	}
}
`,
			errorMsg: `unrecognized subdirective unknown`,
		},
		"unexpected_gbox_caching_varies_subdirective": {
			config: `
caching {
	varies {
		a {
			unknown
		}
	}
}
`,
			errorMsg: `unrecognized subdirective unknown`,
		},
		"unexpected_gbox_caching_type_keys": {
			config: `
caching {
	type_keys {
		UserTest
	}
}
`,
			errorMsg: `Wrong argument count`,
		},
	}

	for name, testCase := range testCases {
		h := &Handler{}
		d := caddyfile.NewTestDispenser(fmt.Sprintf(`
gbox {
	%s
}
`, testCase.config))
		e := h.UnmarshalCaddyfile(d)
		require.Errorf(t, e, "case %s: should be invalid", name)
		require.Contains(t, e.Error(), testCase.errorMsg, "case %s: unexpected error message", name)
	}
}
313 |
--------------------------------------------------------------------------------
/charts/gbox/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and
3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store
5 | # Common VCS dirs
6 | .git/
7 | .gitignore
8 | .bzr/
9 | .bzrignore
10 | .hg/
11 | .hgignore
12 | .svn/
13 | # Common backup files
14 | *.swp
15 | *.bak
16 | *.tmp
17 | *.orig
18 | *~
19 | # Various IDEs
20 | .project
21 | .idea/
22 | *.tmproj
23 | .vscode/
24 |
--------------------------------------------------------------------------------
/charts/gbox/Chart.lock:
--------------------------------------------------------------------------------
1 | dependencies:
2 | - name: redis
3 | repository: https://charts.bitnami.com/bitnami
4 | version: 16.8.9
5 | digest: sha256:229b73b6e9192b61243500e9497f8a67ce1df3a3fbe7f64105487e0eed0e4f7f
6 | generated: "2022-05-01T10:41:17.666828508+07:00"
7 |
--------------------------------------------------------------------------------
/charts/gbox/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | name: gbox
3 | description: GBox Helm chart for Kubernetes. GBox is a reverse proxy in front of any GraphQL server for caching, securing and monitoring.
4 | type: application
5 | version: 1.0.5
6 | appVersion: "v1.0.6"
7 | sources:
8 | - https://github.com/gbox-proxy/gbox
9 | dependencies:
10 | - name: redis
11 | version: 16.8.9
12 | repository: https://charts.bitnami.com/bitnami
13 | condition: redis.enabled
14 | maintainers:
15 | - name: vuongxuongminh
16 | email: vuongxuongminh@gmail.com
17 |
--------------------------------------------------------------------------------
/charts/gbox/README.md:
--------------------------------------------------------------------------------
1 | # GBox Chart for Kubernetes
2 |
3 |   
4 |
5 | GBox Helm chart for Kubernetes. GBox is a reverse proxy in front of any GraphQL server for caching, securing and monitoring.
6 |
7 | ## Installing the Chart
8 |
9 | To install the chart with the release name `my-release`, run the following commands:
10 |
11 | helm repo add gbox https://gbox-proxy.github.io/gbox
12 | helm install my-release gbox/gbox
13 |
14 | ## Requirements
15 |
16 | | Repository | Name | Version |
17 | |------------|------|---------|
18 | | https://charts.bitnami.com/bitnami | redis | 16.8.9 |
19 |
20 | ## Values
21 |
22 | | Key | Type | Default | Description |
23 | |-----|------|---------|-------------|
24 | | adminAuth.enabled | bool | `true` | Whether enable basic auth when interact with GraphQL admin endpoint. |
| adminAuth.password | string | `"gbox"` | Basic auth password. |
26 | | adminAuth.username | string | `"gbox"` | Basic auth username. |
27 | | affinity | object | `{}` | [Affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) configuration. See the [API reference](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling) for details. |
28 | | autoscaling | object | Disabled by default. | Autoscaling by resources |
29 | | caching.autoInvalidateCache | string | `"true"` | Whether auto invalidate cached data through mutation results or not. |
30 | | caching.debugHeaders | string | `"false"` | Whether add debug headers to query operations responses or not. |
31 | | caching.enabled | bool | `true` | Whether enable caching or not. |
32 | | caching.rules | string | Will cache all query results, see [values.yaml](values.yaml). | Caching rules configuration. |
33 | | caching.storeDsn | string | See [values.yaml](values.yaml). | By default, this chart use Redis to storing cached data, if you want to use your external Redis server, remember to disable internal Redis sub-chart. |
34 | | caching.typeKeys | string | `""` | Specific type keys configuration, by default `id` is key of all types. |
35 | | caching.varies | string | `""` | Caching varies configuration. |
36 | | complexity.enabled | bool | `true` | Whether enable filter query complexity or not. |
37 | | complexity.maxComplexity | int | `60` | The maximum number of Node requests that might be needed to execute the query. |
38 | | complexity.maxDepth | int | `15` | Max query depth. |
39 | | complexity.nodeCountLimit | int | `60` | The maximum number of Nodes a query may return. |
40 | | disabledIntrospection | bool | `false` | Whether disable introspection queries or not. |
41 | | disabledPlaygrounds | bool | `false` | Whether disable playgrounds or not. |
42 | | extraDirectives | string | `""` | GBox extra directives, useful in cases you may want to add CORS config and/or http headers when fetch schema from upstream. |
43 | | fetchSchemaInterval | string | `"10m"` | Interval times to introspect upstream schema definition. |
44 | | fullnameOverride | string | `""` | A name to substitute for the full names of resources. |
45 | | globalDirectives | string | `""` | Caddy [global directives](https://caddyserver.com/docs/caddyfile/options). |
46 | | image.pullPolicy | string | `"IfNotPresent"` | [Image pull policy](https://kubernetes.io/docs/concepts/containers/images/#updating-images) for updating already existing images on a node. |
47 | | image.repository | string | `"gboxproxy/gbox"` | Name of the image repository to pull the container image from. |
48 | | image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion. |
49 | | imagePullSecrets | list | `[]` | Reference to one or more secrets to be used when [pulling images](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#create-a-pod-that-uses-your-secret) (from private registries). |
50 | | ingress.annotations | object | `{}` | Annotations to be added to the ingress. |
51 | | ingress.className | string | `""` | Ingress [class name](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class). |
52 | | ingress.enabled | bool | `false` | Enable [ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/). |
53 | | ingress.hosts | list | See [values.yaml](values.yaml). | Ingress host configuration. |
54 | | ingress.tls | list | See [values.yaml](values.yaml). | Ingress TLS configuration. |
| metrics.enabled | bool | `true` | Whether enable Prometheus metric endpoint or not. |
56 | | metrics.path | string | `"/metrics"` | Url path of metric endpoint. |
57 | | metrics.serviceMonitor.additionalLabels | object | `{}` | Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus |
58 | | metrics.serviceMonitor.enabled | bool | `false` | Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator. When enabled @param metrics.enabled must be set to true |
59 | | metrics.serviceMonitor.honorLabels | bool | `false` | Specify honorLabels parameter to add the scrape endpoint |
60 | | metrics.serviceMonitor.interval | string | `"30s"` | The interval at which metrics should be scraped |
61 | | metrics.serviceMonitor.metricRelabelings | list | `[]` | Metrics RelabelConfigs to apply to samples before ingestion. |
62 | | metrics.serviceMonitor.namespace | string | `""` | The namespace in which the ServiceMonitor will be created |
63 | | metrics.serviceMonitor.relabellings | list | `[]` | Metrics RelabelConfigs to apply to samples before scraping. |
64 | | metrics.serviceMonitor.scrapeTimeout | string | `""` | The timeout after which the scrape is ended |
65 | | nameOverride | string | `""` | A name in place of the chart name for `app:` labels. |
66 | | nodeSelector | object | `{}` | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) configuration. |
67 | | podAnnotations | object | See [values.yaml](values.yaml). | Annotations to be added to pods. |
68 | | podSecurityContext | object | `{}` | Pod [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod). See the [API reference](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context) for details. |
69 | | redis.architecture | string | `"standalone"` | Set Redis architecture standalone or replication. |
70 | | redis.auth.password | string | `"!ChangeMe!"` | |
71 | | redis.enabled | bool | `true` | Whether enable Redis sub-chart or not. |
72 | | replicaCount | int | `1` | The number of replicas (pods) to launch |
73 | | resources | object | No requests or limits. | Container resource [requests and limits](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). See the [API reference](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources) for details. |
74 | | reverseProxyDirectives | string | `""` | Reverse proxy [directives](https://caddyserver.com/docs/caddyfile/directives/reverse_proxy). |
75 | | securityContext | object | `{}` | Container [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container). See the [API reference](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-1) for details. |
76 | | service.port | int | `80` | Service port. |
77 | | service.type | string | `"ClusterIP"` | Kubernetes [service type](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types). |
78 | | serviceAccount.annotations | object | `{}` | Annotations to add to the service account |
79 | | serviceAccount.create | bool | `true` | Specifies whether a service account should be created |
80 | | serviceAccount.name | string | `""` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template |
81 | | tolerations | list | `[]` | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) for node taints. See the [API reference](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling) for details. |
82 | | upstream | string | `""` | Your upstream GraphQL server url. |
83 |
--------------------------------------------------------------------------------
/charts/gbox/README.md.gotmpl:
--------------------------------------------------------------------------------
1 | # GBox Chart for Kubernetes
2 |
3 | {{ template "chart.badgesSection" . }}
4 |
5 | {{ template "chart.description" . }}
6 |
7 | ## Installing the Chart
8 |
9 | To install the chart with the release name `my-release`, run the following commands:
10 |
11 | helm repo add gbox https://gbox-proxy.github.io/gbox
12 | helm install my-release gbox/{{ template "chart.name" . }}
13 |
14 | {{ template "chart.requirementsSection" . }}
15 |
16 | {{ template "chart.valuesSection" . }}
17 |
--------------------------------------------------------------------------------
/charts/gbox/ci/ct-values.yaml:
--------------------------------------------------------------------------------
1 | upstream: https://countries.trevorblades.com/
2 |
--------------------------------------------------------------------------------
/charts/gbox/templates/NOTES.txt:
--------------------------------------------------------------------------------
1 | 1. Get the URL of GBox by running these commands:
2 | {{- if .Values.ingress.enabled }}
3 | {{- range $host := .Values.ingress.hosts }}
4 | {{- range .paths }}
5 | http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
6 | {{- end }}
7 | {{- end }}
8 | {{- else if contains "NodePort" .Values.service.type }}
9 | export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "gbox.fullname" . }})
10 | export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
11 | echo http://$NODE_IP:$NODE_PORT
12 | {{- else if contains "LoadBalancer" .Values.service.type }}
13 | NOTE: It may take a few minutes for the LoadBalancer IP to be available.
     You can watch the status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "gbox.fullname" . }}'
15 | export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "gbox.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
16 | echo http://$SERVICE_IP:{{ .Values.service.port }}
17 | {{- else if contains "ClusterIP" .Values.service.type }}
18 | export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "gbox.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
19 | export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
20 | echo "Visit http://127.0.0.1:8080 to use your application"
21 | kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
22 | {{- end }}
23 |
--------------------------------------------------------------------------------
/charts/gbox/templates/_helpers.tpl:
--------------------------------------------------------------------------------
{{/*
Expand the name of the chart.
Uses .Values.nameOverride when set, otherwise the chart name; truncated
to 63 chars because Kubernetes label values are limited by the DNS spec.
*/}}
{{- define "gbox.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
fullnameOverride, when set, wins over everything else.
*/}}
{{- define "gbox.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
"+" is replaced by "_" because label values may not contain "+".
*/}}
{{- define "gbox.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels applied to every resource of this chart.
Includes the selector labels plus chart/version/managed-by metadata.
*/}}
{{- define "gbox.labels" -}}
helm.sh/chart: {{ include "gbox.chart" . }}
{{ include "gbox.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels.
These must stay stable across upgrades: they are used in Deployment
selectors and Service selectors, which are immutable once created.
*/}}
{{- define "gbox.selectorLabels" -}}
app.kubernetes.io/name: {{ include "gbox.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use.
Falls back to "default" when the chart is told not to create one.
*/}}
{{- define "gbox.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "gbox.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
--------------------------------------------------------------------------------
/charts/gbox/templates/deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: {{ include "gbox.fullname" . }}
5 | labels:
6 | {{- include "gbox.labels" . | nindent 4 }}
7 | spec:
8 | {{- if not .Values.autoscaling.enabled }}
9 | replicas: {{ .Values.replicaCount }}
10 | {{- end }}
11 | selector:
12 | matchLabels:
13 | {{- include "gbox.selectorLabels" . | nindent 6 }}
14 | template:
15 | metadata:
16 | {{- if .Values.podAnnotations }}
17 | annotations:
18 | {{- tpl (toYaml .Values.podAnnotations) . | nindent 8 }}
19 | {{- end }}
20 | labels:
21 | {{- include "gbox.selectorLabels" . | nindent 8 }}
22 | spec:
23 | {{- with .Values.imagePullSecrets }}
24 | imagePullSecrets:
25 | {{- toYaml . | nindent 8 }}
26 | {{- end }}
27 | serviceAccountName: {{ include "gbox.serviceAccountName" . }}
28 | securityContext:
29 | {{- toYaml .Values.podSecurityContext | nindent 8 }}
30 | containers:
31 | - name: {{ .Chart.Name }}
32 | securityContext:
33 | {{- toYaml .Values.securityContext | nindent 12 }}
34 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
35 | imagePullPolicy: {{ .Values.image.pullPolicy }}
36 | env:
37 | - name: GBOX_GLOBAL_DIRECTIVES
38 | value: {{ .Values.globalDirectives | quote }}
39 | - name: GBOX_EXTRA_DIRECTIVES
40 | value: {{ .Values.extraDirectives | quote }}
41 | - name: GBOX_SERVER_NAME
42 | value: ':80'
43 | - name: GBOX_UPSTREAM
44 | value: {{ .Values.upstream | quote }}
45 | - name: GBOX_UPSTREAM_REVERSE_PROXY_DIRECTIVES
46 | value: {{ .Values.reverseProxyDirectives | quote }}
47 | - name: GBOX_FETCH_SCHEMA_INTERVAL
48 | value: {{ .Values.fetchSchemaInterval | quote }}
49 | - name: GBOX_DISABLED_PLAYGROUNDS
50 | value: {{ .Values.disabledPlaygrounds | quote }}
51 | - name: GBOX_DISABLED_INTROSPECTION
52 | value: {{ .Values.disabledIntrospection | quote }}
53 | - name: GBOX_ENABLED_ADMIN_AUTH
54 | value: {{ .Values.adminAuth.enabled | quote }}
55 | - name: GBOX_ENABLED_CACHING
56 | value: {{ .Values.caching.enabled | quote }}
57 | - name: GBOX_CACHING_RULES
58 | value: {{ .Values.caching.rules | quote }}
59 | - name: GBOX_CACHING_VARIES
60 | value: {{ .Values.caching.varies | quote }}
61 | - name: GBOX_CACHING_TYPE_KEYS
62 | value: {{ .Values.caching.typeKeys | quote }}
63 | - name: GBOX_AUTO_INVALIDATE_CACHE
64 | value: {{ .Values.caching.autoInvalidateCache | quote }}
65 | - name: GBOX_CACHING_DEBUG_HEADERS
66 | value: {{ .Values.caching.debugHeaders | quote }}
67 | - name: GBOX_ENABLED_COMPLEXITY
68 | value: {{ .Values.complexity.enabled | quote }}
69 | - name: GBOX_NODE_COUNT_LIMIT
70 | value: {{ .Values.complexity.nodeCountLimit | quote }}
71 | - name: GBOX_MAX_COMPLEXITY
72 | value: {{ .Values.complexity.maxComplexity | quote }}
73 | - name: GBOX_MAX_DEPTH
74 | value: {{ .Values.complexity.maxDepth | quote }}
75 | - name: GBOX_ENABLED_METRICS
76 | value: {{ .Values.metrics.enabled | quote }}
77 | - name: GBOX_METRICS_PATH
78 | value: {{ .Values.metrics.path | quote }}
79 | envFrom:
80 | - secretRef:
81 | name: {{ include "gbox.fullname" . }}
82 | ports:
83 | - name: http
84 | containerPort: 80
85 | protocol: TCP
86 | livenessProbe:
87 | httpGet:
88 | path: /healthz
89 | port: http
90 | readinessProbe:
91 | httpGet:
92 | path: /healthz
93 | port: http
94 | resources:
95 | {{- toYaml .Values.resources | nindent 12 }}
96 | {{- with .Values.nodeSelector }}
97 | nodeSelector:
98 | {{- toYaml . | nindent 8 }}
99 | {{- end }}
100 | {{- with .Values.affinity }}
101 | affinity:
102 | {{- toYaml . | nindent 8 }}
103 | {{- end }}
104 | {{- with .Values.tolerations }}
105 | tolerations:
106 | {{- toYaml . | nindent 8 }}
107 | {{- end }}
108 |
--------------------------------------------------------------------------------
/charts/gbox/templates/hpa.yaml:
--------------------------------------------------------------------------------
{{- if .Values.autoscaling.enabled }}
{{/*
HorizontalPodAutoscaler for the gbox Deployment.
autoscaling/v2 is GA since Kubernetes 1.23 and v2beta1 was removed in
1.25, so pick the API (and its differing metric target syntax) based on
the cluster version, mirroring the version gating used in ingress.yaml.
*/}}
{{- if semverCompare ">=1.23-0" .Capabilities.KubeVersion.GitVersion }}
apiVersion: autoscaling/v2
{{- else }}
apiVersion: autoscaling/v2beta1
{{- end }}
kind: HorizontalPodAutoscaler
metadata:
  name: {{ include "gbox.fullname" . }}
  labels:
    {{- include "gbox.labels" . | nindent 4 }}
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: {{ include "gbox.fullname" . }}
  minReplicas: {{ .Values.autoscaling.minReplicas }}
  maxReplicas: {{ .Values.autoscaling.maxReplicas }}
  metrics:
    {{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
    - type: Resource
      resource:
        name: cpu
        {{- if semverCompare ">=1.23-0" .Capabilities.KubeVersion.GitVersion }}
        target:
          type: Utilization
          averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
        {{- else }}
        targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
        {{- end }}
    {{- end }}
    {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
    - type: Resource
      resource:
        name: memory
        {{- if semverCompare ">=1.23-0" .Capabilities.KubeVersion.GitVersion }}
        target:
          type: Utilization
          averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
        {{- else }}
        targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
        {{- end }}
    {{- end }}
{{- end }}
29 |
--------------------------------------------------------------------------------
/charts/gbox/templates/ingress.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.ingress.enabled -}}
2 | {{- $fullName := include "gbox.fullname" . -}}
3 | {{- $svcPort := .Values.service.port -}}
4 | {{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
5 | {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
6 | {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
7 | {{- end }}
8 | {{- end }}
9 | {{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
10 | apiVersion: networking.k8s.io/v1
11 | {{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
12 | apiVersion: networking.k8s.io/v1beta1
13 | {{- else -}}
14 | apiVersion: extensions/v1beta1
15 | {{- end }}
16 | kind: Ingress
17 | metadata:
18 | name: {{ $fullName }}
19 | labels:
20 | {{- include "gbox.labels" . | nindent 4 }}
21 | {{- with .Values.ingress.annotations }}
22 | annotations:
23 | {{- toYaml . | nindent 4 }}
24 | {{- end }}
25 | spec:
26 | {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
27 | ingressClassName: {{ .Values.ingress.className }}
28 | {{- end }}
29 | {{- if .Values.ingress.tls }}
30 | tls:
31 | {{- range .Values.ingress.tls }}
32 | - hosts:
33 | {{- range .hosts }}
34 | - {{ . | quote }}
35 | {{- end }}
36 | secretName: {{ .secretName }}
37 | {{- end }}
38 | {{- end }}
39 | rules:
40 | {{- range .Values.ingress.hosts }}
41 | - host: {{ .host | quote }}
42 | http:
43 | paths:
44 | {{- range .paths }}
45 | - path: {{ .path }}
46 | {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
47 | pathType: {{ .pathType }}
48 | {{- end }}
49 | backend:
50 | {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
51 | service:
52 | name: {{ $fullName }}
53 | port:
54 | number: {{ $svcPort }}
55 | {{- else }}
56 | serviceName: {{ $fullName }}
57 | servicePort: {{ $svcPort }}
58 | {{- end }}
59 | {{- end }}
60 | {{- end }}
61 | {{- end }}
62 |
--------------------------------------------------------------------------------
/charts/gbox/templates/secrets.yaml:
--------------------------------------------------------------------------------
# Secret holding sensitive runtime configuration for gbox; it is injected
# into the container via envFrom in deployment.yaml.
apiVersion: v1
kind: Secret
type: Opaque
metadata:
  name: {{ include "gbox.fullname" . | quote }}
  labels:
    {{- include "gbox.labels" . | nindent 4 }}
data:
  # storeDsn is itself a template (it references the Redis sub-chart's
  # release name/namespace), so it is rendered through tpl before base64.
  GBOX_STORE_DSN: {{ tpl .Values.caching.storeDsn . | b64enc | quote }}
  GBOX_ADMIN_USERNAME: {{ .Values.adminAuth.username | b64enc | quote }}
  # The password key is omitted when empty so the application can apply
  # its own default instead of receiving an empty string.
  {{- if ne .Values.adminAuth.password "" }}
  GBOX_ADMIN_PASSWORD: {{ .Values.adminAuth.password | b64enc | quote }}
  {{- end }}
--------------------------------------------------------------------------------
/charts/gbox/templates/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: {{ include "gbox.fullname" . }}
5 | labels:
6 | {{- include "gbox.labels" . | nindent 4 }}
7 | spec:
8 | type: {{ .Values.service.type }}
9 | ports:
10 | - port: {{ .Values.service.port }}
11 | targetPort: http
12 | protocol: TCP
13 | name: http
14 | selector:
15 | {{- include "gbox.selectorLabels" . | nindent 4 }}
16 |
--------------------------------------------------------------------------------
/charts/gbox/templates/serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.serviceAccount.create -}}
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: {{ include "gbox.serviceAccountName" . }}
6 | labels:
7 | {{- include "gbox.labels" . | nindent 4 }}
8 | {{- with .Values.serviceAccount.annotations }}
9 | annotations:
10 | {{- toYaml . | nindent 4 }}
11 | {{- end }}
12 | {{- end }}
13 |
--------------------------------------------------------------------------------
/charts/gbox/templates/servicemonitor.yaml:
--------------------------------------------------------------------------------
{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }}
{{/*
Prometheus Operator ServiceMonitor for scraping gbox metrics.
*/}}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ include "gbox.fullname" . }}
  {{- if .Values.metrics.serviceMonitor.namespace }}
  namespace: {{ .Values.metrics.serviceMonitor.namespace }}
  {{- else }}
  namespace: {{ .Release.Namespace | quote }}
  {{- end }}
  labels: {{- include "gbox.labels" . | nindent 4 }}
    {{- if .Values.metrics.serviceMonitor.additionalLabels }}
    {{/*
    additionalLabels is a map, so it must be rendered with toYaml;
    `include` expects a named template and fails when given a dict.
    */}}
    {{- toYaml .Values.metrics.serviceMonitor.additionalLabels | nindent 4 }}
    {{- end }}
spec:
  endpoints:
    - port: http
      path: {{ .Values.metrics.path }}
      {{- if .Values.metrics.serviceMonitor.interval }}
      interval: {{ .Values.metrics.serviceMonitor.interval }}
      {{- end }}
      {{- if .Values.metrics.serviceMonitor.scrapeTimeout }}
      scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
      {{- end }}
      {{- if .Values.metrics.serviceMonitor.honorLabels }}
      honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }}
      {{- end }}
      {{- if .Values.metrics.serviceMonitor.relabellings }}
      relabelings: {{- toYaml .Values.metrics.serviceMonitor.relabellings | nindent 6 }}
      {{- end }}
      {{- if .Values.metrics.serviceMonitor.metricRelabelings }}
      metricRelabelings: {{- toYaml .Values.metrics.serviceMonitor.metricRelabelings | nindent 6 }}
      {{- end }}
  namespaceSelector:
    matchNames:
      - {{ .Release.Namespace }}
  selector:
    matchLabels: {{- include "gbox.selectorLabels" . | nindent 6 }}
{{- end }}
--------------------------------------------------------------------------------
/charts/gbox/templates/tests/test-connection.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: "{{ include "gbox.fullname" . }}-test-connection"
5 | labels:
6 | {{- include "gbox.labels" . | nindent 4 }}
7 | annotations:
8 | "helm.sh/hook": test
9 | spec:
10 | containers:
11 | - name: wget
12 | image: busybox
13 | command: ['wget']
14 | args: ['{{ include "gbox.fullname" . }}:{{ .Values.service.port }}/healthz']
15 | restartPolicy: Never
16 |
--------------------------------------------------------------------------------
/charts/gbox/values.yaml:
--------------------------------------------------------------------------------
1 | # -- The number of replicas (pods) to launch
2 | replicaCount: 1
3 |
4 | image:
5 | # -- Name of the image repository to pull the container image from.
6 | repository: gboxproxy/gbox
7 | # -- [Image pull policy](https://kubernetes.io/docs/concepts/containers/images/#updating-images) for updating already existing images on a node.
8 | pullPolicy: IfNotPresent
9 | # -- Overrides the image tag whose default is the chart appVersion.
10 | tag: ""
11 | # -- Reference to one or more secrets to be used when [pulling images](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#create-a-pod-that-uses-your-secret) (from private registries).
12 | imagePullSecrets: []
13 | # -- A name in place of the chart name for `app:` labels.
14 | nameOverride: ""
15 | # -- A name to substitute for the full names of resources.
16 | fullnameOverride: ""
17 |
18 | serviceAccount:
19 | # -- Specifies whether a service account should be created
20 | create: true
21 | # -- Annotations to add to the service account
22 | annotations: {}
23 | # -- The name of the service account to use.
24 | # If not set and create is true, a name is generated using the fullname template
25 | name: ""
26 |
27 | # -- Annotations to be added to pods.
28 | # @default -- See [values.yaml](values.yaml).
29 | podAnnotations:
30 | prometheus.io/scrape: "true"
31 | prometheus.io/port: "80"
32 | prometheus.io/path: "{{ .Values.metrics.path }}"
33 |
34 | # -- Pod [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod).
35 | # See the [API reference](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context) for details.
36 | podSecurityContext: {}
37 | # fsGroup: 2000
38 |
39 | # -- Container [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container).
40 | # See the [API reference](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-1) for details.
41 | securityContext: {}
42 | # capabilities:
43 | # drop:
44 | # - ALL
45 | # readOnlyRootFilesystem: true
46 | # runAsNonRoot: true
47 | # runAsUser: 1000
48 |
49 | service:
50 | # -- Kubernetes [service type](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types).
51 | type: ClusterIP
52 | # -- Service port.
53 | port: 80
54 |
55 | ingress:
56 | # -- Enable [ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/).
57 | enabled: false
58 | # -- Ingress [class name](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class).
59 | className: ""
60 | # -- Annotations to be added to the ingress.
61 | annotations: {}
62 | # kubernetes.io/ingress.class: nginx
63 | # kubernetes.io/tls-acme: "true"
64 | # -- Ingress host configuration.
65 | # @default -- See [values.yaml](values.yaml).
66 | hosts:
67 | - host: chart-example.local
68 | paths:
69 | - path: /
70 | pathType: ImplementationSpecific
71 | # -- Ingress TLS configuration.
72 | # @default -- See [values.yaml](values.yaml).
73 | tls: []
74 | # - secretName: chart-example-tls
75 | # hosts:
76 | # - chart-example.local
77 |
78 | # -- Container resource [requests and limits](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/).
79 | # See the [API reference](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources) for details.
80 | # @default -- No requests or limits.
81 | resources: {}
82 | # We usually recommend not to specify default resources and to leave this as a conscious
83 | # choice for the user. This also increases chances charts run on environments with little
84 | # resources, such as Minikube. If you do want to specify resources, uncomment the following
85 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
86 | # limits:
87 | # cpu: 100m
88 | # memory: 128Mi
89 | # requests:
90 | # cpu: 100m
91 | # memory: 128Mi
92 |
93 | # -- Autoscaling by resources
94 | # @default -- Disabled by default.
95 | autoscaling:
96 | enabled: false
97 | minReplicas: 1
98 | maxReplicas: 100
99 | targetCPUUtilizationPercentage: 80
100 | # targetMemoryUtilizationPercentage: 80
101 |
102 | # -- [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) configuration.
103 | nodeSelector: {}
104 |
105 | # -- [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) for node taints.
106 | # See the [API reference](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling) for details.
107 | tolerations: []
108 |
109 | # -- [Affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) configuration.
110 | # See the [API reference](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling) for details.
111 | affinity: {}
112 |
113 | # -- Caddy [global directives](https://caddyserver.com/docs/caddyfile/options).
114 | globalDirectives: ''
115 |
116 | # -- Your upstream GraphQL server url.
117 | upstream: ""
118 |
119 | # -- Reverse proxy [directives](https://caddyserver.com/docs/caddyfile/directives/reverse_proxy).
120 | reverseProxyDirectives: ""
121 |
122 | # -- Interval times to introspect upstream schema definition.
123 | fetchSchemaInterval: 10m
124 |
125 | # -- Whether disable playgrounds or not.
126 | disabledPlaygrounds: false
127 |
128 | # -- Whether disable introspection queries or not.
129 | disabledIntrospection: false
130 |
131 | complexity:
132 | # -- Whether enable filter query complexity or not.
133 | enabled: true
134 |
135 | # -- Max query depth.
136 | maxDepth: 15
137 |
138 | # -- The maximum number of Nodes a query may return.
139 | nodeCountLimit: 60
140 |
141 | # -- The maximum number of Node requests that might be needed to execute the query.
142 | maxComplexity: 60
143 |
144 | caching:
145 | # -- Whether enable caching or not.
146 | enabled: true
147 |
  # -- By default, this chart uses Redis to store cached data; if you want to use an external Redis server, remember to disable the internal Redis sub-chart.
  # @default -- See [values.yaml](values.yaml).
150 | storeDsn: '{{ printf "redis://%s@%s-redis-master.%s.svc.cluster.local:6379" .Values.redis.auth.password .Release.Name .Release.Namespace }}'
151 |
152 | # -- Caching rules configuration.
153 | # @default -- Will cache all query results, see [values.yaml](values.yaml).
154 | rules: |
155 | default {
156 | max_age 30m
157 | swr 30m
158 | }
159 |
160 | # -- Caching varies configuration.
161 | varies: ''
162 |
163 | # -- Specific type keys configuration, by default `id` is key of all types.
164 | typeKeys: ''
165 |
166 | # -- Whether auto invalidate cached data through mutation results or not.
167 | autoInvalidateCache: 'true'
168 |
169 | # -- Whether add debug headers to query operations responses or not.
170 | debugHeaders: 'false'
171 |
172 | # -- GBox extra directives, useful in cases you may want to add CORS config and/or http headers when fetch schema from upstream.
173 | extraDirectives: ''
174 |
175 | adminAuth:
  # -- Whether to enable basic auth when interacting with the GraphQL admin endpoint.
177 | enabled: true
178 |
179 | # -- Basic auth username.
180 | username: 'gbox'
181 |
182 | # -- Basic auth password.
183 | # @default -- "gbox"
184 | password: ''
185 |
186 | redis:
187 | # -- Whether enable Redis sub-chart or not.
188 | enabled: true
189 |
190 | # -- Set Redis architecture standalone or replication.
191 | architecture: standalone
192 |
193 | auth:
194 | password: '!ChangeMe!'
195 |
196 | metrics:
197 | # -- Whether enable Prometheus metric endpoint or not
198 | enabled: true
199 |
200 | # -- Url path of metric endpoint.
201 | path: /metrics
202 |
203 | # Prometheus Service Monitor
204 | serviceMonitor:
205 | # -- Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator. When enabled @param metrics.enabled must be set to true
206 | enabled: false
207 |
208 | # -- The namespace in which the ServiceMonitor will be created
209 | namespace: ""
210 |
211 | # -- The interval at which metrics should be scraped
212 | interval: 30s
213 |
214 | # -- The timeout after which the scrape is ended
215 | scrapeTimeout: ""
216 |
217 | # -- Metrics RelabelConfigs to apply to samples before scraping.
218 | relabellings: []
219 |
220 | # -- Metrics RelabelConfigs to apply to samples before ingestion.
221 | metricRelabelings: []
222 |
223 | # -- Specify honorLabels parameter to add the scrape endpoint
224 | honorLabels: false
225 |
226 | # -- Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus
227 | additionalLabels: {}
228 |
--------------------------------------------------------------------------------
/cmd/main.go:
--------------------------------------------------------------------------------
1 | // Copied from https://github.com/caddyserver/xcaddy/blob/b7fd102f41e12be4735dc77b0391823989812ce8/environment.go#L251
2 | package main
3 |
4 | import (
5 | caddycmd "github.com/caddyserver/caddy/v2/cmd"
6 | _ "github.com/caddyserver/caddy/v2/modules/standard"
7 | _ "github.com/gbox-proxy/gbox"
8 | )
9 |
// main delegates to the Caddy CLI entry point. The gbox module and the
// standard Caddy modules are registered via the blank imports above, so
// the resulting binary is a Caddy build that ships with gbox included.
func main() {
	caddycmd.Main()
}
13 |
--------------------------------------------------------------------------------
/complexity.go:
--------------------------------------------------------------------------------
1 | package gbox
2 |
3 | import (
4 | "fmt"
5 |
6 | "github.com/jensneuse/graphql-go-tools/pkg/graphql"
7 | )
8 |
// Complexity groups the query-complexity limits that are enforced on
// incoming GraphQL requests by validateRequest. A zero value for any
// field disables the corresponding limit.
type Complexity struct {
	// Max query depth accept, disabled by default.
	MaxDepth int `json:"max_depth,omitempty"`

	// Query node count limit, disabled by default.
	NodeCountLimit int `json:"node_count_limit,omitempty"`

	// Max query complexity, disabled by default.
	// NOTE(review): the JSON key is "complexity" rather than
	// "max_complexity" — presumably kept for config compatibility;
	// confirm before ever renaming it.
	MaxComplexity int `json:"complexity,omitempty"`
}
19 |
20 | func (c *Complexity) validateRequest(s *graphql.Schema, r *graphql.Request) (requestErrors graphql.RequestErrors) {
21 | result, err := r.CalculateComplexity(graphql.DefaultComplexityCalculator, s)
22 | if err != nil {
23 | requestErrors = graphql.RequestErrorsFromError(err)
24 |
25 | return requestErrors
26 | }
27 |
28 | if c.MaxDepth > 0 && result.Depth > c.MaxDepth {
29 | requestErrors = append(requestErrors, graphql.RequestError{Message: fmt.Sprintf("query max depth is %d, current %d", c.MaxDepth, result.Depth)})
30 | }
31 |
32 | if c.NodeCountLimit > 0 && result.NodeCount > c.NodeCountLimit {
33 | requestErrors = append(requestErrors, graphql.RequestError{Message: fmt.Sprintf("query node count limit is %d, current %d", c.NodeCountLimit, result.NodeCount)})
34 | }
35 |
36 | if c.MaxComplexity > 0 && result.Complexity > c.MaxComplexity {
37 | requestErrors = append(requestErrors, graphql.RequestError{Message: fmt.Sprintf("max query complexity allow is %d, current %d", c.MaxComplexity, result.Complexity)})
38 | }
39 |
40 | return requestErrors
41 | }
42 |
--------------------------------------------------------------------------------
/complexity_test.go:
--------------------------------------------------------------------------------
1 | package gbox
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/jensneuse/graphql-go-tools/pkg/graphql"
7 | "github.com/stretchr/testify/require"
8 | )
9 |
10 | func TestComplexity(t *testing.T) {
11 | testCases := map[string]struct {
12 | complexity *Complexity
13 | expectedErrorCount int
14 | }{
15 | "disabled_all": {
16 | complexity: &Complexity{},
17 | },
18 | "invalid": {
19 | complexity: &Complexity{
20 | NodeCountLimit: 1,
21 | MaxDepth: 1,
22 | MaxComplexity: 1,
23 | },
24 | expectedErrorCount: 3,
25 | },
26 | "invalid_node_count_limit": {
27 | complexity: &Complexity{
28 | NodeCountLimit: 1,
29 | },
30 | expectedErrorCount: 1,
31 | },
32 | "invalid_max_depth": {
33 | complexity: &Complexity{
34 | MaxDepth: 1,
35 | },
36 | expectedErrorCount: 1,
37 | },
38 | "invalid_max_complexity": {
39 | complexity: &Complexity{
40 | MaxComplexity: 1,
41 | },
42 | expectedErrorCount: 1,
43 | },
44 | }
45 |
46 | s, _ := graphql.NewSchemaFromString(`
47 | type Query {
48 | books: [Book!]!
49 | }
50 |
51 | type Book {
52 | id: ID!
53 | title: String!
54 | buyers: [User!]!
55 | }
56 |
57 | type User {
58 | id: ID!
59 | name: String!
60 | }
61 | `)
62 | gqlRequest := &graphql.Request{
63 | Query: `query GetBooks {
64 | books {
65 | buyers {
66 | id
67 | name
68 | }
69 | }
70 | }`,
71 | }
72 | s.Normalize()
73 | gqlRequest.Normalize(s)
74 |
75 | for name, testCase := range testCases {
76 | err := testCase.complexity.validateRequest(s, gqlRequest)
77 |
78 | require.Equalf(t, testCase.expectedErrorCount, err.Count(), "case %s: unexpected error count", name)
79 | }
80 | }
81 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.8'
2 |
3 | volumes:
4 | data:
5 | mod-cache:
6 |
7 | services:
8 | air:
9 | image: cosmtrek/air:v1.29.0
10 | working_dir: /app
11 | ports:
12 | - 8888:80
13 | volumes:
14 | - ./:/app:rw
15 | - mod-cache:/go/pkg/mod/cache:rw,cached
16 | - data:/data:rw,cached
17 | environment:
18 | XDG_DATA_HOME: /data
19 | GBOX_STORE_DSN: redis://redis:6379
20 | GBOX_SERVER_NAME: :80
21 | GBOX_UPSTREAM: https://countries.trevorblades.com/
22 | GBOX_ENABLED_METRICS: 'true'
23 | GBOX_GLOBAL_DIRECTIVES: |
24 | debug
25 | admin off
26 | redis:
27 | image: redis
28 | ports:
29 | - 6379:6379
--------------------------------------------------------------------------------
/gbox.go:
--------------------------------------------------------------------------------
// GBox is a reverse proxy in front of any GraphQL server for caching, securing and monitoring.
2 |
3 | package gbox
4 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/gbox-proxy/gbox
2 |
3 | go 1.17
4 |
5 | require (
6 | github.com/99designs/gqlgen v0.17.2
7 | github.com/caddyserver/caddy/v2 v2.5.0
8 | github.com/coocood/freecache v1.2.1
9 | github.com/eko/gocache/v2 v2.3.0
10 | github.com/go-redis/redis/v8 v8.11.5
11 | github.com/gobwas/ws v1.0.4
12 | github.com/gorilla/handlers v1.5.1
13 | github.com/gorilla/mux v1.8.0
14 | github.com/jensneuse/graphql-go-tools v1.51.0
15 | github.com/pkg/errors v0.9.1
16 | github.com/pquerna/cachecontrol v0.1.0
17 | github.com/prometheus/client_golang v1.12.1
18 | github.com/prometheus/client_model v0.2.0
19 | github.com/stretchr/testify v1.7.1
20 | github.com/vektah/gqlparser/v2 v2.4.0
21 | go.uber.org/zap v1.21.0
22 | )
23 |
24 | require (
25 | filippo.io/edwards25519 v1.0.0-rc.1 // indirect
26 | github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect
27 | github.com/BurntSushi/toml v1.0.0 // indirect
28 | github.com/Masterminds/goutils v1.1.1 // indirect
29 | github.com/Masterminds/semver v1.5.0 // indirect
30 | github.com/Masterminds/semver/v3 v3.1.1 // indirect
31 | github.com/Masterminds/sprig v2.22.0+incompatible // indirect
32 | github.com/Masterminds/sprig/v3 v3.2.2 // indirect
33 | github.com/XiaoMi/pegasus-go-client v0.0.0-20210427083443-f3b6b08bc4c2 // indirect
34 | github.com/agnivade/levenshtein v1.1.1 // indirect
35 | github.com/alecthomas/chroma v0.10.0 // indirect
36 | github.com/antlr/antlr4 v0.0.0-20200503195918-621b933c7a7f // indirect
37 | github.com/aryann/difflib v0.0.0-20210328193216-ff5ff6dc229b // indirect
38 | github.com/beorn7/perks v1.0.1 // indirect
39 | github.com/bradfitz/gomemcache v0.0.0-20220106215444-fb4bf637b56d // indirect
40 | github.com/buger/jsonparser v1.1.1 // indirect
41 | github.com/caddyserver/certmagic v0.16.1 // indirect
42 | github.com/cenkalti/backoff/v4 v4.1.3 // indirect
43 | github.com/cespare/xxhash v1.1.0 // indirect
44 | github.com/cespare/xxhash/v2 v2.1.2 // indirect
45 | github.com/cheekybits/genny v1.0.0 // indirect
46 | github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e // indirect
47 | github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
48 | github.com/davecgh/go-spew v1.1.1 // indirect
49 | github.com/dgraph-io/badger v1.6.2 // indirect
50 | github.com/dgraph-io/badger/v2 v2.2007.4 // indirect
51 | github.com/dgraph-io/ristretto v0.1.0 // indirect
52 | github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect
53 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
54 | github.com/dlclark/regexp2 v1.4.0 // indirect
55 | github.com/dustin/go-humanize v1.0.1-0.20200219035652-afde56e7acac // indirect
56 | github.com/eclipse/paho.mqtt.golang v1.2.0 // indirect
57 | github.com/felixge/httpsnoop v1.0.2 // indirect
58 | github.com/fsnotify/fsnotify v1.5.1 // indirect
59 | github.com/go-chi/chi v4.1.2+incompatible // indirect
60 | github.com/go-kit/kit v0.10.0 // indirect
61 | github.com/go-logfmt/logfmt v0.5.1 // indirect
62 | github.com/go-logr/logr v1.2.2 // indirect
63 | github.com/go-logr/stdr v1.2.2 // indirect
64 | github.com/go-sql-driver/mysql v1.6.0 // indirect
65 | github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
66 | github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee // indirect
67 | github.com/gobwas/pool v0.2.0 // indirect
68 | github.com/golang/glog v1.0.0 // indirect
69 | github.com/golang/protobuf v1.5.2 // indirect
70 | github.com/golang/snappy v0.0.4 // indirect
71 | github.com/google/cel-go v0.7.3 // indirect
72 | github.com/google/uuid v1.3.0 // indirect
73 | github.com/gorilla/websocket v1.4.2 // indirect
74 | github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
75 | github.com/hashicorp/golang-lru v0.5.4 // indirect
76 | github.com/huandu/xstrings v1.3.2 // indirect
77 | github.com/imdario/mergo v0.3.12 // indirect
78 | github.com/jackc/chunkreader/v2 v2.0.1 // indirect
79 | github.com/jackc/pgconn v1.10.1 // indirect
80 | github.com/jackc/pgio v1.0.0 // indirect
81 | github.com/jackc/pgpassfile v1.0.0 // indirect
82 | github.com/jackc/pgproto3/v2 v2.2.0 // indirect
83 | github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect
84 | github.com/jackc/pgtype v1.9.0 // indirect
85 | github.com/jackc/pgx/v4 v4.14.0 // indirect
86 | github.com/jensneuse/abstractlogger v0.0.4 // indirect
87 | github.com/jensneuse/byte-template v0.0.0-20200214152254-4f3cf06e5c68 // indirect
88 | github.com/jensneuse/graphql-go-tools/examples/federation v0.0.0-20220407073143-b484a4fba0f8 // indirect
89 | github.com/jensneuse/pipeline v0.0.0-20200117120358-9fb4de085cd6 // indirect
90 | github.com/klauspost/compress v1.15.0 // indirect
91 | github.com/klauspost/cpuid/v2 v2.0.11 // indirect
92 | github.com/libdns/libdns v0.2.1 // indirect
93 | github.com/lucas-clemente/quic-go v0.26.0 // indirect
94 | github.com/manifoldco/promptui v0.9.0 // indirect
95 | github.com/marten-seemann/qpack v0.2.1 // indirect
96 | github.com/marten-seemann/qtls-go1-16 v0.1.5 // indirect
97 | github.com/marten-seemann/qtls-go1-17 v0.1.1 // indirect
98 | github.com/marten-seemann/qtls-go1-18 v0.1.1 // indirect
99 | github.com/mattn/go-colorable v0.1.8 // indirect
100 | github.com/mattn/go-isatty v0.0.13 // indirect
101 | github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
102 | github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
103 | github.com/mholt/acmez v1.0.2 // indirect
104 | github.com/micromdm/scep/v2 v2.1.0 // indirect
105 | github.com/miekg/dns v1.1.46 // indirect
106 | github.com/mitchellh/copystructure v1.2.0 // indirect
107 | github.com/mitchellh/go-ps v1.0.0 // indirect
108 | github.com/mitchellh/mapstructure v1.4.2 // indirect
109 | github.com/mitchellh/reflectwalk v1.0.2 // indirect
110 | github.com/nats-io/nats-server/v2 v2.3.2 // indirect
111 | github.com/nats-io/nats.go v1.11.1-0.20210623165838-4b75fc59ae30 // indirect
112 | github.com/nats-io/nkeys v0.3.0 // indirect
113 | github.com/nats-io/nuid v1.0.1 // indirect
114 | github.com/nxadm/tail v1.4.8 // indirect
115 | github.com/onsi/ginkgo v1.16.5 // indirect
116 | github.com/pegasus-kv/thrift v0.13.0 // indirect
117 | github.com/pmezard/go-difflib v1.0.0 // indirect
118 | github.com/prometheus/common v0.33.0 // indirect
119 | github.com/prometheus/procfs v0.7.3 // indirect
120 | github.com/qri-io/jsonpointer v0.1.1 // indirect
121 | github.com/qri-io/jsonschema v0.2.1 // indirect
122 | github.com/rs/xid v1.2.1 // indirect
123 | github.com/russross/blackfriday/v2 v2.1.0 // indirect
124 | github.com/shopspring/decimal v1.2.0 // indirect
125 | github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
126 | github.com/sirupsen/logrus v1.8.1 // indirect
127 | github.com/slackhq/nebula v1.5.2 // indirect
128 | github.com/smallstep/certificates v0.19.0 // indirect
129 | github.com/smallstep/cli v0.18.0 // indirect
130 | github.com/smallstep/nosql v0.4.0 // indirect
131 | github.com/smallstep/truststore v0.11.0 // indirect
132 | github.com/spf13/cast v1.4.1 // indirect
133 | github.com/stoewer/go-strcase v1.2.0 // indirect
134 | github.com/tailscale/tscert v0.0.0-20220125204807-4509a5fbaf74 // indirect
135 | github.com/tidwall/gjson v1.11.0 // indirect
136 | github.com/tidwall/match v1.1.1 // indirect
137 | github.com/tidwall/pretty v1.2.0 // indirect
138 | github.com/tidwall/sjson v1.0.4 // indirect
139 | github.com/urfave/cli v1.22.5 // indirect
140 | github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect
141 | github.com/yuin/goldmark v1.4.8 // indirect
142 | github.com/yuin/goldmark-highlighting v0.0.0-20220208100518-594be1970594 // indirect
143 | go.etcd.io/bbolt v1.3.6 // indirect
144 | go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
145 | go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0 // indirect
146 | go.opentelemetry.io/otel v1.4.0 // indirect
147 | go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.0 // indirect
148 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.0 // indirect
149 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.4.0 // indirect
150 | go.opentelemetry.io/otel/internal/metric v0.27.0 // indirect
151 | go.opentelemetry.io/otel/metric v0.27.0 // indirect
152 | go.opentelemetry.io/otel/sdk v1.4.0 // indirect
153 | go.opentelemetry.io/otel/trace v1.4.0 // indirect
154 | go.opentelemetry.io/proto/otlp v0.12.0 // indirect
155 | go.step.sm/cli-utils v0.7.0 // indirect
156 | go.step.sm/crypto v0.16.1 // indirect
157 | go.step.sm/linkedca v0.15.0 // indirect
158 | go.uber.org/atomic v1.9.0 // indirect
159 | go.uber.org/multierr v1.6.0 // indirect
160 | golang.org/x/crypto v0.0.0-20220210151621-f4118a5b28e2 // indirect
161 | golang.org/x/mod v0.5.1 // indirect
162 | golang.org/x/net v0.0.0-20220412020605-290c469a71a5 // indirect
163 | golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
164 | golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect
165 | golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
166 | golang.org/x/text v0.3.8-0.20211004125949-5bd84dd9b33b // indirect
167 | golang.org/x/tools v0.1.9 // indirect
168 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
169 | google.golang.org/appengine v1.6.7 // indirect
170 | google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf // indirect
171 | google.golang.org/grpc v1.44.0 // indirect
172 | google.golang.org/protobuf v1.28.0 // indirect
173 | gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
174 | gopkg.in/square/go-jose.v2 v2.6.0 // indirect
175 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
176 | gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 // indirect
177 | gopkg.in/yaml.v2 v2.4.0 // indirect
178 | gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
179 | howett.net/plist v1.0.0 // indirect
180 | k8s.io/apimachinery v0.23.5 // indirect
181 | nhooyr.io/websocket v1.8.7 // indirect
182 | )
183 |
--------------------------------------------------------------------------------
/handler.go:
--------------------------------------------------------------------------------
1 | package gbox
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "fmt"
7 | "net/http"
8 |
9 | "github.com/caddyserver/caddy/v2"
10 | "github.com/caddyserver/caddy/v2/modules/caddyhttp"
11 | "github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy"
12 | "github.com/caddyserver/caddy/v2/modules/caddyhttp/rewrite"
13 | "github.com/jensneuse/graphql-go-tools/pkg/ast"
14 | "github.com/jensneuse/graphql-go-tools/pkg/graphql"
15 | "go.uber.org/zap"
16 | )
17 |
18 | const (
19 | errorReporterCtxKey caddy.CtxKey = "gbox_error_wrapper"
20 | nextHandlerCtxKey caddy.CtxKey = "gbox_caddy_handler"
21 | )
22 |
23 | func init() { // nolint:gochecknoinits
24 | caddy.RegisterModule(Handler{})
25 | }
26 |
27 | // Handler implements an HTTP handler as a GraphQL reverse proxy server for caching, securing, and monitoring.
28 | type Handler struct {
29 | // Rewrite
30 | RewriteRaw json.RawMessage `json:"rewrite_raw,omitempty" caddy:"namespace=http.handlers inline_key=rewrite"`
31 |
32 | // Reverse proxy
33 | ReverseProxyRaw json.RawMessage `json:"reverse_proxy,omitempty" caddy:"namespace=http.handlers inline_key=reverse_proxy"`
34 |
35 | // Upstream graphql server url
36 | Upstream string `json:"upstream,omitempty"`
37 |
38 | // Fetch schema interval, disabled by default.
39 | FetchSchemaInterval caddy.Duration `json:"fetch_schema_interval,omitempty"`
40 |
41 | // Fetch schema request timeout, "30s" by default
42 | FetchSchemaTimeout caddy.Duration `json:"fetch_schema_timeout,omitempty"`
43 |
44 | // Fetch schema headers
45 | FetchSchemaHeader http.Header `json:"fetch_schema_headers,omitempty"`
46 |
47 | // Whether to disable introspection request of downstream.
48 | DisabledIntrospection bool `json:"disabled_introspection,omitempty"`
49 |
50 | // Whether to disable playground paths.
51 | DisabledPlaygrounds bool `json:"disabled_playgrounds,omitempty"`
52 |
53 | // Request complexity settings, disabled by default.
54 | Complexity *Complexity `json:"complexity,omitempty"`
55 |
56 | // Caching queries result settings, disabled by default.
57 | Caching *Caching `json:"caching,omitempty"`
58 |
59 | // Cors origins
60 | CORSOrigins []string `json:"cors_origins,omitempty"`
61 |
62 | // Cors allowed headers
63 | CORSAllowedHeaders []string `json:"cors_allowed_headers,omitempty"`
64 |
65 | ReverseProxy *reverseproxy.Handler `json:"-"`
66 | Rewrite *rewrite.Rewrite `json:"-"`
67 | ctxBackground context.Context
68 | ctxBackgroundCancel func()
69 | logger *zap.Logger
70 | schema *graphql.Schema
71 | schemaDocument *ast.Document
72 | router http.Handler
73 | metrics *Metrics
74 | }
75 |
76 | type errorReporter struct {
77 | error
78 | }
79 |
80 | func (h Handler) CaddyModule() caddy.ModuleInfo {
81 | return caddy.ModuleInfo{
82 | ID: "http.handlers.gbox",
83 | New: func() caddy.Module {
84 | mh := new(Handler)
85 | mh.FetchSchemaHeader = make(http.Header)
86 | mh.ctxBackground, mh.ctxBackgroundCancel = context.WithCancel(context.Background())
87 | mh.schema = new(graphql.Schema)
88 |
89 | return mh
90 | },
91 | }
92 | }
93 |
94 | func (h *Handler) Provision(ctx caddy.Context) (err error) {
95 | h.metrics = metrics
96 | h.logger = ctx.Logger(h)
97 | h.initRouter()
98 |
99 | var m interface{}
100 | m, err = ctx.LoadModule(h, "ReverseProxyRaw")
101 |
102 | if err != nil {
103 | return fmt.Errorf("fail to load reverse proxy module: %w", err)
104 | }
105 |
106 | h.ReverseProxy = m.(*reverseproxy.Handler)
107 | m, err = ctx.LoadModule(h, "RewriteRaw")
108 |
109 | if err != nil {
110 | return fmt.Errorf("fail to load rewrite module: %w", err)
111 | }
112 |
113 | h.Rewrite = m.(*rewrite.Rewrite)
114 |
115 | if h.Caching != nil {
116 | if err = h.Caching.Provision(ctx); err != nil {
117 | return err
118 | }
119 |
120 | h.Caching.withLogger(h.logger)
121 | h.Caching.withMetrics(h)
122 | }
123 |
124 | if h.FetchSchemaTimeout == 0 {
125 | timeout, _ := caddy.ParseDuration("30s")
126 | h.FetchSchemaTimeout = caddy.Duration(timeout)
127 | }
128 |
129 | sf := &schemaFetcher{
130 | upstream: h.Upstream,
131 | header: h.FetchSchemaHeader,
132 | timeout: h.FetchSchemaTimeout,
133 | interval: h.FetchSchemaInterval,
134 | logger: h.logger,
135 | context: h.ctxBackground,
136 | onSchemaChanged: h.onSchemaChanged,
137 | caching: h.Caching,
138 | }
139 |
140 | if err = sf.Provision(ctx); err != nil {
141 | h.logger.Error("fail to fetch upstream schema", zap.Error(err))
142 | }
143 |
144 | return err
145 | }
146 |
147 | func (h *Handler) Validate() error {
148 | if h.Caching != nil {
149 | if err := h.Caching.Validate(); err != nil {
150 | return err
151 | }
152 | }
153 |
154 | return nil
155 | }
156 |
157 | func (h *Handler) onSchemaChanged(oldSchemaDocument, newSchemaDocument *ast.Document, oldSchema, newSchema *graphql.Schema) {
158 | h.schema = newSchema
159 | h.schemaDocument = newSchemaDocument
160 |
161 | if h.Caching != nil && oldSchema != nil {
162 | h.logger.Info("schema changed: purge all query result cached of old schema")
163 |
164 | if err := h.Caching.PurgeQueryResultBySchema(h.ctxBackground, oldSchema); err != nil {
165 | h.logger.Error("purge all query result failed", zap.Error(err))
166 | }
167 | }
168 | }
169 |
170 | func (h *Handler) Cleanup() error {
171 | h.ctxBackgroundCancel()
172 |
173 | if h.Caching != nil {
174 | return h.Caching.Cleanup()
175 | }
176 |
177 | return nil
178 | }
179 |
180 | func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request, n caddyhttp.Handler) error {
181 | reporter := new(errorReporter)
182 | ctx := context.WithValue(r.Context(), nextHandlerCtxKey, n)
183 | ctx = context.WithValue(ctx, errorReporterCtxKey, reporter)
184 |
185 | h.router.ServeHTTP(w, r.WithContext(ctx))
186 |
187 | return reporter.error
188 | }
189 |
190 | // Interface guards.
191 | var (
192 | _ caddy.Module = (*Handler)(nil)
193 | _ caddy.Provisioner = (*Handler)(nil)
194 | _ caddy.Validator = (*Handler)(nil)
195 | _ caddy.CleanerUpper = (*Handler)(nil)
196 | _ caddyhttp.MiddlewareHandler = (*Handler)(nil)
197 | )
198 |
--------------------------------------------------------------------------------
/internal/testserver/gqlgen.yml:
--------------------------------------------------------------------------------
1 | # Where are all the schema files located? globs are supported eg src/**/*.graphqls
2 | schema:
3 | - './*.graphqls'
4 |
5 | # Where should the generated server code go?
6 | exec:
7 | filename: generated/generated.go
8 | package: generated
9 |
10 | # Uncomment to enable federation
11 | # federation:
12 | # filename: graph/generated/federation.go
13 | # package: generated
14 |
15 | # Where should any generated models go?
16 | model:
17 | filename: model/models_gen.go
18 | package: model
19 |
20 | # Where should the resolver implementations go?
21 | resolver:
22 | layout: follow-schema
23 | dir: .
24 | package: testserver
25 |
26 | # Optional: turn on use `gqlgen:"fieldName"` tags in your models
27 | # struct_tag: json
28 |
29 | # Optional: turn on to use []Thing instead of []*Thing
30 | # omit_slice_element_pointers: false
31 |
32 | # Optional: set to speed up generation time by not performing a final validation pass.
33 | # skip_validation: true
34 |
35 | # gqlgen will search for any type names in the schema in these go packages
36 | # if they match it will use them, otherwise it will generate them.
37 | autobind:
38 | # - "github.com/gbox-proxy/gbox/internal/testserver/model"
39 |
40 | # This section declares type mapping between the GraphQL and go type systems
41 | #
42 | # The first line in each type will be used as defaults for resolver arguments and
43 | # modelgen, the others will be allowed when binding to fields. Configure them to
44 | # your liking
45 | models:
46 | ID:
47 | model:
48 | - github.com/99designs/gqlgen/graphql.Int
49 | Int:
50 | model:
51 | - github.com/99designs/gqlgen/graphql.Int
52 |
--------------------------------------------------------------------------------
/internal/testserver/model/models_gen.go:
--------------------------------------------------------------------------------
1 | // Code generated by github.com/99designs/gqlgen, DO NOT EDIT.
2 |
3 | package model
4 |
5 | type BookTest struct {
6 | ID int `json:"id"`
7 | Title string `json:"title"`
8 | }
9 |
10 | type UserTest struct {
11 | ID int `json:"id"`
12 | Name string `json:"name"`
13 | Books []*BookTest `json:"books"`
14 | }
15 |
--------------------------------------------------------------------------------
/internal/testserver/resolver.go:
--------------------------------------------------------------------------------
1 | package testserver
2 |
3 | //go:generate go run -mod=mod github.com/99designs/gqlgen generate
4 |
5 | // This file will not be regenerated automatically.
6 | //
7 | // It serves as dependency injection for your app, add any dependencies you require here.
8 |
9 | type Resolver struct{}
10 |
--------------------------------------------------------------------------------
/internal/testserver/schema.graphqls:
--------------------------------------------------------------------------------
1 | schema {
2 | query: QueryTest
3 | mutation: MutationTest
4 | }
5 |
6 | type BookTest {
7 | id: ID!
8 | title: String!
9 | }
10 |
11 | type UserTest {
12 | id: ID!
13 | name: String!
14 | books: [BookTest!]!
15 | }
16 |
17 | type QueryTest {
18 | users: [UserTest!]!
19 | books: [BookTest!]!
20 | }
21 |
22 | type MutationTest {
23 | updateUsers: [UserTest!]!
24 | }
--------------------------------------------------------------------------------
/internal/testserver/schema.resolvers.go:
--------------------------------------------------------------------------------
1 | package testserver
2 |
3 | // This file will be automatically regenerated based on the schema, any resolver implementations
4 | // will be copied through when generating and any unknown code will be moved to the end.
5 |
6 | import (
7 | "context"
8 |
9 | "github.com/gbox-proxy/gbox/internal/testserver/generated"
10 | "github.com/gbox-proxy/gbox/internal/testserver/model"
11 | )
12 |
13 | func (r *mutationTestResolver) UpdateUsers(ctx context.Context) ([]*model.UserTest, error) {
14 | return []*model.UserTest{
15 | {
16 | ID: 1,
17 | Name: "A",
18 | Books: []*model.BookTest{
19 | {
20 | ID: 1,
21 | Title: "A - Book 1",
22 | },
23 | {
24 | ID: 2,
25 | Title: "A - Book 2",
26 | },
27 | },
28 | },
29 | {
30 | ID: 2,
31 | Name: "B",
32 | Books: []*model.BookTest{
33 | {
34 | ID: 3,
35 | Title: "B - Book 1",
36 | },
37 | },
38 | },
39 | // Test ID 3 will be missing in purging tags debug header.
40 | }, nil
41 | }
42 |
43 | func (r *queryTestResolver) Users(ctx context.Context) ([]*model.UserTest, error) {
44 | return []*model.UserTest{
45 | {
46 | ID: 1,
47 | Name: "A",
48 | Books: []*model.BookTest{
49 | {
50 | ID: 1,
51 | Title: "A - Book 1",
52 | },
53 | {
54 | ID: 2,
55 | Title: "A - Book 2",
56 | },
57 | },
58 | },
59 | {
60 | ID: 2,
61 | Name: "B",
62 | Books: []*model.BookTest{
63 | {
64 | ID: 3,
65 | Title: "B - Book 1",
66 | },
67 | },
68 | },
69 | {
70 | ID: 3,
71 | Name: "C",
72 | Books: []*model.BookTest{
73 | {
74 | ID: 4,
75 | Title: "C - Book 1",
76 | },
77 | },
78 | },
79 | }, nil
80 | }
81 |
82 | func (r *queryTestResolver) Books(ctx context.Context) ([]*model.BookTest, error) {
83 | return []*model.BookTest{
84 | {
85 | ID: 1,
86 | Title: "A - Book 1",
87 | },
88 | {
89 | ID: 2,
90 | Title: "A - Book 2",
91 | },
92 | {
93 | ID: 3,
94 | Title: "B - Book 1",
95 | },
96 | {
97 | ID: 4,
98 | Title: "C - Book 1",
99 | },
100 | }, nil
101 | }
102 |
103 | // MutationTest returns generated.MutationTestResolver implementation.
104 | func (r *Resolver) MutationTest() generated.MutationTestResolver { return &mutationTestResolver{r} }
105 |
106 | // QueryTest returns generated.QueryTestResolver implementation.
107 | func (r *Resolver) QueryTest() generated.QueryTestResolver { return &queryTestResolver{r} }
108 |
109 | type (
110 | mutationTestResolver struct{ *Resolver }
111 | queryTestResolver struct{ *Resolver }
112 | )
113 |
--------------------------------------------------------------------------------
/metrics.go:
--------------------------------------------------------------------------------
1 | package gbox
2 |
3 | import (
4 | "sync"
5 | "time"
6 |
7 | "github.com/jensneuse/graphql-go-tools/pkg/graphql"
8 | "github.com/prometheus/client_golang/prometheus"
9 | "github.com/prometheus/client_golang/prometheus/promauto"
10 | "go.uber.org/zap"
11 | )
12 |
13 | var metrics = new(Metrics)
14 |
15 | func init() { // nolint:gochecknoinits
16 | metrics.once.Do(func() {
17 | const ns, sub = "caddy", "http_gbox"
18 | operationLabels := []string{"operation_type", "operation_name"}
19 | metrics.operationInFlight = promauto.NewGaugeVec(prometheus.GaugeOpts{
20 | Namespace: ns,
21 | Subsystem: sub,
22 | Name: "operations_in_flight",
23 | Help: "Number of graphql operations currently handled by this server.",
24 | }, operationLabels)
25 |
26 | metrics.operationCount = promauto.NewCounterVec(prometheus.CounterOpts{
27 | Namespace: ns,
28 | Subsystem: sub,
29 | Name: "operation_total",
30 | Help: "Counter of graphql operations served.",
31 | }, operationLabels)
32 |
33 | metrics.operationDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{
34 | Namespace: ns,
35 | Subsystem: sub,
36 | Name: "operation_duration",
37 | Help: "Histogram of GraphQL operations execution duration.",
38 | Buckets: prometheus.DefBuckets,
39 | }, operationLabels)
40 |
41 | cachingLabels := []string{"operation_name", "status"}
42 | metrics.cachingCount = promauto.NewCounterVec(prometheus.CounterOpts{
43 | Namespace: ns,
44 | Subsystem: sub,
45 | Name: "caching_total",
46 | Help: "Counter of graphql query operations caching statues.",
47 | }, cachingLabels)
48 | })
49 | }
50 |
51 | type Metrics struct {
52 | once sync.Once
53 | operationInFlight *prometheus.GaugeVec
54 | operationCount *prometheus.CounterVec
55 | operationDuration *prometheus.HistogramVec
56 | cachingCount *prometheus.CounterVec
57 | }
58 |
59 | type cachingMetrics interface {
60 | addMetricsCaching(*graphql.Request, CachingStatus)
61 | }
62 |
63 | func (h *Handler) addMetricsBeginRequest(request *graphql.Request) {
64 | labels, err := h.metricsOperationLabels(request)
65 | if err != nil {
66 | h.logger.Warn("fail to get metrics operation labels", zap.Error(err))
67 |
68 | return
69 | }
70 |
71 | h.metrics.operationCount.With(labels).Inc()
72 | h.metrics.operationInFlight.With(labels).Inc()
73 | }
74 |
75 | func (h *Handler) addMetricsEndRequest(request *graphql.Request, d time.Duration) {
76 | labels, err := h.metricsOperationLabels(request)
77 | if err != nil {
78 | h.logger.Warn("fail to get metrics operation labels", zap.Error(err))
79 |
80 | return
81 | }
82 |
83 | h.metrics.operationInFlight.With(labels).Dec()
84 | h.metrics.operationDuration.With(labels).Observe(d.Seconds())
85 | }
86 |
87 | func (h *Handler) addMetricsCaching(request *graphql.Request, status CachingStatus) {
88 | labels, err := h.metricsCachingLabels(request, status)
89 | if err != nil {
90 | h.logger.Warn("fail to get metrics caching labels", zap.Error(err))
91 |
92 | return
93 | }
94 |
95 | h.metrics.cachingCount.With(labels).Inc()
96 | }
97 |
98 | func (h *Handler) metricsCachingLabels(request *graphql.Request, status CachingStatus) (map[string]string, error) {
99 | if !request.IsNormalized() {
100 | if result, _ := request.Normalize(h.schema); !result.Successful {
101 | return nil, result.Errors
102 | }
103 | }
104 |
105 | labels := map[string]string{
106 | "operation_name": request.OperationName,
107 | "status": string(status),
108 | }
109 |
110 | return labels, nil
111 | }
112 |
113 | func (h *Handler) metricsOperationLabels(request *graphql.Request) (map[string]string, error) {
114 | if !request.IsNormalized() {
115 | if result, _ := request.Normalize(h.schema); !result.Successful {
116 | return nil, result.Errors
117 | }
118 | }
119 |
120 | labels := map[string]string{
121 | "operation_name": request.OperationName,
122 | }
123 |
124 | operationType, _ := request.OperationType()
125 |
126 | // nolint:exhaustive
127 | switch operationType {
128 | case graphql.OperationTypeQuery:
129 | labels["operation_type"] = "query"
130 | case graphql.OperationTypeMutation:
131 | labels["operation_type"] = "mutation"
132 | case graphql.OperationTypeSubscription:
133 | labels["operation_type"] = "subscription"
134 | default:
135 | labels["operation_type"] = "unknown"
136 | }
137 |
138 | return labels, nil
139 | }
140 |
--------------------------------------------------------------------------------
/router.go:
--------------------------------------------------------------------------------
1 | package gbox
2 |
3 | import (
4 | "bytes"
5 | "errors"
6 | "io/ioutil"
7 | "net/http"
8 | "time"
9 |
10 | "github.com/99designs/gqlgen/graphql/handler"
11 | "github.com/99designs/gqlgen/graphql/playground"
12 | "github.com/caddyserver/caddy/v2/modules/caddyhttp"
13 | "github.com/gbox-proxy/gbox/admin"
14 | "github.com/gbox-proxy/gbox/admin/generated"
15 | "github.com/gorilla/handlers"
16 | "github.com/gorilla/mux"
17 | "github.com/jensneuse/graphql-go-tools/pkg/graphql"
18 | "go.uber.org/zap"
19 | )
20 |
21 | const (
22 | adminPlaygroundPath = "/admin"
23 | adminGraphQLPath = "/admin/graphql"
24 | playgroundPath = "/"
25 | graphQLPath = "/graphql"
26 | )
27 |
28 | var ErrNotAllowIntrospectionQuery = errors.New("introspection query is not allowed")
29 |
30 | func (h *Handler) initRouter() {
31 | router := mux.NewRouter()
32 | router.Path(graphQLPath).HeadersRegexp(
33 | "content-type", "application/json*",
34 | ).Methods("POST").HandlerFunc(h.GraphQLHandle)
35 | router.Path(graphQLPath).HeadersRegexp(
36 | "upgrade", "^websocket$",
37 | "sec-websocket-protocol", "^graphql-(transport-)?ws$",
38 | ).Methods("GET").HandlerFunc(h.GraphQLOverWebsocketHandle)
39 |
40 | if h.Caching != nil {
41 | router.Path(adminGraphQLPath).HeadersRegexp(
42 | "content-type", "application/json*",
43 | ).Methods("POST").HandlerFunc(h.AdminGraphQLHandle)
44 | }
45 |
46 | if !h.DisabledPlaygrounds {
47 | ph := playground.Handler("GraphQL playground", graphQLPath)
48 | router.Path(playgroundPath).Methods("GET").Handler(ph)
49 |
50 | if h.Caching != nil {
51 | phAdmin := playground.Handler("Admin GraphQL playground", adminGraphQLPath)
52 | router.Path(adminPlaygroundPath).Methods("GET").Handler(phAdmin)
53 | }
54 | }
55 |
56 | if len(h.CORSOrigins) == 0 {
57 | h.router = router
58 |
59 | return
60 | }
61 |
62 | h.router = handlers.CORS(
63 | handlers.AllowCredentials(),
64 | handlers.AllowedOrigins(h.CORSOrigins),
65 | handlers.AllowedHeaders(h.CORSAllowedHeaders),
66 | )(router)
67 | }
68 |
69 | // GraphQLOverWebsocketHandle handling websocket connection between client & upstream.
70 | func (h *Handler) GraphQLOverWebsocketHandle(w http.ResponseWriter, r *http.Request) {
71 | reporter := r.Context().Value(errorReporterCtxKey).(*errorReporter)
72 |
73 | if err := h.rewriteHandle(w, r); err != nil {
74 | reporter.error = err
75 |
76 | return
77 | }
78 |
79 | n := r.Context().Value(nextHandlerCtxKey).(caddyhttp.Handler)
80 | wsr := newWebsocketResponseWriter(w, h)
81 | reporter.error = h.ReverseProxy.ServeHTTP(wsr, r, n)
82 | }
83 |
84 | // GraphQLHandle ensure GraphQL request is safe before forwarding to upstream and caching query result of it.
85 | func (h *Handler) GraphQLHandle(w http.ResponseWriter, r *http.Request) {
86 | reporter := r.Context().Value(errorReporterCtxKey).(*errorReporter)
87 |
88 | if err := h.rewriteHandle(w, r); err != nil {
89 | reporter.error = err
90 |
91 | return
92 | }
93 |
94 | gqlRequest, err := h.unmarshalHTTPRequest(r)
95 | if err != nil {
96 | h.logger.Debug("can not unmarshal graphql request from http request", zap.Error(err))
97 | reporter.error = writeResponseErrors(err, w)
98 |
99 | return
100 | }
101 |
102 | if err = h.validateGraphqlRequest(gqlRequest); err != nil {
103 | reporter.error = writeResponseErrors(err, w)
104 |
105 | return
106 | }
107 |
108 | h.addMetricsBeginRequest(gqlRequest)
109 | defer func(startedAt time.Time) {
110 | h.addMetricsEndRequest(gqlRequest, time.Since(startedAt))
111 | }(time.Now())
112 |
113 | n := r.Context().Value(nextHandlerCtxKey).(caddyhttp.Handler)
114 |
115 | if h.Caching != nil {
116 | cachingRequest := newCachingRequest(r, h.schemaDocument, h.schema, gqlRequest)
117 | reverse := caddyhttp.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
118 | return h.ReverseProxy.ServeHTTP(w, r, n)
119 | })
120 |
121 | if err = h.Caching.HandleRequest(w, cachingRequest, reverse); err != nil {
122 | reporter.error = writeResponseErrors(err, w)
123 |
124 | return
125 | }
126 |
127 | return
128 | }
129 |
130 | reporter.error = h.ReverseProxy.ServeHTTP(w, r, n)
131 | }
132 |
133 | func (h *Handler) unmarshalHTTPRequest(r *http.Request) (*graphql.Request, error) {
134 | gqlRequest := new(graphql.Request)
135 | rawBody, _ := ioutil.ReadAll(r.Body)
136 | r.Body = ioutil.NopCloser(bytes.NewBuffer(rawBody))
137 | copyHTTPRequest, err := http.NewRequest(r.Method, r.URL.String(), ioutil.NopCloser(bytes.NewBuffer(rawBody))) // nolint:noctx
138 | if err != nil {
139 | return nil, err
140 | }
141 |
142 | if err = graphql.UnmarshalHttpRequest(copyHTTPRequest, gqlRequest); err != nil {
143 | return nil, err
144 | }
145 |
146 | if err = normalizeGraphqlRequest(h.schema, gqlRequest); err != nil {
147 | return nil, err
148 | }
149 |
150 | return gqlRequest, nil
151 | }
152 |
153 | func (h *Handler) validateGraphqlRequest(r *graphql.Request) error {
154 | isIntrospectQuery, _ := r.IsIntrospectionQuery()
155 |
156 | if isIntrospectQuery && h.DisabledIntrospection {
157 | return ErrNotAllowIntrospectionQuery
158 | }
159 |
160 | if h.Complexity != nil {
161 | requestErrors := h.Complexity.validateRequest(h.schema, r)
162 |
163 | if requestErrors.Count() > 0 {
164 | return requestErrors
165 | }
166 | }
167 |
168 | return nil
169 | }
170 |
171 | // AdminGraphQLHandle purging query result cached and describe cache key.
172 | func (h *Handler) AdminGraphQLHandle(w http.ResponseWriter, r *http.Request) {
173 | resolver := admin.NewResolver(h.schema, h.schemaDocument, h.logger, h.Caching)
174 | gqlGen := handler.NewDefaultServer(generated.NewExecutableSchema(generated.Config{Resolvers: resolver}))
175 |
176 | gqlGen.ServeHTTP(w, r)
177 | }
178 |
179 | func (h *Handler) rewriteHandle(w http.ResponseWriter, r *http.Request) error {
180 | n := caddyhttp.HandlerFunc(func(http.ResponseWriter, *http.Request) error {
181 | return nil // trick for skip passing cachingRequest to next handle
182 | })
183 |
184 | return h.Rewrite.ServeHTTP(w, r, n)
185 | }
186 |
--------------------------------------------------------------------------------
/schema_fetcher.go:
--------------------------------------------------------------------------------
1 | package gbox
2 |
3 | import (
4 | "bytes"
5 | "context"
6 | "encoding/json"
7 | "fmt"
8 | "io/ioutil"
9 | "net/http"
10 | "time"
11 |
12 | "github.com/caddyserver/caddy/v2"
13 | "github.com/jensneuse/graphql-go-tools/pkg/ast"
14 | "github.com/jensneuse/graphql-go-tools/pkg/astparser"
15 | "github.com/jensneuse/graphql-go-tools/pkg/astprinter"
16 | "github.com/jensneuse/graphql-go-tools/pkg/graphql"
17 | "github.com/jensneuse/graphql-go-tools/pkg/introspection"
18 | "go.uber.org/zap"
19 | )
20 |
const (
	// schemaIntrospectionCacheKey is the cache-store key under which the
	// upstream introspection result is persisted between restarts.
	schemaIntrospectionCacheKey = "gbox_schema_introspection"
)

// schemaChangedHandler is invoked when a freshly fetched schema differs
// from the previous one; on the first fetch the old document/schema are nil.
type schemaChangedHandler func(oldDocument, newDocument *ast.Document, oldSchema, newSchema *graphql.Schema)

// schemaFetcher help to fetch SDL of upstream.
type schemaFetcher struct {
	// Upstream url
	upstream string
	header http.Header
	// interval between background re-fetches; 0 disables the interval loop.
	interval caddy.Duration
	// timeout applied to the introspection HTTP client.
	timeout caddy.Duration

	caching *Caching
	context context.Context
	logger *zap.Logger
	// schema and schemaDocument hold the most recently fetched upstream schema.
	schema *graphql.Schema
	schemaDocument *ast.Document
	onSchemaChanged schemaChangedHandler
}
42 |
43 | func (s *schemaFetcher) Provision(ctx caddy.Context) (err error) {
44 | introspectionData, _ := s.getCachingIntrospectionData()
45 |
46 | if introspectionData != nil {
47 | if err = s.fetchByIntrospectionData(introspectionData); err != nil {
48 | return err
49 | }
50 | }
51 |
52 | if err = s.fetch(); err != nil {
53 | return err
54 | }
55 |
56 | if s.interval == 0 {
57 | s.logger.Info("fetch schema interval disabled")
58 |
59 | return nil
60 | }
61 |
62 | go s.startInterval()
63 |
64 | return nil
65 | }
66 |
67 | func (s *schemaFetcher) startInterval() {
68 | interval := time.NewTicker(time.Duration(s.interval))
69 |
70 | defer interval.Stop()
71 |
72 | for {
73 | select {
74 | case <-s.context.Done():
75 | s.logger.Info("fetch schema interval context cancelled")
76 |
77 | return
78 | case <-interval.C:
79 | if err := s.fetch(); err != nil {
80 | s.logger.Error("interval fetch schema fail", zap.Error(err))
81 | }
82 | }
83 | }
84 | }
85 |
86 | func (s *schemaFetcher) fetch() error {
87 | data, err := s.introspect()
88 | if err != nil {
89 | return err
90 | }
91 |
92 | return s.fetchByIntrospectionData(data)
93 | }
94 |
95 | func (s *schemaFetcher) fetchByIntrospectionData(data *introspection.Data) (err error) {
96 | var newSchema *graphql.Schema
97 | var document *ast.Document
98 | dataJSON, _ := json.Marshal(data) // nolint:errchkjson
99 | converter := &introspection.JsonConverter{}
100 |
101 | if document, err = converter.GraphQLDocument(bytes.NewBuffer(dataJSON)); err != nil {
102 | return err
103 | }
104 |
105 | documentOutWriter := bufferPool.Get().(*bytes.Buffer)
106 | defer bufferPool.Put(documentOutWriter)
107 | documentOutWriter.Reset()
108 |
109 | if err = astprinter.Print(document, nil, documentOutWriter); err != nil {
110 | return err
111 | }
112 |
113 | if newSchema, err = graphql.NewSchemaFromReader(documentOutWriter); err != nil {
114 | return err
115 | }
116 |
117 | normalizationResult, _ := newSchema.Normalize()
118 |
119 | if !normalizationResult.Successful {
120 | return normalizationResult.Errors
121 | }
122 |
123 | s.schemaChanged(newSchema)
124 |
125 | return nil
126 | }
127 |
128 | func (s *schemaFetcher) introspect() (data *introspection.Data, err error) {
129 | client := &http.Client{
130 | Timeout: time.Duration(s.timeout),
131 | }
132 | requestBody, _ := json.Marshal(s.newIntrospectRequest()) // nolint:errchkjson
133 | request, _ := http.NewRequestWithContext(s.context, "POST", s.upstream, bytes.NewBuffer(requestBody))
134 | request.Header = s.header.Clone()
135 | request.Header.Set("user-agent", "GBox Proxy")
136 | request.Header.Set("content-type", "application/json")
137 |
138 | var response *http.Response
139 |
140 | response, err = client.Do(request)
141 |
142 | if err != nil {
143 | return nil, err
144 | }
145 |
146 | defer response.Body.Close()
147 | rawResponseBody, _ := ioutil.ReadAll(response.Body)
148 |
149 | var responseBody struct {
150 | Data *introspection.Data `json:"data"`
151 | }
152 |
153 | if err = json.Unmarshal(rawResponseBody, &responseBody); err != nil {
154 | return nil, err
155 | }
156 |
157 | if responseBody.Data == nil {
158 | return nil, fmt.Errorf("introspection response not have data field")
159 | }
160 |
161 | if err = s.cachingIntrospectionData(responseBody.Data); err != nil {
162 | return nil, err
163 | }
164 |
165 | return responseBody.Data, nil
166 | }
167 |
168 | func (s *schemaFetcher) getCachingIntrospectionData() (*introspection.Data, error) {
169 | if s.caching == nil {
170 | return nil, nil // nolint:nilnil
171 | }
172 |
173 | data := new(introspection.Data)
174 |
175 | if _, err := s.caching.store.Get(s.context, schemaIntrospectionCacheKey, data); err != nil {
176 | return nil, err
177 | }
178 |
179 | return data, nil
180 | }
181 |
182 | func (s *schemaFetcher) cachingIntrospectionData(data *introspection.Data) error {
183 | if s.caching == nil {
184 | return nil
185 | }
186 |
187 | return s.caching.store.Set(s.context, schemaIntrospectionCacheKey, data, nil)
188 | }
189 |
190 | func (s *schemaFetcher) schemaChanged(changedSchema *graphql.Schema) {
191 | var changedDocument *ast.Document
192 |
193 | defer func() {
194 | s.schema = changedSchema
195 | s.schemaDocument = changedDocument
196 | }()
197 |
198 | if s.onSchemaChanged == nil {
199 | return
200 | }
201 |
202 | document, _ := astparser.ParseGraphqlDocumentBytes(changedSchema.Document())
203 | changedDocument = &document
204 |
205 | if s.schema == nil {
206 | s.onSchemaChanged(nil, changedDocument, nil, changedSchema)
207 |
208 | return
209 | }
210 |
211 | oldHash, _ := s.schema.Hash() // nolint:ifshort
212 | newHash, _ := changedSchema.Hash()
213 |
214 | if oldHash != newHash {
215 | s.onSchemaChanged(s.schemaDocument, changedDocument, s.schema, changedSchema)
216 | }
217 | }
218 |
// newIntrospectRequest builds the introspection request used to discover
// the upstream schema: types, fields, arguments and directives, with type
// references nested eight levels deep.
func (*schemaFetcher) newIntrospectRequest() *graphql.Request {
	return &graphql.Request{
		OperationName: "IntrospectionQuery",
		Query: `
query IntrospectionQuery {
  __schema {
    queryType { name }
    mutationType { name }
    subscriptionType { name }
    types {
      ...FullType
    }
    directives {
      name
      args {
        ...InputValue
      }
      locations
    }
  }
}

fragment FullType on __Type {
  kind
  name
  fields(includeDeprecated: true) {
    name
    args {
      ...InputValue
    }
    type {
      ...TypeRef
    }
    isDeprecated
    deprecationReason
  }
  inputFields {
    ...InputValue
  }
  interfaces {
    ...TypeRef
  }
  enumValues(includeDeprecated: true) {
    name
    isDeprecated
    deprecationReason
  }
  possibleTypes {
    ...TypeRef
  }
}

fragment InputValue on __InputValue {
  name
  type { ...TypeRef }
}

fragment TypeRef on __Type {
  kind
  name
  ofType {
    kind
    name
    ofType {
      kind
      name
      ofType {
        kind
        name
        ofType {
          kind
          name
          ofType {
            kind
            name
            ofType {
              kind
              name
              ofType {
                kind
                name
              }
            }
          }
        }
      }
    }
  }
}
`,
	}
}
311 |
--------------------------------------------------------------------------------
/schema_fetcher_test.go:
--------------------------------------------------------------------------------
1 | package gbox
2 |
3 | import (
4 | "context"
5 | "net/http"
6 | "testing"
7 | "time"
8 |
9 | "github.com/99designs/gqlgen/graphql/handler"
10 | "github.com/caddyserver/caddy/v2"
11 | "github.com/gbox-proxy/gbox/internal/testserver"
12 | "github.com/gbox-proxy/gbox/internal/testserver/generated"
13 | "github.com/jensneuse/graphql-go-tools/pkg/ast"
14 | "github.com/jensneuse/graphql-go-tools/pkg/graphql"
15 | "github.com/stretchr/testify/suite"
16 | "go.uber.org/zap"
17 | )
18 |
// SchemaFetcherTestSuite exercises schemaFetcher against the test server
// started by TestSchemaFetcher.
type SchemaFetcherTestSuite struct {
	suite.Suite
}
22 |
23 | func (s *SchemaFetcherTestSuite) TestInterval() {
24 | ctx, cancel := context.WithCancel(context.Background())
25 | sh := schemaChangedHandler(func(oldDocument, newDocument *ast.Document, oldSchema, newSchema *graphql.Schema) {
26 | s.Require().NotNil(newDocument)
27 | s.Require().NotNil(newSchema)
28 | cancel()
29 | })
30 | f := &schemaFetcher{
31 | upstream: "http://localhost:9091",
32 | interval: caddy.Duration(time.Millisecond),
33 | onSchemaChanged: sh,
34 | context: ctx,
35 | header: make(http.Header),
36 | logger: zap.NewNop(),
37 | }
38 |
39 | f.startInterval()
40 | }
41 |
42 | func (s *SchemaFetcherTestSuite) TestProvision() {
43 | c := &Caching{}
44 | s.Require().NoError(c.Provision(caddy.Context{}))
45 |
46 | testCases := map[string]struct {
47 | upstream string
48 | expectedErrorMsg string
49 | caching *Caching
50 | }{
51 | "without_caching": {
52 | upstream: "http://localhost:9091",
53 | },
54 | "with_caching": {
55 | upstream: "http://localhost:9091",
56 | caching: c,
57 | },
58 | "invalid_upstream": {
59 | upstream: "http://localhost:9092",
60 | expectedErrorMsg: "connection refused",
61 | },
62 | }
63 |
64 | for name, testCase := range testCases {
65 | var called bool
66 | sh := schemaChangedHandler(func(oldDocument, newDocument *ast.Document, oldSchema, newSchema *graphql.Schema) {
67 | s.Require().NotNilf(newDocument, "case %s: new document should not be nil", name)
68 | s.Require().NotNil(newSchema, "case %s: new schema should not be nil", name)
69 | called = true
70 | })
71 | ctx, cancel := context.WithCancel(context.Background())
72 | f := &schemaFetcher{
73 | context: ctx,
74 | upstream: testCase.upstream,
75 | timeout: caddy.Duration(time.Millisecond * 50),
76 | onSchemaChanged: sh,
77 | header: make(http.Header),
78 | caching: c,
79 | logger: zap.NewNop(),
80 | }
81 |
82 | e := f.Provision(caddy.Context{})
83 |
84 | if testCase.expectedErrorMsg != "" {
85 | s.Require().Errorf(e, "case %s: should error", name)
86 | s.Require().Containsf(e.Error(), testCase.expectedErrorMsg, "case %s: unexpected error message", name)
87 | cancel()
88 |
89 | return
90 | }
91 |
92 | s.Require().NoErrorf(e, "case %s: should not error", name)
93 | s.Require().Truef(called, "case %s: should be call schema change handler", name)
94 | cancel()
95 | }
96 | }
97 |
98 | func TestSchemaFetcher(t *testing.T) {
99 | h := handler.NewDefaultServer(generated.NewExecutableSchema(generated.Config{Resolvers: &testserver.Resolver{}}))
100 | s := &http.Server{
101 | Addr: "localhost:9091",
102 | Handler: h,
103 | }
104 | defer s.Shutdown(context.Background())
105 |
106 | go func() {
107 | s.ListenAndServe()
108 | }()
109 |
110 | <-time.After(time.Millisecond * 10)
111 |
112 | suite.Run(t, new(SchemaFetcherTestSuite))
113 | }
114 |
--------------------------------------------------------------------------------
/utils.go:
--------------------------------------------------------------------------------
1 | package gbox
2 |
3 | import (
4 | "bytes"
5 | "net/http"
6 | "strings"
7 | "sync"
8 |
9 | "github.com/jensneuse/graphql-go-tools/pkg/astparser"
10 | "github.com/jensneuse/graphql-go-tools/pkg/graphql"
11 | "github.com/jensneuse/graphql-go-tools/pkg/operationreport"
12 | )
13 |
// bufferPool recycles scratch byte buffers (SDL printing, websocket frame
// parsing). Callers must Reset() the buffer after Get().
var bufferPool = sync.Pool{
	New: func() interface{} {
		return new(bytes.Buffer)
	},
}
19 |
20 | func writeResponseErrors(errors error, w http.ResponseWriter) error {
21 | gqlErrors := graphql.RequestErrorsFromError(errors)
22 | w.Header().Set("Content-Type", "application/json")
23 |
24 | if _, err := gqlErrors.WriteResponse(w); err != nil {
25 | return err
26 | }
27 |
28 | return nil
29 | }
30 |
31 | func normalizeGraphqlRequest(schema *graphql.Schema, gqlRequest *graphql.Request) error {
32 | if result, _ := gqlRequest.Normalize(schema); !result.Successful {
33 | return result.Errors
34 | }
35 |
36 | operation, _ := astparser.ParseGraphqlDocumentString(gqlRequest.Query)
37 | numOfOperations := operation.NumOfOperationDefinitions()
38 | operationName := strings.TrimSpace(gqlRequest.OperationName)
39 | report := &operationreport.Report{}
40 |
41 | if operationName == "" && numOfOperations > 1 {
42 | report.AddExternalError(operationreport.ErrRequiredOperationNameIsMissing())
43 |
44 | return report
45 | }
46 |
47 | if operationName == "" && numOfOperations == 1 {
48 | operationName = operation.OperationDefinitionNameString(0)
49 | }
50 |
51 | if !operation.OperationNameExists(operationName) {
52 | report.AddExternalError(operationreport.ErrOperationWithProvidedOperationNameNotFound(operationName))
53 |
54 | return report
55 | }
56 |
57 | gqlRequest.OperationName = operationName
58 |
59 | return nil
60 | }
61 |
--------------------------------------------------------------------------------
/ws.go:
--------------------------------------------------------------------------------
1 | package gbox
2 |
3 | import (
4 | "bufio"
5 | "bytes"
6 | "encoding/json"
7 | "io"
8 | "net"
9 | "net/http"
10 | "time"
11 |
12 | "github.com/caddyserver/caddy/v2/modules/caddyhttp"
13 | "github.com/gobwas/ws/wsutil"
14 | "github.com/jensneuse/graphql-go-tools/pkg/graphql"
15 | )
16 |
// wsSubscriber receives websocket subscription lifecycle events: a new
// subscribe request (which may be rejected by returning an error) and the
// close of a subscription together with its lifetime.
type wsSubscriber interface {
	onWsSubscribe(*graphql.Request) error
	onWsClose(*graphql.Request, time.Duration)
}
21 |
22 | func (h *Handler) onWsSubscribe(r *graphql.Request) (err error) {
23 | if err = normalizeGraphqlRequest(h.schema, r); err != nil {
24 | return err
25 | }
26 |
27 | if err = h.validateGraphqlRequest(r); err != nil {
28 | return err
29 | }
30 |
31 | h.addMetricsBeginRequest(r)
32 |
33 | return nil
34 | }
35 |
// onWsClose records the end-request metric when a subscription connection
// terminates; d is the elapsed subscription lifetime.
func (h *Handler) onWsClose(r *graphql.Request, d time.Duration) {
	h.addMetricsEndRequest(r, d)
}
39 |
// wsResponseWriter wraps a response writer so that the connection obtained
// via Hijack reports subscription events to subscriber.
type wsResponseWriter struct {
	*caddyhttp.ResponseWriterWrapper
	subscriber wsSubscriber
}
44 |
45 | func newWebsocketResponseWriter(w http.ResponseWriter, s wsSubscriber) *wsResponseWriter {
46 | return &wsResponseWriter{
47 | ResponseWriterWrapper: &caddyhttp.ResponseWriterWrapper{
48 | ResponseWriter: w,
49 | },
50 | subscriber: s,
51 | }
52 | }
53 |
54 | // Hijack connection for validating, and collecting metrics.
55 | func (r *wsResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
56 | c, w, e := r.ResponseWriterWrapper.Hijack()
57 |
58 | if c != nil {
59 | c = &wsConn{
60 | Conn: c,
61 | wsSubscriber: r.subscriber,
62 | }
63 | }
64 |
65 | return c, w, e
66 | }
67 |
// wsConn decorates a hijacked websocket connection to observe GraphQL
// subscription traffic. request/subscribeAt track the currently active
// subscription (nil when none).
type wsConn struct {
	net.Conn
	wsSubscriber
	request *graphql.Request
	subscribeAt time.Time
}

// wsMessage is the minimal shape of a GraphQL-over-websocket protocol
// message needed here (id, type, and the raw payload).
type wsMessage struct {
	ID interface{} `json:"id"`
	Type string `json:"type"`
	Payload json.RawMessage `json:"payload,omitempty"`
}
80 |
// Read intercepts bytes the client sends over the hijacked connection to
// track GraphQL subscriptions without altering the websocket stream.
// While a subscription is active only teardown is watched for; otherwise
// the freshly read bytes are scanned for a subscribe message. Frames that
// fail websocket or JSON decoding leave the read result untouched. When
// the subscriber rejects a request, error+complete messages are written
// back and io.EOF is returned to end the connection.
func (c *wsConn) Read(b []byte) (n int, err error) {
	n, err = c.Conn.Read(b)

	// A read error while subscribed means the connection is closing:
	// report the subscription's duration and clear it.
	if c.request != nil && err != nil {
		c.onWsClose(c.request, time.Since(c.subscribeAt))
		c.request = nil
	}

	// Already subscribed, or the read itself failed: nothing to inspect.
	if c.request != nil || err != nil {
		return n, err
	}

	buff := bufferPool.Get().(*bytes.Buffer)
	defer bufferPool.Put(buff)
	buff.Reset()
	buff.Write(b[:n])

	for {
		msg := new(wsMessage)
		request := new(graphql.Request)
		data, _, e := wsutil.ReadClientData(buff)

		if e != nil {
			// Not a decodable websocket frame (or buffer exhausted):
			// intentionally return the ORIGINAL read result, not e.
			return n, err
		}

		if e = json.Unmarshal(data, msg); e != nil {
			continue
		}

		// "subscribe" and "start" — presumably the two websocket
		// subprotocol variants; other message types are ignored.
		if msg.Type != "subscribe" && msg.Type != "start" {
			continue
		}

		if e = json.Unmarshal(msg.Payload, request); e != nil {
			continue
		}

		if e = c.onWsSubscribe(request); e != nil {
			// Rejected: surface the error to the client and terminate.
			c.writeErrorMessage(msg.ID, e)
			c.writeCompleteMessage(msg.ID)

			return n, io.EOF
		}

		c.request = request
		c.subscribeAt = time.Now()

		return n, err
	}
}
132 |
133 | func (c *wsConn) writeErrorMessage(id interface{}, errMsg error) error {
134 | errMsgRaw, errMsgErr := json.Marshal(graphql.RequestErrorsFromError(errMsg))
135 |
136 | if errMsgErr != nil {
137 | return errMsgErr
138 | }
139 |
140 | msg := &wsMessage{
141 | ID: id,
142 | Type: "error",
143 | Payload: json.RawMessage(errMsgRaw),
144 | }
145 |
146 | payload, err := json.Marshal(msg)
147 | if err != nil {
148 | return err
149 | }
150 |
151 | return wsutil.WriteServerText(c, payload)
152 | }
153 |
154 | func (c *wsConn) writeCompleteMessage(id interface{}) error {
155 | msg := &wsMessage{
156 | ID: id,
157 | Type: "complete",
158 | }
159 | payload, err := json.Marshal(msg)
160 | if err != nil {
161 | return err
162 | }
163 |
164 | return wsutil.WriteServerText(c, payload)
165 | }
166 |
--------------------------------------------------------------------------------
/ws_test.go:
--------------------------------------------------------------------------------
1 | package gbox
2 |
3 | import (
4 | "bufio"
5 | "bytes"
6 | "encoding/json"
7 | "errors"
8 | "io"
9 | "net"
10 | "net/http"
11 | "testing"
12 | "time"
13 |
14 | "github.com/gobwas/ws/wsutil"
15 | "github.com/jensneuse/graphql-go-tools/pkg/graphql"
16 | "github.com/stretchr/testify/require"
17 | )
18 |
// testWsSubscriber records subscribe/close callbacks so tests can assert
// on the captured request, the reported duration, and an injected error.
type testWsSubscriber struct {
	t *testing.T
	r *graphql.Request // last request passed to onWsSubscribe
	d time.Duration    // duration reported by onWsClose
	e error            // error returned from onWsSubscribe
}
25 |
// onWsSubscribe captures the request and returns the configured error.
func (t *testWsSubscriber) onWsSubscribe(request *graphql.Request) error {
	t.r = request

	return t.e
}

// onWsClose asserts the closed request is the subscribed one and records
// the reported duration.
func (t *testWsSubscriber) onWsClose(request *graphql.Request, duration time.Duration) {
	require.Equal(t.t, t.r, request)
	t.d = duration
}
36 |
// testWsResponseWriter is a hijackable response writer whose connection
// writes into wsConnBuff so tests can inspect server-sent frames.
type testWsResponseWriter struct {
	http.ResponseWriter
	wsConnBuff *bytes.Buffer
}

// testWsConn is a fake net.Conn: reads are simulated, writes land in buffer.
type testWsConn struct {
	net.Conn
	buffer *bytes.Buffer
}
46 |
47 | func (c *testWsConn) Read(b []byte) (n int, err error) {
48 | if b == nil {
49 | return 0, errors.New("end")
50 | }
51 |
52 | return len(b), nil
53 | }
54 |
// Write captures server-sent bytes in the backing buffer for assertions.
func (c *testWsConn) Write(b []byte) (n int, err error) {
	return c.buffer.Write(b)
}

// Hijack hands out the fake connection backed by wsConnBuff.
func (t *testWsResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
	return &testWsConn{
		buffer: t.wsConnBuff,
	}, nil, nil
}
64 |
65 | func newTestWsSubscriber(t *testing.T, err error) *testWsSubscriber {
66 | t.Helper()
67 |
68 | return &testWsSubscriber{
69 | t: t,
70 | e: err,
71 | }
72 | }
73 |
// TestWsMetricsConn drives a successful subscribe through the hijacked
// connection and checks the close duration is reported on teardown.
func TestWsMetricsConn(t *testing.T) {
	s := newTestWsSubscriber(t, nil)
	w := newWebsocketResponseWriter(&testWsResponseWriter{wsConnBuff: new(bytes.Buffer)}, s)
	conn, _, _ := w.Hijack()
	buff := new(bytes.Buffer)
	wsutil.WriteClientText(buff, []byte(`{"type": "start", "payload":{"query": "subscription { users { id } }"}}`))

	n, err := conn.Read(buff.Bytes()) // subscribe

	require.NoError(t, err)
	require.Greater(t, n, 0)
	require.NotNil(t, s.r)
	require.Equal(t, s.d, time.Duration(0))

	// testWsConn.Read errors on a nil buffer, which closes the subscription.
	conn.Read(nil) // end
	require.Greater(t, s.d, time.Duration(0))
}
91 |
92 | func TestWsMetricsConnBadCases(t *testing.T) {
93 | testCases := map[string]struct {
94 | message string
95 | err error
96 | }{
97 | "invalid_json": {
98 | message: `invalid`,
99 | },
100 | "invalid_struct": {
101 | message: `{}`,
102 | },
103 | "invalid_message_payload": {
104 | message: `{"type": "start", "payload": "invalid"}`,
105 | },
106 | "invalid_ws_message": {},
107 | "invalid_query": {
108 | message: `{"type": "start", "payload": {"query": "query { user { id } }"}}`,
109 | err: errors.New("test"),
110 | },
111 | }
112 |
113 | for name, testCase := range testCases {
114 | wsConnBuff := new(bytes.Buffer)
115 | s := newTestWsSubscriber(t, testCase.err)
116 | w := newWebsocketResponseWriter(&testWsResponseWriter{wsConnBuff: wsConnBuff}, s)
117 | conn, _, _ := w.Hijack()
118 | buff := new(bytes.Buffer)
119 |
120 | if testCase.message != "invalid_ws_message" {
121 | wsutil.WriteClientText(buff, []byte(testCase.message))
122 | } else {
123 | buff.Write([]byte(name))
124 | }
125 |
126 | n, err := conn.Read(buff.Bytes())
127 |
128 | require.Greaterf(t, n, 0, "case %s: read bytes should greater than 0", name)
129 | require.Equalf(t, s.d, time.Duration(0), "case %s: duration should be 0", name)
130 |
131 | if s.e == nil {
132 | require.Nilf(t, s.r, "case %s: request should be nil", name)
133 | require.Nilf(t, err, "case %s: err should be nil", name)
134 | } else {
135 | require.Equalf(t, io.EOF, err, "case %s: should be EOF", name)
136 | require.NotNilf(t, s.r, "case %s: request should not be nil", name)
137 | data, _ := wsutil.ReadServerText(wsConnBuff)
138 | msg := &wsMessage{}
139 | json.Unmarshal(data, msg)
140 |
141 | require.Equalf(t, "error", msg.Type, "case %s: unexpected error type", name)
142 |
143 | data, _ = wsutil.ReadServerText(wsConnBuff)
144 | msg = &wsMessage{}
145 | json.Unmarshal(data, msg)
146 |
147 | require.Equalf(t, "complete", msg.Type, "case %s: msg type should be complete, but got %s", name, msg.Type)
148 | }
149 | }
150 | }
151 |
--------------------------------------------------------------------------------