├── .circleci
└── config.yml
├── .github
└── workflows
│ ├── main-router.yml
│ ├── main-serverless.yml
│ ├── main.yml
│ ├── managed.yml
│ ├── rebase.yml
│ ├── release.yml
│ ├── subgraph-check.yml
│ └── subgraph-deploy-publish.yml
├── .gitignore
├── .scripts
├── build-matrix.sh
├── bump-image-versions.sh
├── bump-package-versions.sh
├── check-all.sh
├── check-products.sh
├── compose.sh
├── config.sh
├── docker-prune.sh
├── graph-api-env.sh
├── k8s-ci.sh
├── k8s-down.sh
├── k8s-graph-dump.sh
├── k8s-nginx-dump.sh
├── k8s-smoke.sh
├── k8s-up.sh
├── publish.sh
├── query.sh
├── smoke.sh
├── subgraphs.sh
└── unpublish.sh
├── CODEOWNERS
├── LICENSE
├── Makefile
├── README.md
├── docker-compose.managed.yml
├── docker-compose.otel-collector.yml
├── docker-compose.otel-zipkin.yml
├── docker-compose.router-otel.yml
├── docker-compose.router.yml
├── docker-compose.serverless.yml
├── docker-compose.yml
├── docs
└── media
│ ├── apollo-sandbox.png
│ ├── ci
│ ├── publish-artifacts-workflow.png
│ ├── repository-dispatch-triggered.png
│ ├── schema-check-breaking-change.png
│ ├── supergraph-pr-automerged.png
│ ├── webhook-proxy.png
│ └── webhook-register.png
│ ├── honeycomb.png
│ ├── opentelemetry.png
│ ├── schema-check-mark-safe.png
│ ├── studio.png
│ └── supergraph.png
├── gateway
├── Dockerfile
├── gateway.js
└── package.json
├── k8s
├── clusters
│ └── kind-cluster.yaml
├── infra
│ ├── base
│ │ ├── kustomization.yaml
│ │ └── nginx-ingress.yaml
│ └── dev
│ │ └── kustomization.yaml
├── router
│ ├── base
│ │ ├── kustomization.yaml
│ │ ├── router.yaml
│ │ └── supergraph.graphql
│ └── dev
│ │ ├── kustomization.yaml
│ │ └── supergraph.graphql
└── subgraphs
│ ├── base
│ ├── kustomization.yaml
│ └── subgraphs.yaml
│ └── dev
│ └── kustomization.yaml
├── opentelemetry
├── collector-config.yml
└── prometheus.yml
├── renovate.json
├── router.yaml
├── serverless
├── Dockerfile
├── package.json
├── router.js
├── serverless.yml
├── subgraphs
│ ├── inventory.graphql
│ ├── inventory.js
│ ├── pandas.graphql
│ ├── pandas.js
│ ├── products.graphql
│ ├── products.js
│ ├── users.graphql
│ └── users.js
├── supergraph.graphql
└── supergraph.yaml
├── subgraphs
├── inventory
│ ├── Dockerfile
│ ├── inventory.graphql
│ ├── inventory.js
│ └── package.json
├── pandas
│ ├── Dockerfile
│ ├── package.json
│ ├── pandas.graphql
│ └── pandas.js
├── products
│ ├── Dockerfile
│ ├── package.json
│ ├── products.graphql
│ └── products.js
└── users
│ ├── Dockerfile
│ ├── package.json
│ ├── users.graphql
│ └── users.js
├── supergraph.graphql
└── supergraph.yaml
/.circleci/config.yml:
--------------------------------------------------------------------------------
1 | version: 2.1
2 |
3 | orbs:
4 | secops: apollo/circleci-secops-orb@2.0.7
5 |
6 | workflows:
7 | security-scans:
8 | jobs:
9 | - secops/gitleaks:
10 | context:
11 | - platform-docker-ro
12 | - github-orb
13 | - secops-oidc
14 | git-base-revision: <<#pipeline.git.base_revision>><<pipeline.git.base_revision>><</pipeline.git.base_revision>>
15 | git-revision: << pipeline.git.revision >>
16 | - secops/semgrep:
17 | context:
18 | - secops-oidc
19 | - github-orb
20 | git-base-revision: <<#pipeline.git.base_revision>><<pipeline.git.base_revision>><</pipeline.git.base_revision>>
21 |
--------------------------------------------------------------------------------
/.github/workflows/main-router.yml:
--------------------------------------------------------------------------------
1 | name: CI-router
2 | on:
3 | pull_request:
4 | branches: [ main ]
5 | schedule:
6 | - cron: '30 7 * * *'
7 | workflow_dispatch:
8 |
9 | jobs:
10 | ci-docker-local:
11 | name: CI
12 | runs-on: ubuntu-latest
13 | steps:
14 | - name: checkout
15 | uses: actions/checkout@v3
16 | -
17 | name: install rover
18 | run: |
19 | echo ---------------------------------------------------------------
20 | echo rover - installing ...
21 | echo ---------------------------------------------------------------
22 | curl -sSL https://rover.apollo.dev/nix/latest | sh
23 | echo "$HOME/.rover/bin" >> ${GITHUB_PATH}
24 | -
25 | name: update docker-compose
26 | run: |
27 | which docker-compose && exit 0 || true
28 | echo ---------------------------------------------------------------
29 | echo docker-compose - installing ...
30 | echo ---------------------------------------------------------------
31 | BIN_DIR=$HOME/.docker-compose/bin
32 | FILE=$BIN_DIR/docker-compose
33 | mkdir -p $BIN_DIR
34 | set -x
35 | curl -L --fail https://github.com/docker/compose/releases/download/1.29.1/docker-compose-`uname -s`-`uname -m` -o $FILE
36 | chmod +x $FILE
37 | echo "downloaded $($FILE --version)"
38 | echo "$BIN_DIR" >> ${GITHUB_PATH}
39 | set +x
40 | echo ---------------------------------------------------------------
41 | -
42 | name: check tools
43 | run: |
44 | echo ---------------------------------------------------------------
45 | ( set -x; which rover )
46 | echo "$(rover --version)"
47 | echo ---------------------------------------------------------------
48 | ( set -x; which docker-compose )
49 | echo "$(docker-compose --version)"
50 | echo ---------------------------------------------------------------
51 | - name: make supergraph
52 | run: |
53 | make supergraph
54 | cat supergraph.graphql
55 | - name: docker-compose build
56 | run: |
57 | ( set -x; docker-compose -f docker-compose.router.yml build --no-cache --pull --parallel --progress plain )
58 | - name: docker-compose up -d
59 | run: |
60 | ( set -x; docker-compose -f docker-compose.router.yml up -d )
61 | sleep 3
62 | docker-compose -f docker-compose.router.yml logs
63 | - name: smoke test
64 | run: .scripts/smoke.sh 4000
65 | - name: docker-compose down
66 | run: docker-compose -f docker-compose.router.yml down
67 |
--------------------------------------------------------------------------------
/.github/workflows/main-serverless.yml:
--------------------------------------------------------------------------------
1 | name: CI-serverless
2 | on:
3 | pull_request:
4 | branches: [ main ]
5 | schedule:
6 | - cron: '30 7 * * *'
7 | workflow_dispatch:
8 |
9 | jobs:
10 | ci-docker-local:
11 | name: CI
12 | runs-on: ubuntu-latest
13 | steps:
14 | - name: checkout
15 | uses: actions/checkout@v3
16 | -
17 | name: install rover
18 | run: |
19 | echo ---------------------------------------------------------------
20 | echo rover - installing ...
21 | echo ---------------------------------------------------------------
22 | curl -sSL https://rover.apollo.dev/nix/latest | sh
23 | echo "$HOME/.rover/bin" >> ${GITHUB_PATH}
24 | -
25 | name: update docker-compose
26 | run: |
27 | which docker-compose && exit 0 || true
28 | echo ---------------------------------------------------------------
29 | echo docker-compose - installing ...
30 | echo ---------------------------------------------------------------
31 | BIN_DIR=$HOME/.docker-compose/bin
32 | FILE=$BIN_DIR/docker-compose
33 | mkdir -p $BIN_DIR
34 | set -x
35 | curl -L --fail https://github.com/docker/compose/releases/download/1.29.1/docker-compose-`uname -s`-`uname -m` -o $FILE
36 | chmod +x $FILE
37 | echo "downloaded $($FILE --version)"
38 | echo "$BIN_DIR" >> ${GITHUB_PATH}
39 | set +x
40 | echo ---------------------------------------------------------------
41 | -
42 | name: check tools
43 | run: |
44 | echo ---------------------------------------------------------------
45 | ( set -x; which rover )
46 | echo "$(rover --version)"
47 | echo ---------------------------------------------------------------
48 | ( set -x; which docker-compose )
49 | echo "$(docker-compose --version)"
50 | echo ---------------------------------------------------------------
51 | - name: make supergraph-serverless
52 | run: |
53 | make supergraph-serverless
54 | cat serverless/supergraph.graphql
55 | - name: docker-compose build
56 | run: |
57 | ( set -x; docker-compose -f docker-compose.serverless.yml build --no-cache --pull --parallel --progress plain )
58 | - name: docker-compose up -d
59 | run: |
60 | ( set -x; docker-compose -f docker-compose.serverless.yml up -d )
61 | sleep 6
62 | docker-compose -f docker-compose.serverless.yml logs
63 | - name: smoke test
64 | run: .scripts/smoke.sh 4000
65 | - name: docker-compose down
66 | run: docker-compose -f docker-compose.serverless.yml down
67 |
--------------------------------------------------------------------------------
/.github/workflows/main.yml:
--------------------------------------------------------------------------------
1 | name: CI-gateway
2 | on:
3 | pull_request:
4 | branches: [ main ]
5 | schedule:
6 | - cron: '30 7 * * *'
7 |
8 | jobs:
9 | ci-docker-local:
10 | name: CI
11 | runs-on: ubuntu-latest
12 | steps:
13 | - name: checkout
14 | uses: actions/checkout@v3
15 | -
16 | name: install rover
17 | run: |
18 | echo ---------------------------------------------------------------
19 | echo rover - installing ...
20 | echo ---------------------------------------------------------------
21 | curl -sSL https://rover.apollo.dev/nix/latest | sh
22 | echo "$HOME/.rover/bin" >> ${GITHUB_PATH}
23 | -
24 | name: update docker-compose
25 | run: |
26 | which docker-compose && exit 0 || true
27 | echo ---------------------------------------------------------------
28 | echo docker-compose - installing ...
29 | echo ---------------------------------------------------------------
30 | BIN_DIR=$HOME/.docker-compose/bin
31 | FILE=$BIN_DIR/docker-compose
32 | mkdir -p $BIN_DIR
33 | set -x
34 | curl -L --fail https://github.com/docker/compose/releases/download/1.29.1/docker-compose-`uname -s`-`uname -m` -o $FILE
35 | chmod +x $FILE
36 | echo "downloaded $($FILE --version)"
37 | echo "$BIN_DIR" >> ${GITHUB_PATH}
38 | set +x
39 | echo ---------------------------------------------------------------
40 | -
41 | name: check tools
42 | run: |
43 | echo ---------------------------------------------------------------
44 | ( set -x; which rover )
45 | echo "$(rover --version)"
46 | echo ---------------------------------------------------------------
47 | ( set -x; which docker-compose )
48 | echo "$(docker-compose --version)"
49 | echo ---------------------------------------------------------------
50 | - name: make supergraph
51 | run: |
52 | make supergraph
53 | cat supergraph.graphql
54 | - name: docker-compose build
55 | run: |
56 | ( set -x; docker-compose build --no-cache --pull --parallel --progress plain )
57 | - name: docker-compose up -d
58 | run: |
59 | ( set -x; docker-compose up -d )
60 | sleep 3
61 | docker-compose logs
62 | - name: smoke test
63 | run: .scripts/smoke.sh 4000
64 | - name: docker-compose down
65 | run: docker-compose down
66 |
--------------------------------------------------------------------------------
/.github/workflows/managed.yml:
--------------------------------------------------------------------------------
1 | name: CI Managed Federation
2 | on:
3 | push:
4 | branches: [ main ]
5 | schedule:
6 | - cron: '30 7 * * *'
7 |
8 | concurrency:
9 | group: ${{ github.workflow }}-${{ github.ref }}
10 | cancel-in-progress: true
11 |
12 | jobs:
13 | ci-docker-managed:
14 | name: CI Managed Federation
15 | runs-on: ubuntu-latest
16 | env:
17 | CI: "true"
18 | APOLLO_KEY: ${{ secrets.APOLLO_KEY }}
19 | APOLLO_GRAPH_REF: ${{ secrets.APOLLO_GRAPH_REF }}
20 | steps:
21 | - name: checkout
22 | uses: actions/checkout@v3
23 | -
24 | name: install rover
25 | run: |
26 | echo ---------------------------------------------------------------
27 | echo rover - installing ...
28 | echo ---------------------------------------------------------------
29 | curl -sSL https://rover.apollo.dev/nix/latest | sh
30 | echo "$HOME/.rover/bin" >> ${GITHUB_PATH}
31 | -
32 | name: update docker-compose
33 | run: |
34 | which docker-compose && exit 0 || true
35 | echo ---------------------------------------------------------------
36 | echo docker-compose - installing ...
37 | echo ---------------------------------------------------------------
38 | BIN_DIR=$HOME/.docker-compose/bin
39 | FILE=$BIN_DIR/docker-compose
40 | mkdir -p $BIN_DIR
41 | set -x
42 | curl -L --fail https://github.com/docker/compose/releases/download/1.29.1/docker-compose-`uname -s`-`uname -m` -o $FILE
43 | chmod +x $FILE
44 | echo "downloaded $($FILE --version)"
45 | echo "$BIN_DIR" >> ${GITHUB_PATH}
46 | set +x
47 | echo ---------------------------------------------------------------
48 | -
49 | name: check tools
50 | run: |
51 | echo ---------------------------------------------------------------
52 | ( set -x; which rover )
53 | echo "$(rover --version)"
54 | echo ---------------------------------------------------------------
55 | ( set -x; which docker-compose )
56 | echo "$(docker-compose --version)"
57 | echo ---------------------------------------------------------------
58 | - name: unpublish
59 | run: |
60 | make unpublish || true
61 | - name: publish
62 | run: |
63 | make publish
64 | - name: docker-compose build
65 | run: |
66 | ( set -x; docker-compose build --no-cache --pull --parallel --progress plain )
67 | - name: docker-compose -f docker-compose.managed.yml up -d
68 | run: |
69 | ( set -x; docker-compose -f docker-compose.managed.yml up -d )
70 | sleep 3
71 | docker-compose logs
72 | - name: smoke test
73 | run: .scripts/smoke.sh 4000
74 | - name: docker-compose down
75 | run: docker-compose down
76 |
--------------------------------------------------------------------------------
/.github/workflows/rebase.yml:
--------------------------------------------------------------------------------
1 | name: Rebase
2 | on:
3 | issue_comment:
4 | types: [created]
5 | jobs:
6 | verify:
7 | name: Verify
8 | runs-on: ubuntu-latest
9 | env:
10 | OK: |
11 | ${{
12 | contains(github.event.comment.body, '/rebase') &&
13 | (github.event.issue.pull_request != '') &&
14 | (
15 | (github.event.issue.author_association == 'OWNER') ||
16 | (github.event.issue.author_association == 'COLLABORATOR') ||
17 | (github.event.issue.author_association == 'CONTRIBUTOR') ||
18 | (github.event.issue.author_association == 'MEMBER')
19 | )
20 | }}
21 | outputs:
22 | ok: ${{ env.OK }}
23 | comment-id: ${{ steps.comment.outputs.comment-id }}
24 | steps:
25 | - name: Create or update comment
26 | id: comment
27 | if: env.OK == 'true'
28 | uses: peter-evans/create-or-update-comment@v2
29 | with:
30 | issue-number: ${{ github.event.issue.number }}
31 | body: Rebasing ...
32 | - name: Info
33 | env:
34 | GITHUB_CONTEXT: ${{ toJson(github) }}
35 | run: echo "$GITHUB_CONTEXT"
36 |
37 | rebase:
38 | name: Rebase
39 | runs-on: ubuntu-latest
40 | needs: verify
41 | if: needs.verify.outputs.ok == 'true'
42 | steps:
43 | - name: with PAT
44 | uses: actions/checkout@v3
45 | with:
46 | token: ${{ secrets.PAT }}
47 | fetch-depth: 0 # otherwise, you will fail to push refs to dest repo
48 | - name: Automatic Rebase
49 | uses: cirrus-actions/rebase@1.8
50 | env:
51 | GITHUB_TOKEN: ${{ secrets.PAT }}
52 |
53 | ok-msg:
54 | name: Success comment
55 | runs-on: ubuntu-latest
56 | needs: [verify, rebase]
57 | if: success() && needs.verify.outputs.ok == 'true'
58 | steps:
59 | - name: Create or update comment
60 | id: comment
61 | uses: peter-evans/create-or-update-comment@v2
62 | with:
63 | issue-number: ${{ github.event.issue.number }}
64 | comment-id: ${{ needs.verify.outputs.comment-id }}
65 | body: |
66 | Rebase complete!
67 | edit-mode: replace
68 | reactions: hooray
69 |
70 | fail-msg:
71 | name: Failure comment
72 | runs-on: ubuntu-latest
73 | needs: [verify, rebase]
74 | if: failure() && needs.verify.outputs.ok == 'true'
75 | steps:
76 | - name: Create or update comment
77 | uses: peter-evans/create-or-update-comment@v2
78 | with:
79 | issue-number: ${{ github.event.issue.number }}
80 | comment-id: ${{ needs.verify.outputs.comment-id }}
81 | body: |
82 | Unable to rebase. Check the action logs for details.
83 | edit-mode: replace
84 | reactions: confused
85 |
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: Publish artifacts
2 |
3 | on:
4 | push:
5 | branches:
6 | - 'main'
7 | paths:
8 | - "router/**"
9 | - "subgraphs/**"
10 | workflow_dispatch:
11 | inputs:
12 | rebuildAll:
13 | description: 'Rebuild/publish all packages (yes/no)'
14 | required: false
15 | default: 'yes'
16 | forceVersionBump:
17 | description: 'Force version bump all packages (yes/no)'
18 | required: false
19 | default: 'no'
20 |
21 | concurrency:
22 | group: ${{ github.workflow }}-${{ github.ref }}
23 | cancel-in-progress: true
24 |
25 | jobs:
26 | good-commits:
27 | name: Check commits
28 | runs-on: ubuntu-latest
29 | outputs:
30 | count: ${{ steps.commits.outputs.count }}
31 | steps:
32 | - uses: actions/checkout@v3
33 | - name: Info
34 | env:
35 | GITHUB_CONTEXT: ${{ toJson(github) }}
36 | run: echo "$GITHUB_CONTEXT"
37 | -
38 | name: check commits
39 | id: commits
40 | env:
41 | COMMITS: ${{ toJson(github.event.commits) }}
42 | run: |
43 | if [[ "$COMMITS" != "null" ]]; then
44 | MATCHING_COMMITS=$(jq -c ' . | map(select( .message | contains("CI: Bump artifact versions") | not )) ' <<< "$COMMITS" )
45 | echo "$MATCHING_COMMITS" | jq
46 | RESULT=$( echo "$MATCHING_COMMITS" | jq ' length ')
47 | echo "RESULT: $RESULT"
48 | echo "count=$RESULT" >> "$GITHUB_OUTPUT"
49 | else
50 | echo "count=1" >> "$GITHUB_OUTPUT"
51 | fi
52 |
53 | found-good-commits:
54 | name: Found good commits
55 | runs-on: ubuntu-latest
56 | needs: good-commits
57 | if: needs.good-commits.outputs.count > 0
58 | steps:
59 | - name: Found good commits
60 | env:
61 | COMMITS: ${{ toJson(github.event.commits) }}
62 | run: |
63 | echo "ALL COMMITS"
64 | echo "$COMMITS" | jq
65 |
66 | build-matrix:
67 | name: Create build matrix
68 | runs-on: ubuntu-latest
69 | needs: good-commits
70 | if: needs.good-commits.outputs.count > 0
71 | outputs:
72 | packages: ${{ steps.matrix.outputs.packages }}
73 | matrix: ${{ steps.matrix.outputs.matrix }}
74 | count: ${{ steps.matrix.outputs.count }}
75 | steps:
76 | - uses: actions/checkout@v3
77 | -
78 | name: generate matrix
79 | id: matrix
80 | env:
81 | GITHUB_SHA: ${{ github.sha }}
82 | GITHUB_EVENT_BEFORE: ${{ github.event.before }}
83 | REBUILD_ALL: ${{ github.event.inputs && github.event.inputs.rebuildAll == 'yes' }}
84 | FORCE_VERSION_BUMP: ${{ github.event.inputs && github.event.inputs.forceVersionBump == 'yes' }}
85 | run: |
86 | if [[ "$FORCE_VERSION_BUMP" == "true" ]]; then
87 | PACKAGES=$(.scripts/build-matrix.sh main force-version-bump)
88 | else
89 | PACKAGES=$(.scripts/build-matrix.sh)
90 | fi
91 |
92 | if [[ "$REBUILD_ALL" == "true" ]]; then
93 | MATRIX="$PACKAGES"
94 | else
95 | MATRIX=$(jq -c ' .include |= map(select(.changes=="1")) ' <<< "$PACKAGES")
96 | fi
97 | echo "packages=$PACKAGES" >> "$GITHUB_OUTPUT"
98 | echo "matrix=$MATRIX" >> "$GITHUB_OUTPUT"
99 | echo "count=$( echo "$MATRIX" | jq ' .include | length ' )" >> "$GITHUB_OUTPUT"
100 | -
101 | name: packages
102 | env:
103 | PACKAGES: ${{ steps.matrix.outputs.packages }}
104 | MATRIX: ${{ steps.matrix.outputs.matrix }}
105 | COUNT: ${{ steps.matrix.outputs.count }}
106 | REBUILD_ALL: ${{ github.event.inputs && github.event.inputs.rebuildAll == 'yes' }}
107 | run: |
108 | echo "--------------------------------"
109 | echo " ALL PACKAGES "
110 | echo "--------------------------------"
111 | echo "$PACKAGES" | jq
112 |
113 | echo "--------------------------------"
114 | echo " BUILD MATRIX "
115 | echo "--------------------------------"
116 | if [[ "$REBUILD_ALL" == "true" ]]; then
117 | echo " *** FORCE REBUILD *** "
118 | fi
119 | if [[ "$COUNT" -eq 0 ]]; then
120 | echo " *** EMPTY MATRIX *** "
121 | fi
122 | echo "$MATRIX" | jq
123 |
124 | found-packages-to-build:
125 | name: Package changes detected
126 | runs-on: ubuntu-latest
127 | needs: build-matrix
128 | if: needs.build-matrix.outputs.count > 0
129 | steps:
130 | - name: Found ${{ needs.build-matrix.outputs.count }} packages to build
131 | env:
132 | PACKAGES: ${{ needs.build-matrix.outputs.packages }}
133 | MATRIX: ${{ needs.build-matrix.outputs.matrix }}
134 | COUNT: ${{ needs.build-matrix.outputs.count }}
135 | REBUILD_ALL: ${{ github.event.inputs && github.event.inputs.rebuildAll == 'yes' }}
136 | run: |
137 | echo "--------------------------------"
138 | echo " ALL PACKAGES "
139 | echo "--------------------------------"
140 | echo "$PACKAGES" | jq
141 |
142 | echo "--------------------------------"
143 | echo " BUILD MATRIX "
144 | echo "--------------------------------"
145 | if [[ "$REBUILD_ALL" == "true" ]]; then
146 | echo " *** FORCE REBUILD *** "
147 | fi
148 | if [[ "$COUNT" -eq 0 ]]; then
149 | echo " *** EMPTY MATRIX *** "
150 | fi
151 | echo "$MATRIX" | jq
152 |
153 | docker-ci:
154 | name: Docker CI
155 | needs: [build-matrix]
156 | if: needs.build-matrix.outputs.count > 0
157 | runs-on: ubuntu-latest
158 | steps:
159 | - name: checkout
160 | uses: actions/checkout@v3
161 | -
162 | name: install rover
163 | run: |
164 | echo ---------------------------------------------------------------
165 | echo rover - installing ...
166 | echo ---------------------------------------------------------------
167 | curl -sSL https://rover.apollo.dev/nix/latest | sh
168 | echo "$HOME/.rover/bin" >> ${GITHUB_PATH}
169 | -
170 | name: update docker-compose
171 | run: |
172 | which docker-compose && exit 0 || true
173 | echo ---------------------------------------------------------------
174 | echo docker-compose - installing ...
175 | echo ---------------------------------------------------------------
176 | BIN_DIR=$HOME/.docker-compose/bin
177 | FILE=$BIN_DIR/docker-compose
178 | mkdir -p $BIN_DIR
179 | set -x
180 | curl -L --fail https://github.com/docker/compose/releases/download/1.29.1/docker-compose-`uname -s`-`uname -m` -o $FILE
181 | chmod +x $FILE
182 | echo "downloaded $($FILE --version)"
183 | echo "$BIN_DIR" >> ${GITHUB_PATH}
184 | set +x
185 | echo ---------------------------------------------------------------
186 | -
187 | name: check tools
188 | run: |
189 | echo ---------------------------------------------------------------
190 | ( set -x; which rover )
191 | echo "$(rover --version)"
192 | echo ---------------------------------------------------------------
193 | ( set -x; which docker-compose )
194 | echo "$(docker-compose --version)"
195 | echo ---------------------------------------------------------------
196 | - name: make supergraph
197 | run: |
198 | make supergraph
199 | - name: docker-compose up -d
200 | run: |
201 | docker-compose up -d
202 | sleep 3
203 | docker-compose logs
204 | - name: smoke test
205 | run: .scripts/smoke.sh 4000
206 | - name: docker-compose down
207 | run: docker-compose down
208 |
209 | build-push:
210 | runs-on: ubuntu-latest
211 | needs: [docker-ci, build-matrix]
212 | if: needs.build-matrix.outputs.count > 0
213 | strategy:
214 | matrix: ${{ fromJson(needs.build-matrix.outputs.matrix) }}
215 | name: Build-push ${{ matrix.name }}
216 | steps:
217 | -
218 | name: Info
219 | run: |
220 | echo --------------------------------------------
221 | echo name: ${{ matrix.name }}
222 | echo dir: ${{ matrix.dir }}
223 | echo versionOld: ${{ matrix.versionOld }}
224 | echo versionNew: ${{ matrix.versionNew }}
225 | echo changes: ${{ matrix.changes }}
226 | -
227 | name: Checkout
228 | uses: actions/checkout@v3
229 | -
230 | name: Set up QEMU
231 | uses: docker/setup-qemu-action@v2
232 | -
233 | name: Set up Docker Buildx
234 | uses: docker/setup-buildx-action@v2
235 | -
236 | name: Login to DockerHub
237 | uses: docker/login-action@v2
238 | with:
239 | username: ${{ secrets.DOCKERHUB_USERNAME }}
240 | password: ${{ secrets.DOCKERHUB_TOKEN }}
241 | -
242 | name: Build and push
243 | id: docker_build
244 | uses: docker/build-push-action@v2
245 | with:
246 | context: ./${{ matrix.dir }}
247 | push: true
248 | tags: prasek/${{ matrix.name }}:${{ matrix.versionNew }}, prasek/${{ matrix.name }}:latest
249 | -
250 | name: Image digest
251 | run: |
252 | SHA=${{ steps.docker_build.outputs.digest }}
253 | echo $SHA
254 | echo "sha-${{ matrix.name }}=$SHA" >> "$GITHUB_OUTPUT"
255 |
256 | k8s:
257 | name: Smoke test - k8s
258 | runs-on: ubuntu-latest
259 | needs: [build-matrix, build-push]
260 | if: |
261 | always() &&
262 | needs.build-matrix.outputs.count > 0
263 | steps:
264 | -
265 | name: Checkout
266 | uses: actions/checkout@v3
267 | -
268 | name: update kind
269 | run: |
270 | curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.11.1/kind-linux-amd64
271 | chmod +x ./kind
272 | mkdir -p $HOME/.kind/bin
273 | mv ./kind $HOME/.kind/bin/kind
274 | echo "PATH=$HOME/.kind/bin:$PATH" >> ${GITHUB_ENV}
275 | -
276 | name: test-k8s
277 | run: .scripts/k8s-ci.sh
278 |
279 | version-bump:
280 | name: "Source PR: Bump versions"
281 | runs-on: ubuntu-latest
282 | needs: [build-matrix, build-push, k8s]
283 | if: needs.build-matrix.outputs.count > 0
284 | env:
285 | PACKAGES: ${{ needs.build-matrix.outputs.packages }}
286 | MATRIX: ${{ needs.build-matrix.outputs.matrix }}
287 | steps:
288 | - uses: actions/checkout@v3
289 | -
290 | name: Info
291 | run: |
292 | echo --------------------------------
293 | echo ALL PACKAGES
294 | echo --------------------------------
295 | echo "$PACKAGES" | jq
296 |
297 | echo --------------------------------
298 | echo BUILD MATRIX
299 | echo --------------------------------
300 | echo "$MATRIX" | jq
301 | -
302 | name: verify-clean
303 | env:
304 | MATRIX: ${{ needs.build-matrix.outputs.matrix }}
305 | run: |
306 | # verify no changes
307 | git diff --exit-code
308 | - name: install kustomize
309 | run: |
310 | BIN_DIR="$HOME/.kustomize/bin"
311 | mkdir -p $BIN_DIR
312 | cd $BIN_DIR
313 | curl -s "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" | bash
314 | echo "PATH=$PATH:$BIN_DIR" >> ${GITHUB_ENV}
315 | -
316 | name: version bump
317 | id: version-bump
318 | run: |
319 | .scripts/bump-package-versions.sh "$MATRIX" >> $GITHUB_ENV
320 | .scripts/bump-image-versions.sh "$MATRIX" >> $GITHUB_ENV
321 | echo "$(git diff)"
322 | -
323 | name: create source repo pull request
324 | id: cpr
325 | uses: peter-evans/create-pull-request@v4
326 | with:
327 | token: ${{ secrets.PAT }}
328 | committer: Supergraph Demo Bot
329 | author: Supergraph Demo Bot
330 | commit-message: 'CI: Bump artifact versions'
331 | title: 'CI: Bump artifact versions'
332 | body: |
333 | ${{ env.PACKAGE_BUMP_PR_MSG }}
334 |
335 | ${{ env.IMAGE_BUMP_PR_MSG }}
336 | branch: version-bump
337 | base: main
338 | delete-branch: true
339 | -
340 | name: enable pull request automerge
341 | if: |
342 | steps.cpr.outputs.pull-request-operation == 'created' ||
343 | steps.cpr.outputs.pull-request-operation == 'updated'
344 | uses: peter-evans/enable-pull-request-automerge@v2
345 | with:
346 | token: ${{ secrets.PAT }}
347 | pull-request-number: ${{ steps.cpr.outputs.pull-request-number }}
348 | merge-method: rebase
349 | - name: Check output
350 | if: |
351 | steps.cpr.outputs.pull-request-operation == 'created' ||
352 | steps.cpr.outputs.pull-request-operation == 'updated'
353 | run: |
354 | echo PR CREATED or MODIFIED
355 | echo "Pull Request Number - ${{ steps.cpr.outputs.pull-request-number }}"
356 | echo "Pull Request URL - ${{ steps.cpr.outputs.pull-request-url }}"
357 | - name: No changes detected
358 | if: |
359 | steps.cpr.outputs.pull-request-operation != 'created' &&
360 | steps.cpr.outputs.pull-request-operation != 'updated'
361 | run: |
362 | echo "No changes detected."
363 |
364 | config-pr:
365 | name: "Config PR: Bump versions"
366 | needs: [build-matrix, build-push, k8s]
367 | env:
368 | PACKAGES: ${{ needs.build-matrix.outputs.packages }}
369 | MATRIX: ${{ needs.build-matrix.outputs.matrix }}
370 | runs-on: ubuntu-latest
371 | steps:
372 | -
373 | name: checkout supergraph-demo-k8s-graph-ops
374 | uses: actions/checkout@v3
375 | with:
376 | token: ${{ secrets.PAT }}
377 | repository: apollographql/supergraph-demo-k8s-graph-ops
378 | -
379 | name: version bump
380 | id: version-bump
381 | run: |
382 | .scripts/bump-image-versions.sh "$MATRIX" >> $GITHUB_ENV
383 | echo "$(git diff)"
384 | -
385 | name: create pull request
386 | id: cpr
387 | uses: peter-evans/create-pull-request@v4
388 | with:
389 | token: ${{ secrets.PAT }}
390 | committer: Supergraph Demo Bot
391 | author: Supergraph Demo Bot
392 | commit-message: Bump image versions
393 | title: Bump image versions
394 | body: |
395 | ${{ env.IMAGE_BUMP_PR_MSG }}
396 | branch: bump-image-versions
397 | base: main
398 | delete-branch: true
399 | -
400 | name: enable pull request automerge
401 | if: |
402 | steps.cpr.outputs.pull-request-operation == 'created' ||
403 | steps.cpr.outputs.pull-request-operation == 'updated'
404 | uses: peter-evans/enable-pull-request-automerge@v2
405 | with:
406 | token: ${{ secrets.PAT }}
407 | repository: apollographql/supergraph-demo-k8s-graph-ops
408 | pull-request-number: ${{ steps.cpr.outputs.pull-request-number }}
409 | merge-method: rebase
410 | -
411 | name: Check output
412 | if: |
413 | steps.cpr.outputs.pull-request-operation == 'created' ||
414 | steps.cpr.outputs.pull-request-operation == 'updated'
415 | run: |
416 | echo PR CREATED or MODIFIED
417 | echo "Pull Request Number - ${{ steps.cpr.outputs.pull-request-number }}"
418 | echo "Pull Request URL - ${{ steps.cpr.outputs.pull-request-url }}"
419 | -
420 | name: No changes detected
421 | if: |
422 | steps.cpr.outputs.pull-request-operation != 'created' &&
423 | steps.cpr.outputs.pull-request-operation != 'updated'
424 | run: |
425 | echo "No changes detected."
426 |
--------------------------------------------------------------------------------
/.github/workflows/subgraph-check.yml:
--------------------------------------------------------------------------------
1 | name: Schema check
2 | on:
3 | pull_request:
4 | branches: [ main ]
5 | paths:
6 | - "subgraphs/**"
7 |
8 | jobs:
9 | subgraph-check:
10 | runs-on: ubuntu-latest
11 | strategy:
12 | matrix:
13 | rover-version: ["latest"]
14 | subgraph: ["products", "users", "inventory"]
15 | env:
16 | APOLLO_KEY: ${{ secrets.APOLLO_KEY }}
17 | APOLLO_GRAPH_REF: supergraph-router@dev
18 |
19 | name: ${{ matrix.subgraph }}
20 | steps:
21 | - uses: actions/checkout@v3
22 | - name: install rover
23 | env:
24 | ROVER_VERSION: ${{ matrix.rover-version }}
25 | run: |
26 | curl -sSL https://rover.apollo.dev/nix/$ROVER_VERSION | sh
27 | echo "PATH=$PATH:$HOME/.rover/bin" >> ${GITHUB_ENV}
28 | - name: run schema check
29 | if: env.APOLLO_KEY != ''
30 | run: |
31 | set -x
32 | rover subgraph check $APOLLO_GRAPH_REF --schema subgraphs/${{ matrix.subgraph }}/${{ matrix.subgraph }}.graphql --name ${{ matrix.subgraph }}
33 | - name: skip check
34 | if: env.APOLLO_KEY == ''
35 | run: echo skipping subgraph check due to public repo fork PR not having secrets access.
36 |
--------------------------------------------------------------------------------
/.github/workflows/subgraph-deploy-publish.yml:
--------------------------------------------------------------------------------
# Publishes every subgraph schema to the Apollo registry whenever a subgraph
# changes on main (or on manual dispatch). One matrix job per subgraph.
name: Subgraph Publish
on:
  push:
    branches: [ main ]
    paths:
      - "subgraphs/**"
  workflow_dispatch: {}

jobs:
  subgraph-publish:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        include:
          - subgraph: "products"
            routing_url: "http://products:4000/graphql"
            rover-version: "latest"
          - subgraph: "users"
            routing_url: "http://users:4000/graphql"
            rover-version: "latest"
          - subgraph: "inventory"
            routing_url: "http://inventory:4000/graphql"
            rover-version: "latest"
    env:
      APOLLO_KEY: ${{ secrets.APOLLO_KEY }}
      APOLLO_GRAPH_REF: supergraph-router@dev

    name: ${{ matrix.subgraph }}

    steps:
      - uses: actions/checkout@v3

      # install the rover CLI and put it on the PATH for later steps
      - name: install rover
        env:
          ROVER_VERSION: ${{ matrix.rover-version }}
        run: |
          curl -sSL https://rover.apollo.dev/nix/$ROVER_VERSION | sh
          echo "PATH=$PATH:$HOME/.rover/bin" >> ${GITHUB_ENV}

      # check the schema against the registry before deploying/publishing
      - name: subgraph check
        run: |
          set -x
          rover subgraph check $APOLLO_GRAPH_REF --schema subgraphs/${{ matrix.subgraph }}/${{ matrix.subgraph }}.graphql --name ${{ matrix.subgraph }}

      - name: "TODO: deploy your subgraph to dev"
        run: |
          echo "TODO: ADD YOUR DEPLOYMENT STEPS HERE"
          echo "which should only complete when the new version of the subgraph is deployed"
          echo "so the subgraph schema can be published AFTER the subgraph service is deployed"
          echo ""
          echo "see https://github.com/apollographql/supergraph-demo-k8s-graphops"
          echo "for a more scalable way of doing this in a Kubernetes-native way."

      # publish only after the deployment step above has completed
      - name: subgraph publish
        run: |
          set -x
          rover subgraph publish $APOLLO_GRAPH_REF --routing-url ${{ matrix.routing_url }} --schema subgraphs/${{ matrix.subgraph }}/${{ matrix.subgraph }}.graphql --name ${{ matrix.subgraph }}
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Logs
2 | logs
3 | *.log
4 | npm-debug.log*
5 | yarn-debug.log*
6 | yarn-error.log*
7 | lerna-debug.log*
8 |
9 | graph-api.env
10 | docker.secrets
11 |
12 | router/router
13 |
14 | # Diagnostic reports (https://nodejs.org/api/report.html)
15 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
16 |
17 | # Runtime data
pids
*.pid
19 | *.seed
20 | *.pid.lock
21 |
22 | # Directory for instrumented libs generated by jscoverage/JSCover
23 | lib-cov
24 |
25 | # Coverage directory used by tools like istanbul
26 | coverage
27 | *.lcov
28 |
29 | # nyc test coverage
30 | .nyc_output
31 |
32 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
33 | .grunt
34 |
35 | .DS_Store
36 |
37 | # Bower dependency directory (https://bower.io/)
38 | bower_components
39 |
40 | # node-waf configuration
41 | .lock-wscript
42 |
43 | # Compiled binary addons (https://nodejs.org/api/addons.html)
44 | build/Release
45 |
46 | # Dependency directories
47 | node_modules/
48 | jspm_packages/
49 |
50 | # lock file for demo to keep it simple
51 | package-lock.json
52 |
53 | # TypeScript v1 declaration files
54 | typings/
55 |
56 | # TypeScript cache
57 | *.tsbuildinfo
58 |
59 | # Optional npm cache directory
60 | .npm
61 |
62 | # Optional eslint cache
63 | .eslintcache
64 |
65 | # Microbundle cache
66 | .rpt2_cache/
67 | .rts2_cache_cjs/
68 | .rts2_cache_es/
69 | .rts2_cache_umd/
70 |
71 | # Optional REPL history
72 | .node_repl_history
73 |
74 | # Output of 'npm pack'
75 | *.tgz
76 |
77 | # Yarn Integrity file
78 | .yarn-integrity
79 |
80 | # dotenv environment variables file
81 | .env
82 | .env.test
83 |
84 | # parcel-bundler cache (https://parceljs.org/)
85 | .cache
86 |
87 | # Next.js build output
88 | .next
89 |
90 | # Nuxt.js build / generate output
91 | .nuxt
92 | dist
93 |
94 | # Gatsby files
95 | .cache/
96 | # Comment in the public line in if your project uses Gatsby and *not* Next.js
97 | # https://nextjs.org/blog/next-9-1#public-directory-support
98 | # public
99 |
100 | # vuepress build output
101 | .vuepress/dist
102 |
103 | # Serverless directories
104 | .serverless/
105 |
106 | # FuseBox cache
107 | .fusebox/
108 |
109 | # DynamoDB Local files
110 | .dynamodb/
111 |
112 | # TernJS port file
113 | .tern-port
114 |
--------------------------------------------------------------------------------
/.scripts/build-matrix.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | GITHUB_SHA="${GITHUB_SHA:-HEAD}"
4 |
5 | if [[ "$1" != "local" ]]; then
6 | GITHUB_EVENT_BEFORE=${GITHUB_EVENT_BEFORE:-HEAD^}
7 | fi
8 |
9 | FORCE_VERSION_BUMP=$2
10 |
11 | # for dynamic build matrix in GitHub actions, see:
12 | # https://github.community/t/check-pushed-file-changes-with-git-diff-tree-in-github-actions/17220/10
13 |
14 | if [[ -n "$GITHUB_BASE_REF" ]]; then
15 | # Pull Request
16 | >&2 echo "fetching GITHUB_BASE_REF: $GITHUB_BASE_REF"
17 | git fetch origin $GITHUB_BASE_REF --depth=1
18 | else
19 | # Push
20 | if [[ -n "$GITHUB_EVENT_BEFORE" ]]; then
21 | # only fetch in CI if not present
22 | if [[ "$(git cat-file -t $GITHUB_EVENT_BEFORE)" != "commit" ]]; then
23 | >&2 echo "fetching GITHUB_EVENT_BEFORE: $GITHUB_EVENT_BEFORE"
24 | git fetch origin $GITHUB_EVENT_BEFORE --depth=1
25 | fi
26 | fi
27 |
28 | >&2 echo "found GITHUB_EVENT_BEFORE: $GITHUB_EVENT_BEFORE"
29 | >&2 echo "found GITHUB_SHA: $GITHUB_SHA"
30 | fi
31 |
32 | function diff_name_only() {
33 | if [[ -n "$GITHUB_BASE_REF" ]]; then
34 | # Pull Request
35 | git diff --name-only origin/$GITHUB_BASE_REF $GITHUB_SHA $1
36 | else
37 | # Push
38 | git diff --name-only $GITHUB_EVENT_BEFORE $GITHUB_SHA $1
39 | fi
40 | }
41 |
42 | WORKDIR=$(pwd)
43 | TMPFILE=$(mktemp)
44 |
45 | cat >$TMPFILE <&2 echo "------------------------------"
58 | >&2 echo "$DIR changes"
59 | >&2 echo "------------------------------"
60 | if [[ -n "$DIFF" || "$FORCE_VERSION_BUMP" == "force-version-bump" ]]; then
61 | >&2 echo "$DIFF"
62 | (cd $DIR; cp package.json package.json.bak)
63 | NEW_VERSION=$(cd $DIR; npm version --git-tag-version=false patch | sed 's|^v||')
64 | (cd $DIR; rm package.json; mv package.json.bak package.json)
65 | CHANGES=1
66 | else
67 | NEW_VERSION=$OLD_VERSION
68 | CHANGES=0
69 | fi
70 |
71 | cat >>$TMPFILE < ${NEW_VERSION} 🚀"
20 | if [[ "$NAME" == "supergraph-router" ]]; then
21 | (set -x; cd ${ROOT_DIR}router/dev; kustomize edit set image prasek/$NAME:latest=prasek/$NAME:$NEW_VERSION)
22 | else
23 | (set -x; cd ${ROOT_DIR}subgraphs/dev; kustomize edit set image prasek/$NAME:latest=prasek/$NAME:$NEW_VERSION)
24 | fi
25 | fi
26 | done
27 |
28 | echo "EOF"
29 |
30 |
--------------------------------------------------------------------------------
/.scripts/bump-package-versions.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # inputs from .scripts/build-matrix.sh
4 | MATRIX=$1
5 |
6 | echo "PACKAGE_BUMP_PR_MSG< ${NEW_VERSION} 🚀"
19 | ( set -x; cd $DIR; >&2 npm version --git-tag-version=false v${NEW_VERSION} )
20 | fi
21 | done
22 |
23 | echo "EOF"
24 |
--------------------------------------------------------------------------------
/.scripts/check-all.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Runs `rover subgraph check` against the Apollo registry for every subgraph
# listed in .scripts/subgraphs.sh. Requires APOLLO_KEY / APOLLO_GRAPH_REF
# (resolved, prompting if needed, by graph-api-env.sh).

set -e

echo "======================================="
echo "SUBGRAPH CHECK"
echo "======================================="

# subgraphs.sh defines the `subgraphs` array and per-subgraph url_* vars;
# graph-api-env.sh exports APOLLO_KEY and APOLLO_GRAPH_REF.
source "$(dirname "$0")/subgraphs.sh"
source "$(dirname "$0")/graph-api-env.sh"

echo "checking all subgraphs:"
# Quote the array expansion so entries survive word splitting and globbing.
for subgraph in "${subgraphs[@]}"; do
  echo "---------------------------------------"
  echo "subgraph: ${subgraph}"
  echo "---------------------------------------"
  (set -x; ${ROVER_BIN:-'rover'} subgraph check "${APOLLO_GRAPH_REF}" --schema "subgraphs/${subgraph}/${subgraph}.graphql" --name "$subgraph")
done
--------------------------------------------------------------------------------
/.scripts/check-products.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Runs `rover subgraph check` for just the products subgraph
# (see check-all.sh for the all-subgraphs variant).

# fail fast, consistent with check-all.sh
set -e

echo "======================================="
echo "SUBGRAPH CHECK"
echo "======================================="

# subgraphs.sh defines subgraph metadata; graph-api-env.sh exports
# APOLLO_KEY and APOLLO_GRAPH_REF.
source "$(dirname "$0")/subgraphs.sh"
source "$(dirname "$0")/graph-api-env.sh"

echo "---------------------------------------"
echo "subgraph: products"
echo "---------------------------------------"
( set -x; ${ROVER_BIN:-'rover'} subgraph check "${APOLLO_GRAPH_REF}" --schema subgraphs/products/products.graphql --name products )
--------------------------------------------------------------------------------
/.scripts/compose.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Composes the supergraph schema from ./supergraph.yaml into
# ./supergraph.graphql, then syncs the result into the k8s router overlays.

set -e

divider=-------------------------------------------------------------------------------------------

echo $divider
( set -x; ${ROVER_BIN:-'rover'} supergraph compose --config ./supergraph.yaml > ./supergraph.graphql)
echo $divider

# keep both kustomize overlays in sync with the freshly composed schema
for overlay in k8s/router/base k8s/router/dev; do
  cp supergraph.graphql "$overlay"
done
--------------------------------------------------------------------------------
/.scripts/config.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Emits a supergraph config (YAML) on stdout for `rover supergraph compose`,
# with one entry per subgraph defined in .scripts/subgraphs.sh.

source "$(dirname "$0")/subgraphs.sh"

echo "subgraphs:"
# Quote the array expansion so names survive word splitting and globbing.
for subgraph in "${subgraphs[@]}"; do
  # subgraphs.sh defines url_<name> variables; resolve via indirect expansion.
  url="url_$subgraph"
  echo "  ${subgraph}:"
  echo "    routing_url: ${!url}"
  echo "    schema:"
  echo "      file: ./subgraphs/${subgraph}/${subgraph}.graphql"
done
--------------------------------------------------------------------------------
/.scripts/docker-prune.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Force-removes all local docker containers and any dangling volumes.
# `xargs -r` (GNU: --no-run-if-empty) skips the docker call when there is
# nothing to clean up, avoiding "requires at least 1 argument" errors that
# the unquoted $(docker ps -aq) substitution produced on a clean host.

docker ps -aq | xargs -r docker kill
docker ps -aq | xargs -r docker rm
docker volume ls -qf dangling=true | xargs -r docker volume rm
--------------------------------------------------------------------------------
/.scripts/graph-api-env.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Resolves APOLLO_KEY and APOLLO_GRAPH_REF, prompting interactively when not
# running in CI, then writes both to ./graph-api.env — used by
# docker-compose.managed.yml via env_file, and reloaded as defaults next run.
# Optional positional args: $1 = graph ref, $2 = API key; pass the literal
# string "default" to accept the value already present in the environment.

if [[ "${CI}" != "true" ]]; then

  # optional overrides
  graph=$1
  key=$2

  # load defaults if present
  if ls graph-api.env > /dev/null 2>&1; then
    eval "$(cat graph-api.env)"
  fi

  # --------------------------------------------------------------------------
  # APOLLO_KEY
  # --------------------------------------------------------------------------
  # "default" means: take the APOLLO_KEY already in the environment, if any;
  # otherwise fall through to the interactive prompt below.
  if [[ "$key" == "default" ]]; then
    if [[ -n $APOLLO_KEY ]]; then
      key=$APOLLO_KEY
      echo "---------------------------------------"
      echo "Using default APOLLO_KEY"
      echo "---------------------------------------"
    else
      unset key
    fi
  fi

  if [[ -z "${key}" ]]; then
    echo "---------------------------------------"
    echo "Enter your APOLLO_KEY"
    echo "---------------------------------------"
    echo "Go to your graph settings in https://studio.apollographql.com/"
    echo "then create a Graph API Key with Contributor permissions"
    echo "(for metrics reporting) and enter it at the prompt below."

    if [[ -n "$APOLLO_KEY" ]]; then
      echo ""
      echo "press to use existing key: *************** (from ./graph-api.env)"
    fi

    # -s: don't echo the secret back to the terminal
    read -s -p "> " key
    echo
    # empty input falls back to the previously saved key, if any
    if [[ -z "$key" ]]; then
      if [[ -n "$APOLLO_KEY" ]]; then
        key=$APOLLO_KEY
      else
        >&2 echo "---------------------------------------"
        >&2 echo "APOLLO_KEY not found"
        >&2 echo "---------------------------------------"
        exit 1
      fi
    fi
  fi

  export APOLLO_KEY=$key

  # --------------------------------------------------------------------------
  # APOLLO_GRAPH_REF
  # --------------------------------------------------------------------------
  echo ""
  # same "default" convention as for the key above
  if [[ "$graph" == "default" ]]; then
    if [[ -n $APOLLO_GRAPH_REF ]]; then
      graph=$APOLLO_GRAPH_REF
      echo "---------------------------------------"
      echo "Using APOLLO_GRAPH_REF: ${graph}"
      echo "---------------------------------------"
    else
      unset graph
    fi
  fi

  if [[ -z "${graph}" ]]; then
    echo "---------------------------------------"
    echo "Enter your APOLLO_GRAPH_REF"
    echo "---------------------------------------"
    echo "Go to your graph settings in https://studio.apollographql.com/"
    echo "then copy your Graph NAME and optionally @ and enter it at the prompt below."
    echo "@ will default to @current, if omitted."
    echo ""
    echo "Enter the @ of a federated graph in Apollo Studio:"
    if [[ -n "$APOLLO_GRAPH_REF" ]]; then
      echo ""
      echo "press for default: $APOLLO_GRAPH_REF"
    fi
    read -p "> " graph
    # empty input falls back to the previously saved graph ref, if any
    if [[ -z "$graph" ]]; then
      if [[ -n "$APOLLO_GRAPH_REF" ]]; then
        graph=$APOLLO_GRAPH_REF
      else
        >&2 echo "---------------------------------------"
        >&2 echo "APOLLO_GRAPH_REF not found"
        >&2 echo "---------------------------------------"
        exit 1
      fi
    fi
  fi

  export APOLLO_GRAPH_REF=$graph
fi

# for docker-compose.managed.yaml env_file and to save defaults for next time
echo "APOLLO_KEY=${APOLLO_KEY}" > graph-api.env
echo "APOLLO_GRAPH_REF=${APOLLO_GRAPH_REF}" >> graph-api.env

# fail loudly if either value is still missing (e.g. CI without secrets set)
ok=1
if [[ -z "${APOLLO_KEY}" ]]; then
  >&2 echo "---------------------------------------"
  >&2 echo "APOLLO_KEY not found"
  >&2 echo "---------------------------------------"
  ok=0
fi

if [[ -z "${APOLLO_GRAPH_REF}" ]]; then
  >&2 echo "---------------------------------------"
  >&2 echo "APOLLO_GRAPH_REF not found"
  >&2 echo "---------------------------------------"
  ok=0
fi

if [[ $ok -eq 0 ]]; then
  exit 1
fi
--------------------------------------------------------------------------------
/.scripts/k8s-ci.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# CI entry point: bring the kind cluster up (overlay name in $1), run the
# smoke test, always tear the cluster down, and report the smoke-test status.

.scripts/k8s-up.sh $1
.scripts/k8s-smoke.sh
status=$?
.scripts/k8s-down.sh
exit $status
--------------------------------------------------------------------------------
/.scripts/k8s-down.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Deletes the local kind cluster created by .scripts/k8s-up.sh.
kind delete cluster
--------------------------------------------------------------------------------
/.scripts/k8s-graph-dump.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Dumps graph workload state from the current kube context for debugging:
# deployments, ingress, pods, and container logs for the router and each
# subgraph. $1 is a banner message echoed at the start and end of the dump.
#
# Fix: the script previously ended with `exit $code`, but $code is never set
# here — the empty expansion made it a bare `exit` (status of the last
# command). The stray line is removed; behavior is unchanged.

echo ===============================================================
echo "$1"
echo ===============================================================
kubectl get all
kubectl get ingress

echo ---------------------------------------------------------------
kubectl describe deployment.apps/router-deployment
echo ---------------------------------------------------------------
kubectl describe deployment.apps/inventory
echo ---------------------------------------------------------------
kubectl describe deployment.apps/products
echo ---------------------------------------------------------------
kubectl describe deployment.apps/users

echo ---------------------------------------------------------------
kubectl describe pod

echo ---------------------------------------------------------------
kubectl logs -l app=subgraph-users
echo ---------------------------------------------------------------
kubectl logs -l app=subgraph-inventory
echo ---------------------------------------------------------------
kubectl logs -l app=subgraph-products
echo ---------------------------------------------------------------
kubectl logs -l app=router

echo ===============================================================
echo "$1"
echo ===============================================================
--------------------------------------------------------------------------------
/.scripts/k8s-nginx-dump.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Dumps the state of the nginx ingress controller (deployment, pods, logs)
# in the ingress-nginx namespace for debugging. $1 is a banner message
# echoed at the start and end of the dump.
#
# Fix: the script previously ended with `exit $code`, but $code is never set
# here — the empty expansion made it a bare `exit` (status of the last
# command). The stray line is removed; behavior is unchanged.

echo ===============================================================
echo "$1"
echo ===============================================================
kubectl get -n ingress-nginx all

echo ---------------------------------------------------------------
kubectl describe -n ingress-nginx deployment.apps/ingress-nginx-controller

echo ---------------------------------------------------------------
kubectl describe -n ingress-nginx pod

echo ---------------------------------------------------------------
kubectl logs -n ingress-nginx -l app.kubernetes.io/name=ingress-nginx

echo ===============================================================
echo "$1"
echo ===============================================================
--------------------------------------------------------------------------------
/.scripts/k8s-smoke.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Polls the router through the nginx ingress (port 80) until the smoke test
# passes or attempts run out; dumps nginx and graph state on failure.

attempts=60
status=1

while [[ $attempts -gt 0 && $status -ne 0 ]]; do
  kubectl get all
  .scripts/smoke.sh 80

  status=$?

  # success: stop polling immediately
  if [[ $status -eq 0 ]]; then
    exit $status
  fi

  attempts=$((attempts - 1))
  sleep 2
done

.scripts/k8s-nginx-dump.sh "smoke test failed"

.scripts/k8s-graph-dump.sh "smoke test failed"

exit $status
--------------------------------------------------------------------------------
/.scripts/k8s-up.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Creates a local kind cluster and deploys the graph: infra (nginx ingress),
# subgraphs, then the router. $1 selects the kustomize overlay (default: base).

KUSTOMIZE_ENV="${1:-base}"

echo "Using ${KUSTOMIZE_ENV}/kustomization.yaml"

kind --version

# delete any pre-existing kind cluster so we always start clean.
# Fix: the original `if [ $(kind get clusters | grep -E 'kind') ]` breaks
# with "too many arguments" when the unquoted substitution yields more than
# one word; testing grep's exit status directly is robust.
if kind get clusters | grep -qE 'kind'
then
  kind delete cluster --name kind
fi
kind create cluster --image kindest/node:v1.21.1 --config=k8s/clusters/kind-cluster.yaml --wait 5m

# important: use a newer kubectl version that supports extended kustomize resources
kubectl apply -k k8s/infra/$KUSTOMIZE_ENV

kubectl apply -k k8s/subgraphs/$KUSTOMIZE_ENV

echo waiting for nginx controller to start ...

kubectl wait --namespace ingress-nginx \
  --for=condition=ready pod \
  --selector=app.kubernetes.io/component=controller \
  --timeout=120s

# the router ingress can only be applied once the nginx admission webhook is
# ready, so retry for up to ~2 minutes, echoing output only when it changes
retry=60
code=1
last=""
until [[ $retry -le 0 || $code -eq 0 ]]
do
  result=$(kubectl apply -k k8s/router/$KUSTOMIZE_ENV 2>/dev/null)
  code=$?

  if [[ "$result" != "$last" ]]
  then
    echo "$result"
  fi
  last=$result

  if [[ $code -eq 0 ]]
  then
    exit $code
  fi

  ((retry--))
  echo waiting for nginx admission controller to start ...
  sleep 2
done

.scripts/k8s-nginx-dump.sh "timeout waiting for nginx admission controller to start"

.scripts/k8s-graph-dump.sh "timeout waiting for nginx admission controller to start"

exit $code
--------------------------------------------------------------------------------
/.scripts/publish.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Publishes every subgraph schema from .scripts/subgraphs.sh to the Apollo
# registry under $APOLLO_GRAPH_REF (resolved by graph-api-env.sh).

echo "======================================="
echo "PUBLISH SUBGRAPHS TO APOLLO REGISTRY"
echo "======================================="

source "$(dirname "$0")/subgraphs.sh"
source "$(dirname "$0")/graph-api-env.sh"

# note: use --allow-invalid-routing-url to allow localhost without confirmation prompt

# Quote the array expansion so names survive word splitting and globbing.
for subgraph in "${subgraphs[@]}"; do
  echo "---------------------------------------"
  echo "subgraph: ${subgraph}"
  echo "---------------------------------------"
  # subgraphs.sh defines url_<name> variables; resolve via indirect expansion
  url="url_$subgraph"
  schema="subgraphs/$subgraph/$subgraph.graphql"
  (set -x; ${ROVER_BIN:-'rover'} subgraph publish "${APOLLO_GRAPH_REF}" \
    --routing-url "${!url}" \
    --schema "${schema}" \
    --name "${subgraph}" \
    --allow-invalid-routing-url \
    --convert)
  echo ""
done
--------------------------------------------------------------------------------
/.scripts/query.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Sends a sample allProducts query to the router/gateway on localhost and
# prints the raw JSON response. $1 overrides the port (default 4000).

PORT="${1:-4000}"

read -r -d '' QUERY <<"EOF"
{
  allProducts {
    id,
    sku,
    createdBy {
      email,
      totalProductsCreated
    }
  }
}
EOF

# collapse the query onto a single line so it can be embedded in a JSON string
QUERY=$(echo "${QUERY}" | awk -v ORS= -v OFS= '{$1=$1}1')

echo -------------------------------------------------------------------------------------------
ACT=$(set -x; curl -X POST -H 'Content-Type: application/json' --data '{ "query": "'"${QUERY}"'" }' http://localhost:$PORT/)
echo ""
echo "Result:"
echo "$ACT"
echo -------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.scripts/smoke.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | PORT="${1:-4000}"
4 | COUNT="${2:-1}"
5 |
6 | OK_CHECK="\xE2\x9C\x85"
7 | FAIL_MARK="\xE2\x9D\x8C"
8 | ROCKET="\xF0\x9F\x9A\x80"
9 |
err() {
  # Print an ERROR-prefixed message (red when the terminal supports color)
  # to stderr, then abort the script.
  local red reset
  red=$(tput setaf 1 2>/dev/null || echo '')
  reset=$(tput sgr0 2>/dev/null || echo '')
  echo "${red}ERROR${reset}: $1" >&2
  exit 1
}
16 |
exec_curl() {
  # Run the given curl command, capturing stdout in RES and a human-readable
  # message for common curl failure codes in ERR. Echoes the failing command
  # on error. Returns curl's exit code.
  ERR=""
  RES=$("$@" 2>/dev/null)
  EXIT_CODE=$?
  if [ $EXIT_CODE -ne 0 ]; then
    echo "$@"
    case $EXIT_CODE in
      7)  ERR="CURL ERROR 7: Failed to connect() to host or proxy." ;;
      52) ERR="CURL ERROR 52: Empty reply from server." ;;
      56) ERR="CURL ERROR 56: Recv failure: Connection reset by peer." ;;
      *)  ERR="CURL ERROR $EXIT_CODE\n" ;;
    esac
  fi
  return $EXIT_CODE
}
35 |
36 | TESTS=(1 2 4)
37 | DEFER_TESTS=(5 6)
38 |
39 | HAS_DEFER=0
40 |
41 | # introspection query
42 | exec_curl curl -X POST http://localhost:$PORT/ \
43 | -H "Content-Type: application/json" \
44 | --data '{ "query": "query { __schema { directives { name }}}" }'
45 | if [ $? != 0 ]; then err "$ERR"; fi
46 | printf "$OK_CHECK Introspection Success!\n"
47 |
48 | if echo "$RES" | grep -q '{"name":"defer"}'; then HAS_DEFER=1; fi
49 |
50 | if [ $HAS_DEFER -eq 1 ]; then
51 | echo " - has @defer support"
52 | TESTS=("${TESTS[@]}" "${DEFER_TESTS[@]}")
53 | else
54 | echo " - no @defer support"
55 | fi
56 | printf "\n"
57 |
58 | # --------------------------------------------------------------------
59 | # TEST 1
60 | # --------------------------------------------------------------------
61 | DESCR_1="allProducts with delivery"
62 | OPNAME_1="allProdDelivery"
63 | ACCEPT_1="application/json"
64 | read -r -d '' QUERY_1 <<"EOF"
65 | query allProdDelivery {
66 | allProducts {
67 | delivery {
68 | estimatedDelivery,
69 | fastestDelivery
70 | },
71 | createdBy {
72 | name,
73 | email
74 | }
75 | }
76 | }
77 | EOF
78 |
79 | OP_1=equals
80 |
81 | read -r -d '' EXP_1 <<"EOF"
82 | {"data":{"allProducts":[{"delivery":{"estimatedDelivery":"6/25/2021","fastestDelivery":"6/24/2021"},"createdBy":{"name":"Apollo Studio Support","email":"support@apollographql.com"}},{"delivery":{"estimatedDelivery":"6/25/2021","fastestDelivery":"6/24/2021"},"createdBy":{"name":"Apollo Studio Support","email":"support@apollographql.com"}}]}}
83 | EOF
84 |
85 | # --------------------------------------------------------------------
86 | # TEST 2
87 | # --------------------------------------------------------------------
88 | DESCR_2="allProducts with totalProductsCreated"
89 | OPNAME_2="allProdCreated"
90 | ACCEPT_2="application/json"
91 | read -r -d '' QUERY_2 <<"EOF"
92 | query allProdCreated {
93 | allProducts {
94 | id,
95 | sku,
96 | createdBy {
97 | email,
98 | totalProductsCreated
99 | }
100 | }
101 | }
102 | EOF
103 |
104 | OP_2=equals
105 |
106 | read -r -d '' EXP_2 <<"EOF"
107 | {"data":{"allProducts":[{"id":"apollo-federation","sku":"federation","createdBy":{"email":"support@apollographql.com","totalProductsCreated":1337}},{"id":"apollo-studio","sku":"studio","createdBy":{"email":"support@apollographql.com","totalProductsCreated":1337}}]}}
108 | EOF
109 |
110 | # --------------------------------------------------------------------
111 | # TEST 3 - @inaccessible in subgraphs - disabled for Fed 1
112 | # --------------------------------------------------------------------
113 | DESCR_3="hidden: String @inaccessible should return error"
114 | OPNAME_3="inaccessibleError"
115 | ACCEPT_3="application/json"
116 | read -r -d '' QUERY_3 <<"EOF"
117 | query inaccessibleError {
118 | allProducts {
119 | id,
120 | hidden,
121 | dimensions {
122 | size,
123 | weight
124 | }
125 | }
126 | }
127 | EOF
128 |
129 | OP_3=contains
130 |
131 | read -r -d '' EXP_3 <<"EOF"
132 | Cannot query field \"hidden\" on type \"ProductItf\".
133 | EOF
134 |
135 | # --------------------------------------------------------------------
136 | # TEST 4
137 | # --------------------------------------------------------------------
138 | DESCR_4="exampleQuery with pandas"
139 | OPNAME_4="exampleQuery"
140 | ACCEPT_4="application/json"
141 | read -r -d '' QUERY_4 <<"EOF"
142 | query exampleQuery {
143 | allProducts {
144 | id,
145 | sku,
146 | dimensions {
147 | size,
148 | weight
149 | }
150 | delivery {
151 | estimatedDelivery,
152 | fastestDelivery
153 | }
154 | }
155 | allPandas {
156 | name,
157 | favoriteFood
158 | }
159 | }
160 | EOF
161 |
162 | OP_4=equals
163 |
164 | read -r -d '' EXP_4 <<"EOF"
165 | {"data":{"allProducts":[{"id":"apollo-federation","sku":"federation","dimensions":{"size":"1","weight":1},"delivery":{"estimatedDelivery":"6/25/2021","fastestDelivery":"6/24/2021"}},{"id":"apollo-studio","sku":"studio","dimensions":{"size":"1","weight":1},"delivery":{"estimatedDelivery":"6/25/2021","fastestDelivery":"6/24/2021"}}],"allPandas":[{"name":"Basi","favoriteFood":"bamboo leaves"},{"name":"Yun","favoriteFood":"apple"}]}}
166 | EOF
167 |
168 | # --------------------------------------------------------------------
169 | # TEST 5
170 | # --------------------------------------------------------------------
171 | DESCR_5="defer variation query"
172 | OPNAME_5="deferVariation"
173 | ACCEPT_5="multipart/mixed; deferSpec=20220824, application/json"
174 | read -r -d '' QUERY_5 <<"EOF"
175 | query deferVariation {
176 | allProducts {
177 | variation {
178 | ...MyFragment @defer
179 | },
180 | sku,
181 | id
182 | }
183 | }
184 | fragment MyFragment on ProductVariation {
185 | id
186 | }
187 | EOF
188 | OP_5=equals
189 |
190 | IFS= read -r -d '' EXP_5 <<"EOF"
191 |
192 | --graphql
193 | content-type: application/json
194 |
195 | {"data":{"allProducts":[{"sku":"federation","id":"apollo-federation"},{"sku":"studio","id":"apollo-studio"}]},"hasNext":true}
196 | --graphql
197 | content-type: application/json
198 |
199 | {"hasNext":false,"incremental":[{"data":{"id":"OSS"},"path":["allProducts",0,"variation"]},{"data":{"id":"platform"},"path":["allProducts",1,"variation"]}]}
200 | --graphql--
201 | EOF
202 |
203 | # --------------------------------------------------------------------
204 | # TEST 6
205 | # --------------------------------------------------------------------
206 | DESCR_6="deferred user query"
207 | OPNAME_6="deferUser"
208 | ACCEPT_6="multipart/mixed; deferSpec=20220824, application/json"
209 | read -r -d '' QUERY_6 <<"EOF"
210 | query deferUser {
211 | allProducts {
212 | createdBy {
213 | ...MyFragment @defer
214 | }
215 | sku
216 | id
217 | }
218 | }
219 |
220 | fragment MyFragment on User { name }
221 | EOF
222 |
223 | OP_6=equals
224 |
225 | IFS= read -r -d '' EXP_6 <<"EOF"
226 |
227 | --graphql
228 | content-type: application/json
229 |
230 | {"data":{"allProducts":[{"sku":"federation","id":"apollo-federation"},{"sku":"studio","id":"apollo-studio"}]},"hasNext":true}
231 | --graphql
232 | content-type: application/json
233 |
234 | {"hasNext":false,"incremental":[{"data":{"name":"Apollo Studio Support"},"path":["allProducts",0,"createdBy"]},{"data":{"name":"Apollo Studio Support"},"path":["allProducts",1,"createdBy"]}]}
235 | --graphql--
236 | EOF
237 |
238 | set -e
239 |
240 | printf "Running smoke tests ... $ROCKET $ROCKET $ROCKET\n"
241 | trap 'rm -f *.tmp' EXIT
242 | sleep 2
243 |
244 | run_tests ( ){
245 | for (( i=1; i<=$COUNT; i++ )); do
246 | for test in ${TESTS[@]}; do
247 | descr_var="DESCR_$test"
248 | query_var="QUERY_$test"
249 | exp_var="EXP_$test"
250 | op_var="OP_$test"
251 | opname_var="OPNAME_$test"
252 | accept_var="ACCEPT_$test"
253 | is_slow_var="ISSLOW_$test"
254 |
255 | DESCR="${!descr_var}"
256 | QUERY=$(echo "${!query_var}" | tr '\n' ' ' | awk '$1=$1')
257 | EXP="${!exp_var}"
258 | OP="${!op_var}"
259 | OPNAME="${!opname_var}"
260 | ACCEPT="${!accept_var}"
261 | ISSLOW="${!is_slow_var}"
262 | CMD=(curl -i -X POST -H "Content-Type: application/json" -H "apollographql-client-name: smoke-test" -H "accept:${ACCEPT}" --data "{ \"query\": \"${QUERY}\", \"operationName\": \"$OPNAME\" }" http://localhost:$PORT/ )
263 |
264 | if [ $i -gt 1 ]; then
265 | if [ "$ISSLOW" == "true" ]; then
266 | continue
267 | fi
268 | fi
269 |
270 | if [ $COUNT -le 1 ]; then
271 | echo ""
272 | echo "=============================================================="
273 | echo "TEST $test: $DESCR"
274 | echo "=============================================================="
275 | echo "${CMD[@]}"
276 | fi
277 |
278 | # execute operation
279 | set +e
280 | RESULT=$("${CMD[@]}" 2>/dev/null)
281 | EXIT_CODE=$?
282 | if [ $EXIT_CODE -ne 0 ]; then
283 | if [ $EXIT_CODE -eq 7 ]; then
284 | printf "CURL ERROR 7: Failed to connect() to host or proxy.\n"
285 | elif [ $EXIT_CODE -eq 52 ]; then
286 | printf "CURL ERROR 52: Empty reply from server.\n"
287 | elif [ $EXIT_CODE -eq 56 ]; then
288 | printf "CURL ERROR 56: Recv failure: Connection reset by peer.\n"
289 | else
290 | printf "CURL ERROR $EXIT_CODE\n"
291 | fi
292 | printf "${RESULT}"
293 | printf '\n'
294 | exit 1
295 | fi
296 | set -e
297 |
298 | echo "$RESULT" | awk -v bl=1 'bl{bl=0; h=($0 ~ /HTTP\/1/)} /^\r?$/{bl=1} {print $0>(h?"headers.tmp":"body.tmp")}'
299 | HEADERS=$( unpublish.log
11 | echo "subgraphs:"
12 | for subgraph in ${subgraphs[@]}; do
13 | ( set -x; ${ROVER_BIN:-'rover'} subgraph delete ${APOLLO_GRAPH_REF} --name ${subgraph} --confirm 2>> unpublish.log )
14 | done
15 |
16 | if grep -Eq 'error:(.+) Graph has no implementing services' unpublish.log; then
17 | echo "Success, all subgraphs removed!"
18 | rm unpublish.log
19 | exit 0
20 | elif grep -Eq 'error:(.+) invalid input syntax for uuid: ""' unpublish.log; then
21 | echo "Success, no subgraphs found!"
22 | rm unpublish.log
23 | exit 0
24 | else
25 | cat unpublish.log
26 | rm unpublish.log
27 | exit 1
28 | fi
29 |
--------------------------------------------------------------------------------
/CODEOWNERS:
--------------------------------------------------------------------------------
1 | # This file was automatically generated by the Apollo SecOps team
2 | # Please customize this file as needed prior to merging.
3 |
4 | * @prasek
5 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2021 Apollo GraphQL
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: default
2 | default: demo
3 |
4 | .PHONY: ci
5 | ci: supergraph docker-build-force docker-up smoke docker-down
6 |
7 | .PHONY: ci-router
8 | ci-router: supergraph docker-build-force docker-up-local-router smoke docker-down-router
9 |
10 | .PHONY: demo
11 | demo: supergraph docker-up smoke docker-down
12 |
13 | .PHONY: demo-managed
14 | demo-managed: publish take-five docker-up-managed smoke docker-down
15 |
16 | .PHONY: demo-k8s
17 | demo-k8s: k8s-up k8s-smoke k8s-down
18 |
19 | .PHONY: demo-serverless
20 | demo-serverless: supergraph-serverless docker-up-serverless smoke docker-down-serverless
21 |
22 | .PHONY: docker-up
23 | docker-up:
24 | docker-compose up -d
25 | @sleep 2
26 | @docker logs apollo-gateway
27 |
28 | .PHONY: docker-build
29 | docker-build:
30 | docker-compose build
31 |
32 | .PHONY: docker-build-force
33 | docker-build-force:
34 | docker-compose build --no-cache --pull --parallel --progress plain
35 |
36 | .PHONY: docker-build-router
37 | docker-build-router:
38 | @docker build -t supergraph-demo_apollo-router router/. --no-cache
39 |
40 | .PHONY: docker-build-serverless
41 | docker-build-serverless:
42 | docker-compose -f docker-compose.serverless.yml build --no-cache
43 |
44 | .PHONY: docker-up-managed
45 | docker-up-managed:
46 | docker-compose -f docker-compose.managed.yml up -d
47 | @sleep 2
48 | @docker logs apollo-gateway
49 |
50 | .PHONY: demo-local-router
51 | demo-local-router: supergraph docker-up-local-router smoke docker-down-router
52 |
53 | .PHONY: docker-up-local-router
54 | docker-up-local-router:
55 | docker-compose -f docker-compose.router.yml up -d
56 | @sleep 2
57 | @docker logs apollo-router
58 |
59 | .PHONY: query
60 | query:
61 | @.scripts/query.sh
62 |
63 | .PHONY: smoke
64 | smoke:
65 | @.scripts/smoke.sh
66 |
67 | .PHONY: docker-down
68 | docker-down:
69 | docker-compose down --remove-orphans
70 |
71 | .PHONY: docker-down-router
72 | docker-down-router:
73 | docker-compose -f docker-compose.router.yml down --remove-orphans
74 |
75 | .PHONY: supergraph
76 | supergraph: config compose
77 |
78 | .PHONY: config
79 | config:
80 | .scripts/config.sh > ./supergraph.yaml
81 |
82 | .PHONY: compose
83 | compose:
84 | .scripts/compose.sh
85 |
86 | .PHONY: publish
87 | publish:
88 | .scripts/publish.sh
89 |
90 | .PHONY: unpublish
91 | unpublish:
92 | .scripts/unpublish.sh
93 |
94 | .PHONY: graph-api-env
95 | graph-api-env:
96 | @.scripts/graph-api-env.sh
97 |
98 | .PHONY: check-products
99 | check-products:
100 | .scripts/check-products.sh
101 |
102 | .PHONY: check-all
103 | check-all:
104 | .scripts/check-all.sh
105 |
106 | .PHONY: docker-up-zipkin
107 | docker-up-zipkin:
108 | docker-compose -f docker-compose.otel-zipkin.yml up -d
109 | @sleep 2
110 | docker-compose -f docker-compose.otel-zipkin.yml logs
111 |
112 | .PHONY: docker-down-zipkin
113 | docker-down-zipkin:
114 | docker-compose -f docker-compose.otel-zipkin.yml down
115 |
116 | .PHONY: docker-up-otel-collector
117 | docker-up-otel-collector:
118 | docker-compose -f docker-compose.otel-collector.yml up -d
119 | @sleep 2
120 | docker-compose -f docker-compose.otel-collector.yml logs
121 |
122 | .PHONY: docker-up-local-router-otel
123 | docker-up-local-router-otel:
124 | docker-compose -f docker-compose.router-otel.yml up -d
125 | @sleep 2
126 | docker-compose -f docker-compose.router-otel.yml logs
127 |
128 |
129 | .PHONY: docker-down-otel-collector
130 | docker-down-otel-collector:
131 | docker-compose -f docker-compose.otel-collector.yml down
132 |
133 | .PHONY: supergraph-serverless
134 | supergraph-serverless:
135 | rover supergraph compose --config serverless/supergraph.yaml > serverless/supergraph.graphql
136 |
137 | .PHONY: docker-up-serverless
138 | docker-up-serverless:
139 | docker-compose -f docker-compose.serverless.yml up -d
140 | @sleep 6
141 | docker-compose -f docker-compose.serverless.yml logs
142 |
143 | .PHONY: docker-down-serverless
144 | docker-down-serverless:
145 | docker-compose -f docker-compose.serverless.yml down
146 |
147 | .PHONY: k8s-up
148 | k8s-up:
149 | .scripts/k8s-up.sh
150 |
151 | .PHONY: k8s-up-dev
152 | k8s-up-dev:
153 | .scripts/k8s-up.sh dev
154 |
155 | .PHONY: k8s-query
156 | k8s-query:
157 | .scripts/query.sh 80
158 |
159 | .PHONY: k8s-smoke
160 | k8s-smoke:
161 | .scripts/k8s-smoke.sh 80
162 |
163 | .PHONY: k8s-nginx-dump
164 | k8s-nginx-dump:
165 | .scripts/k8s-nginx-dump.sh "k8s-nginx-dump"
166 |
167 | .PHONY: k8s-graph-dump
168 | k8s-graph-dump:
169 | .scripts/k8s-graph-dump.sh "k8s-graph-dump"
170 |
171 | .PHONY: k8s-down
172 | k8s-down:
173 | .scripts/k8s-down.sh
174 |
175 | .PHONY: k8s-ci
176 | k8s-ci:
177 | @.scripts/k8s-ci.sh
178 |
179 | .PHONY: k8s-ci-dev
180 | k8s-ci-dev:
181 | @.scripts/k8s-ci.sh dev
182 |
183 | .PHONY: dep-act
184 | dep-act:
185 | curl https://raw.githubusercontent.com/nektos/act/master/install.sh | bash -s v0.2.23
186 |
187 | ubuntu-latest=ubuntu-latest=catthehacker/ubuntu:act-latest
188 |
189 | .PHONY: act
190 | act: act-ci-local
191 |
192 | .PHONY: act-ci-local
193 | act-ci-local:
194 | act -P $(ubuntu-latest) -W .github/workflows/main.yml --detect-event
195 |
196 | .PHONY: act-ci-local-router
197 | act-ci-local-router:
198 | act -P $(ubuntu-latest) -W .github/workflows/main-router.yml --detect-event
199 |
200 | .PHONY: act-ci-local-serverless
201 | act-ci-local-serverless:
202 | act -P $(ubuntu-latest) -W .github/workflows/main-serverless.yml --detect-event
203 |
204 | .PHONY: act-ci-managed
205 | act-ci-managed:
206 | act -P $(ubuntu-latest) -W .github/workflows/managed.yml --secret-file graph-api.env --detect-event -j ci-docker-managed
207 |
208 | .PHONY: act-rebase
209 | act-rebase:
210 | act -P $(ubuntu-latest) -W .github/workflows/rebase.yml -s GITHUB_TOKEN --secret-file docker.secrets --detect-event
211 |
212 | .PHONY: act-release
213 | act-release:
214 | act -P $(ubuntu-latest) -W .github/workflows/release.yml --secret-file docker.secrets
215 |
216 | .PHONY: act-subgraph-check
217 | act-subgraph-check:
218 | act -P $(ubuntu-latest) -W .github/workflows/subgraph-check.yml --secret-file graph-api.env --detect-event
219 |
220 | .PHONY: act-subgraph-deploy-publish
221 | act-subgraph-deploy-publish:
222 | act -P $(ubuntu-latest) -W .github/workflows/subgraph-deploy-publish.yml --secret-file graph-api.env --detect-event
223 |
224 | .PHONY: docker-prune
225 | docker-prune:
226 | .scripts/docker-prune.sh
227 |
228 | .PHONY: take-five
229 | take-five:
230 | @echo waiting for robots to finish work ...
231 | @sleep 5
232 |
233 | .PHONY: copy-local-otel-tar
234 | copy-local-otel-tar:
235 | cp ../supergraph-demo-opentelemetry/dist/js/supergraph-demo-opentelemetry-v0.0.0.tgz ./gateway/
236 | cp ../supergraph-demo-opentelemetry/dist/js/supergraph-demo-opentelemetry-v0.0.0.tgz ./subgraphs/products
237 | cp ../supergraph-demo-opentelemetry/dist/js/supergraph-demo-opentelemetry-v0.0.0.tgz ./subgraphs/inventory
238 | cp ../supergraph-demo-opentelemetry/dist/js/supergraph-demo-opentelemetry-v0.0.0.tgz ./subgraphs/users
239 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Supergraph Demo
2 |
3 | 
4 | [](https://renovatebot.com)
5 |
6 | > 📣 [Apollo Federation 2 is generally available](https://www.apollographql.com/blog/announcement/backend/apollo-federation-2-is-now-generally-available/)!
7 | View the [Federation 2 demo](https://github.com/apollographql/supergraph-demo-fed2/blob/main/README.md)!
8 |
9 | Moving from dynamic composition to static composition with supergraphs.
10 |
11 | Contents:
12 |
13 | * [Welcome](#welcome)
14 | * [Prerequisites](#prerequisites)
15 | * [Local Development](#local-development)
16 | * [Local Supergraph Composition](#local-supergraph-composition)
17 | * [Apollo Sandbox for Local Development](#apollo-sandbox-for-local-development)
18 | * [Tracing with Open Telemetry](#tracing-with-open-telemetry)
19 | * [Apollo Studio](#apollo-studio)
20 | * [Composition in Apollo Studio](#composition-in-apollo-studio)
21 | * [Ship Faster Without Breaking Changes](#ship-faster-without-breaking-changes)
22 |
23 | * [Standard CI/CD](#standard-cicd)
24 | * [Subgraph CI](#subgraph-ci)
25 | * [Subgraph Deployment](#subgraph-deployment)
26 | * [Gateway CI](#gateway-ci)
27 | * [Gateway Deployment](#gateway-deployment)
28 | * [Deployment Examples](#deployment-examples)
29 | * [Kubernetes with Supergraph ConfigMap](#kubernetes-with-supergraph-configmap)
30 | * [Serverless](#serverless)
31 | * [Apollo Router](#apollo-router)
32 | * [Learn More](#learn-more)
33 |
34 | See also:
35 | - [apollographql/supergraph-demo-fed2](https://github.com/apollographql/supergraph-demo-fed2/blob/main/README.md)
36 | - [apollographql/supergraph-demo-k8s-graph-ops](https://github.com/apollographql/supergraph-demo-k8s-graph-ops)
37 |
38 | ## Welcome
39 |
40 | [Apollo Federation](https://www.apollographql.com/docs/federation/) and [Managed Federation](https://www.apollographql.com/docs/federation/managed-federation/overview/) have delivered significant
41 | improvements over schema stitching and alternate approaches. Static
42 | composition introduces another big step forward as we move composition out of
43 | the Gateway and into the CI pipeline where federated graph changes can be
44 | validated sooner and built into static artifacts that define how a Gateway
45 | should route requests across the subgraphs in a federation.
46 |
47 | Most contemporary federated GraphQL implementations dynamically compose a
48 | list of implementing services (subgraphs) into a GraphQL Gateway at runtime.
49 | There is no static artifact that can be versioned, validated, or reasoned
50 | about across a fleet of Gateway instances that are common in scale-out
51 | federated graph deployments. Gateways often rely on hard-coded behavior for
52 | directives like `join` or accept additional non-GraphQL configuration.
53 |
54 | With static composition, you can compose subgraphs into a supergraph at
55 | build-time resulting in a static artifact (supergraph schema) that describes
56 | the machinery to power a graph router at runtime. The supergraph schema
57 | includes directives like `join` that instruct a graph router how to federate
58 | multiple subgraphs into a single graph for consumers to use.
59 |
60 | 
61 |
62 | See also: [New Federation UX - Docs](https://www.apollographql.com/docs/federation/quickstart/)
63 |
64 | ## Prerequisites
65 |
66 | You'll need:
67 |
68 | * [docker](https://docs.docker.com/get-docker/)
69 | * [docker-compose](https://docs.docker.com/compose/install/)
70 | * `rover` [our new CLI](https://www.apollographql.com/docs/rover/getting-started)
71 |
72 | To install `rover`:
73 |
74 | ```sh
75 | curl -sSL https://rover.apollo.dev/nix/latest | sh
76 | ```
77 |
78 | ## Local Development
79 |
80 | ### Local Supergraph Composition
81 |
82 | See also: [Apollo Federation docs](https://www.apollographql.com/docs/federation/quickstart/)
83 |
84 | You can federate multiple subgraphs into a supergraph using:
85 |
86 | ```sh
87 | make demo
88 | ```
89 |
90 | which does the following:
91 |
92 | ```sh
93 | # build a supergraph from 3 subgraphs: products, users, inventory
94 | make supergraph
95 | ```
96 |
97 | which runs:
98 |
99 | ```
100 | rover supergraph compose --config ./supergraph.yaml > supergraph.graphql
101 | ```
102 |
103 | and then runs:
104 |
105 | ```
106 | docker-compose up -d
107 |
108 | Creating apollo-gateway ... done
109 | Creating inventory ... done
110 | Creating users ... done
111 | Creating products ... done
112 |
113 | Starting Apollo Gateway in local mode ...
114 | Using local: supergraph.graphql
115 | 🚀 Graph Router ready at http://localhost:4000/
116 | ```
117 |
118 | `make demo` then issues a curl request to the graph router via:
119 |
120 | ```sh
121 | make query
122 | ```
123 |
124 | which issues the following query that fetches across 3 subgraphs:
125 |
126 | ```ts
127 | query Query {
128 | allProducts {
129 | id
130 | sku
131 | createdBy {
132 | email
133 | totalProductsCreated
134 | }
135 | }
136 | }
137 | ```
138 |
139 | with results like:
140 |
141 | ```ts
142 | {
143 | data: {
144 | allProducts: [
145 | {
146 | id: "apollo-federation",
147 | sku: "federation",
148 | createdBy: {
149 | email: "support@apollographql.com",
150 | totalProductsCreated: 1337
151 | }
152 | },{
153 | id: "apollo-studio",
154 | sku: "studio",
155 | createdBy:{
156 | email: "support@apollographql.com",
157 | totalProductsCreated: 1337
158 | }
159 | }
160 | ]
161 | }
162 | }
163 | ```
164 |
165 | `make demo` then shuts down the graph router:
166 |
167 | ```
168 | docker-compose down
169 | ```
170 |
171 | ### Apollo Sandbox for Local Development
172 |
173 | #### Deploy Graph
174 |
175 | ```
176 | make docker-up
177 | ```
178 |
179 | #### Query using Apollo Sandbox
180 |
181 | 1. Open [http://localhost:4000/](http://localhost:4000/)
182 | 2. Click `Query your server`
183 | 3. Run a query:
184 |
185 | ```ts
186 | query Query {
187 | allProducts {
188 | id
189 | sku
190 | createdBy {
191 | email
192 | totalProductsCreated
193 | }
194 | }
195 | }
196 | ```
197 |
198 | View results:
199 |
200 | 
201 |
202 | #### Cleanup
203 |
204 | ```
205 | make docker-down
206 | ```
207 |
208 | ### Tracing with Open Telemetry
209 |
210 | #### Deploy Graph with Open Telemetry Collector
211 |
212 | ```
213 | make docker-up-otel-collector
214 | ```
215 |
216 | #### Run Queries
217 |
218 | ```
219 | make smoke
220 | ```
221 |
222 | #### View Open Telemetry Traces
223 |
224 | browse to [http://localhost:9411/](http://localhost:9411/)
225 |
226 | 
227 |
228 | #### Cleanup
229 |
230 | ```
231 | make docker-down-otel-collector
232 | ```
233 |
234 | #### Send Open Telemetry Traces to Honeycomb
235 |
236 | You can send Open Telemetry traces from the Gateway to Honeycomb with the following [collector-config.yml](opentelemetry/collector-config.yml):
237 |
238 | ```
239 | receivers:
240 | otlp:
241 | protocols:
242 | grpc:
243 | http:
244 | cors_allowed_origins:
245 | - http://*
246 | - https://*
247 |
248 | exporters:
249 | otlp:
250 | endpoint: "api.honeycomb.io:443"
251 | headers:
252 | "x-honeycomb-team": "your-api-key"
253 | "x-honeycomb-dataset": "your-dataset-name"
254 |
255 | service:
256 | pipelines:
257 | traces:
258 | receivers: [otlp]
259 | exporters: [otlp]
260 | ```
261 |
262 | 
263 |
264 | #### Learn More
265 |
266 | * Docs: [Open Telemetry for Apollo Federation](https://www.apollographql.com/docs/federation/opentelemetry/)
267 | * Docker compose file: [docker-compose.otel-collector.yml](docker-compose.otel-collector.yml)
268 | * Helper library: [supergraph-demo-opentelemetry](https://github.com/prasek/supergraph-demo-opentelemetry)
269 | * See usage in:
270 | * [gateway/gateway.js](gateway/gateway.js)
271 | * [subgraphs/products/products.js](subgraphs/products/products.js)
272 |
273 | ## Apollo Studio
274 |
275 | ### Composition in Apollo Studio
276 |
277 | See also: [Apollo Studio docs](https://www.apollographql.com/docs/federation/quickstart-pt-2/)
278 |
279 | [Managed Federation](https://www.apollographql.com/docs/federation/managed-federation/overview/) in Apollo Studio enables teams to independently publish subgraphs to the Apollo Registry, so they can be automatically composed into a supergraph for apps to use.
280 |
281 | #### Create a Federated Graph in Apollo Studio
282 |
283 | To get started with Managed Federation, create your Apollo account:
284 |
285 | * Signup for a free Team trial: https://studio.apollographql.com/signup
286 | * Create an organization
287 |   * **Important:** use the `Team` trial which gives you access to Apollo features like `Schema Checks`.
288 |
289 | Then create a `Graph` of type `Deployed` with the `Federation` option.
290 |
291 | Once you have created your graph in Apollo Studio, run the following:
292 |
293 | ```sh
294 | make demo-managed
295 | ```
296 |
297 | #### Connect to your Graph in Apollo Studio
298 |
299 | which will prompt for your `graph key` and `graph ref` and save them to `./graph-api.env`:
300 |
301 | * `graph key` - the graph API key used to authenticate with Apollo Studio.
302 | * `graph ref` - a reference to the graph in Apollo's registry the graph router should pull from.
303 |   * in the form `<graph-id>@<variant>`
304 |     * `@<variant>` is optional and will default to `@current`
305 | * examples: `my-graph@dev`, `my-graph@stage`, `my-graph@prod`
306 | * see [configuration reference](https://www.apollographql.com/docs/apollo-server/api/apollo-server/#apollo) for details.
307 |
308 | Note: The generated `./graph-api.env` holds your `APOLLO_KEY` and `APOLLO_GRAPH_REF`.
309 |
310 | #### Publish Subgraph Schemas to the Apollo Registry
311 |
312 | `make demo-managed` will publish the `subgraphs/**.graphql` schemas to your new `Federated` graph in the Apollo Registry, which performs managed composition and schema checks, to prevent breaking changes:
313 |
314 | ```sh
315 | make publish
316 | ```
317 |
318 | Temporary composition errors may surface as each subgraph is published:
319 |
320 | ```
321 | + rover subgraph publish supergraph-router@dev --routing-url http://products:4000/graphql --schema subgraphs/products/products.graphql --name products
322 |
323 | Publishing SDL to supergraph-router:dev (subgraph: products) using credentials from the default profile.
324 |
325 | A new subgraph called 'products' for the 'supergraph-router' graph was created.
326 |
327 | The gateway for the 'supergraph-router' graph was NOT updated with a new schema
328 |
329 | WARN: The following composition errors occurred:
330 | Unknown type "User".
331 | [products] Query -> `Query` is an extension type, but `Query` is not defined in any service
332 | ```
333 |
334 | Success! Once all subgraphs are published the supergraph will be updated, for example:
335 |
336 | ```
337 | + rover subgraph publish supergraph-router@dev --routing-url http://users:4000/graphql --schema subgraphs/users/users.graphql --name users
338 |
339 | Publishing SDL to supergraph-router:dev (subgraph: users) using credentials from the default profile.
340 |
341 | A new subgraph called 'users' for the 'supergraph-router' graph was created
342 |
343 | The gateway for the 'supergraph-router' graph was updated with a new schema, composed from the updated 'users' subgraph
344 | ```
345 |
346 | Viewing the `Federated` graph in Apollo Studio we can see the supergraph and the subgraphs it's composed from:
347 | 
348 |
349 | #### Run the Graph Router and Subgraph Containers
350 |
351 | The graph router and subgraph services will be started by `make demo-managed` next.
352 |
353 | using `docker-compose.managed.yml`:
354 |
355 | ```yaml
356 | version: '3'
357 | services:
358 | apollo-gateway:
359 | container_name: apollo-gateway
360 | build: ./gateway
361 | environment:
362 | - APOLLO_SCHEMA_CONFIG_DELIVERY_ENDPOINT=https://uplink.api.apollographql.com/
363 | env_file: # created with: make graph-api-env
364 | - graph-api.env
365 | ports:
366 | - "4000:4000"
367 | products:
368 | container_name: products
369 | build: ./subgraphs/products
370 | inventory:
371 | container_name: inventory
372 | build: ./subgraphs/inventory
373 | users:
374 | container_name: users
375 | build: ./subgraphs/users
376 | ```
377 |
378 | ```sh
379 | make docker-up-managed
380 | ```
381 |
382 | which shows:
383 |
384 | ```
385 | docker-compose -f docker-compose.managed.yml up -d
386 | Creating network "supergraph-demo_default" with the default driver
387 | Creating apollo-gateway ... done
388 |
389 | Starting Apollo Gateway in managed mode ...
390 | Apollo usage reporting starting! See your graph at https://studio.apollographql.com/graph/supergraph-router@dev/
391 | 🚀 Server ready at http://localhost:4000/
392 | ```
393 |
394 | #### Make a Federated Query
395 |
396 | `make demo-managed` then issues a curl request to the graph router:
397 |
398 | ```sh
399 | make query
400 | ```
401 |
402 | which has the same query and response as above.
403 |
404 | #### Clean Up
405 |
406 | `make demo-managed` then shuts down the graph router:
407 |
408 | ```sh
409 | make docker-down
410 | ```
411 |
412 | ### Ship Faster Without Breaking Changes
413 |
414 | See also: [working with subgraphs docs](https://www.apollographql.com/docs/federation/quickstart-pt-3/)
415 |
416 | Apollo Schema Checks help ensure subgraph changes don't break the federated graph, reducing downtime and enabling teams to ship faster.
417 |
418 | #### The Graph Router will Update In Place
419 |
420 | With Managed Federation you can leave the graph router running and it will
421 | update automatically when subgraph changes are published and they successfully
422 | compose and pass all schema checks in Apollo Studio:
423 |
424 | ```sh
425 | make docker-up-managed
426 | ```
427 |
428 | ```
429 | Starting Apollo Gateway in managed mode ...
430 | Apollo usage reporting starting! See your graph at https://studio.apollographql.com/graph/supergraph-router@dev/
431 | 🚀 Server ready at http://localhost:4000/
432 | ```
433 |
434 | #### Simulating a Change to the Product Subgraph
435 |
436 | To simulate a change to the products subgraph, add a `Color` `enum` to `subgraphs/products/products.graphql`:
437 |
438 | ```ts
439 | enum Color {
440 | BLUE
441 | GREEN
442 | }
443 | ```
444 |
445 | Then `publish` the changes to the registry:
446 |
447 | ```sh
448 | make publish
449 | ```
450 |
451 | Then remove the `Color` `enum` from `subgraphs/products/products.graphql`:
452 |
453 | ```ts
454 | enum Color {
455 | BLUE
456 | GREEN
457 | }
458 | ```
459 |
460 | #### Run a Schema Check
461 |
462 | Run a schema `check` against the published version in the registry:
463 |
464 | ```sh
465 | make check-products
466 | ```
467 |
468 | This detects the schema changes and compares them against the known graph `operations` to determine that even though there are schema changes, there is no impact to actual operations so changes can be safely published:
469 |
470 | ```sh
471 | Checked the proposed subgraph against supergraph-demo@current
472 | Compared 3 schema changes against 2 operations
473 | ┌────────┬─────────────────────────┬──────────────────────────────────────────┐
474 | │ Change │ Code │ Description │
475 | ├────────┼─────────────────────────┼──────────────────────────────────────────┤
476 | │ PASS │ TYPE_REMOVED │ type `Color`: removed │
477 | ├────────┼─────────────────────────┼──────────────────────────────────────────┤
478 | │ PASS │ VALUE_REMOVED_FROM_ENUM │ enum type `Color`: value `BLUE` removed │
479 | ├────────┼─────────────────────────┼──────────────────────────────────────────┤
480 | │ PASS │ VALUE_REMOVED_FROM_ENUM │ enum type `Color`: value `GREEN` removed │
481 | └────────┴─────────────────────────┴──────────────────────────────────────────┘
482 | ```
483 |
484 | #### Publish Validated Subgraph Schemas to Apollo Registry
485 |
486 | Then `publish` the changes and `check` again:
487 |
488 | ```sh
489 | make publish
490 |
491 | make check-products
492 | ```
493 |
494 | which shows:
495 |
496 | ```
497 | Checked the proposed subgraph against supergraph-demo@current
498 | There were no changes detected in the composed schema.
499 | ```
500 |
501 | #### Recap
502 |
503 | Using `rover` in a local dev environment helps catch potentially breaking changes sooner. The next section covers how `rover` can be integrated into your CI/CD environments, and how Managed Federation catches breaking changes before they are delivered to the graph router.
504 |
505 | ## Standard CI/CD
506 |
507 | This example repo is a monorepo, but this same basic CI/CD workflow applies for single-repo-per-package scenarios.
508 |
509 | ### Subgraph CI
510 |
511 | * Create [graph variants](https://www.apollographql.com/docs/studio/org/graphs/) in Apollo Studio for `dev`, `staging`, and `prod`:
512 | * Configure [schema checks](https://www.apollographql.com/docs/studio/schema-checks/) for your graph:
513 | * [Federated composition checks](https://www.apollographql.com/docs/studio/schema-checks/#federated-composition-checks) will run against the subgraph schemas published to each variant.
514 | * [Operation checks](https://www.apollographql.com/docs/studio/schema-checks/#types-of-checks) should be configured to validate real world [schema usage](https://www.apollographql.com/docs/studio/check-configurations/#using-apollo-studio-recommended) with usage data from `staging` and `prod` variants.
515 | * Configure Gateway deployments to provide [usage reporting](https://www.apollographql.com/docs/apollo-server/api/plugin/usage-reporting/#gatsby-focus-wrapper) data for operation checks.
516 | * CI for each subgraph for source pull requests: [subgraph-check.yml](https://github.com/apollographql/supergraph-demo/blob/main/.github/workflows/subgraph-check.yml)
517 | * `rover subgraph check`
518 | * schema checks against schema in the `dev` variant
519 | * operation checks against usage data in the `prod` and typically `stage` variants.
520 |
521 | * If you’re in a monorepo:
522 | * Consider using 3-way merges and [overriding the APOLLO_VCS_COMMIT and/or APOLLO_VCS_BRANCH](https://www.apollographql.com/docs/rover/configuring/#overriding) to correlate schema changes for subgraph changes.
523 |
524 | With this approach, failed schema checks ([example](https://github.com/apollographql/supergraph-demo/pull/32)) are caught as close to the source of
525 | the change as possible, but only fully validated supergraph schemas are
526 | published for use.
527 |
528 | 
529 |
530 | Breaking changes are sometimes intentional, and to accommodate this, Apollo
531 | Studio has the option to mark certain changes as safe in the UI. A check report
532 | URL is provided in your CI, so you can easily navigate to Apollo Studio to
533 | review the check, mark things safe, and then re-run your pipeline.
534 |
535 | 
536 |
537 | ### Subgraph Deployment
538 |
539 | * Run a deployment workflow like this simple example [subgraph-deploy-publish.yml](https://github.com/apollographql/supergraph-demo/blob/main/.github/workflows/subgraph-deploy-publish.yml)
540 | * Before subgraph deployment
541 | * Do a reality check with `rover subgraph check`
542 | * Deploy subgraph service
543 | * Should have the service deployed before publishing the subgraph schema
544 | * After subgraph deployment
545 | * Publish the subgraph schema to the registry with `rover subgraph publish`
546 |     * Managed Federation will run central schema checks and operation checks and publish a new supergraph schema for the Gateways in the fleet for each environment
547 |
548 | Publishing subgraph schema changes with `rover subgraph publish` always stores a new subgraph schema version to the Apollo Registry, even if schema checks don’t pass.
549 |
550 | [Managed Federation](https://www.apollographql.com/docs/federation/managed-federation/overview/) ultimately catches all breaking changes before a new supergraph schema is published:
551 |
552 | * Runs schema checks after each `rover subgraph publish`
553 | * Composes a supergraph schema if all checks pass
554 | * Makes the supergraph schema available in the:
555 | * `Apollo Uplink` - that the Gateway can poll for live updates (default).
556 | * `Apollo Registry` - for retrieval via `rover supergraph fetch`.
557 | * `Apollo Supergraph Build Webhook` - for custom integrations
558 |
559 | Key benefits to Managed Federation:
560 |
561 | * CI for multiple concurrent `rover subgraph publish` from multiple service repos
562 | * Central point of control & governance
563 | * Globally consistent schema checks and composition
564 | * Catches breaking changes at supergraph build time, before a new supergraph schema is published for Gateways to use.
565 |
566 | ### Gateway CI
567 |
568 | The Gateway image and configuration can be managed using standard CI practices.
569 |
570 | For example, if using Docker images:
571 |
572 | * see [example monorepo release workflow](https://github.com/apollographql/supergraph-demo/blob/main/.github/workflows/release.yml)
573 | * bumps package versions in this `source repo`
574 | * build & push Gateway docker images to DockerHub
575 |
576 | ### Gateway Deployment
577 |
578 | The default configuration for the Gateway is to update in place by pulling new supergraph schema versions as they're published to the Apollo Uplink. Gateways in the fleet poll the Uplink every 10 seconds by default, so there will be a fast rolling upgrade as Gateways check the Uplink, without the need to restart the Gateway.
579 |
580 | Update in place is useful for any long-lived Gateway instance where an immediate update of the Gateway instance's supergraph schema is desired.
581 |
582 | Update-in-place with Managed Federation is useful for:
583 | * long-lived VMs
584 | * Kubernetes `Deployments`
585 | * Serverless functions that may be cached outside of operator control.
586 |
587 | [Configure the Gateways in each fleet](https://www.apollographql.com/docs/federation/managed-federation/setup/#3-modify-the-gateway-if-necessary) `dev`, `staging`, `prod` to:
588 |
589 | * pull supergraph schema from their respective graph variants, via the [Apollo Uplink](https://www.apollographql.com/docs/federation/quickstart-pt-2/#managed-federation-basics).
590 | * provide [usage reporting](https://www.apollographql.com/docs/apollo-server/api/plugin/usage-reporting/#gatsby-focus-wrapper) data for operation checks.
591 |
592 | ### Custom Gateway Deployments
593 |
594 | You can do custom CD with the following hooks and the `rover` CLI:
595 |
596 | * [supergraph build webhook](https://www.apollographql.com/blog/announcement/webhooks/) - pushes from Managed Federation.
597 | * `rover supergraph fetch` - pulls from the `Apollo Registry`.
598 |
599 | See [Kubernetes-native GraphOps](https://github.com/apollographql/supergraph-demo-k8s-graph-ops) to learn more about using custom CD with Kubernetes and GitOps.
600 |
601 | ## Deployment Examples
602 |
603 | ### Kubernetes with Supergraph ConfigMap
604 |
605 | You'll need the latest versions of:
606 |
607 | * [kubectl](https://kubernetes.io/docs/tasks/tools/) - with expanded `kustomize` support for `resources`
608 | * [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation)
609 |
610 | then run:
611 |
612 | ```sh
613 | make demo-k8s
614 | ```
615 |
616 | which generates a graph router `Deployment` and supergraph `ConfigMap` using:
617 |
618 | ```
619 | kubectl kustomize k8s/router/base
620 | ```
621 |
622 | and then creates:
623 |
624 | * local k8s cluster with the NGINX Ingress Controller
625 | * graph-router `Deployment` configured to use a supergraph `ConfigMap`
626 | * graph-router `Service` and `Ingress`
627 |
628 | ### Gateway Deployment with Supergraph ConfigMap
629 |
630 | using [k8s/router/base/router.yaml](k8s/router/base/router.yaml) via:
631 |
632 | ```sh
633 | kubectl apply -k k8s/router/base
634 | ```
635 |
636 | ```yaml
637 | apiVersion: apps/v1
638 | kind: Deployment
639 | metadata:
640 | labels:
641 | app: router
642 | name: router-deployment
643 | spec:
644 | replicas: 1
645 | selector:
646 | matchLabels:
647 | app: router
648 | template:
649 | metadata:
650 | labels:
651 | app: router
652 | spec:
653 | containers:
654 | - env:
655 | - name: APOLLO_SCHEMA_CONFIG_EMBEDDED
656 | value: "true"
657 | image: prasek/supergraph-router:latest
658 | name: router
659 | ports:
660 | - containerPort: 4000
661 | volumeMounts:
662 | - mountPath: /etc/config
663 | name: supergraph-volume
664 | volumes:
665 | - configMap:
666 | name: supergraph-c22698b7b9
667 | name: supergraph-volume
668 | ---
669 | apiVersion: v1
670 | kind: ConfigMap
671 | metadata:
672 | name: supergraph-c22698b7b9
673 | data:
674 | supergraph.graphql: |
675 | schema
676 | @core(feature: "https://specs.apollo.dev/core/v0.1"),
677 | @core(feature: "https://specs.apollo.dev/join/v0.1")
678 | {
679 | query: Query
680 | }
681 |
682 | ...
683 |
684 | enum join__Graph {
685 | INVENTORY @join__graph(name: "inventory" url: "http://inventory:4000/graphql")
686 | PRODUCTS @join__graph(name: "products" url: "http://products:4000/graphql")
687 |     USERS @join__graph(name: "users" url: "http://users:4000/graphql")
688 | }
689 |
690 | type Product
691 | @join__owner(graph: PRODUCTS)
692 | @join__type(graph: PRODUCTS, key: "id")
693 | @join__type(graph: PRODUCTS, key: "sku package")
694 | @join__type(graph: PRODUCTS, key: "sku variation{id}")
695 | @join__type(graph: INVENTORY, key: "id")
696 | {
697 | id: ID! @join__field(graph: PRODUCTS)
698 | sku: String @join__field(graph: PRODUCTS)
699 | package: String @join__field(graph: PRODUCTS)
700 | variation: ProductVariation @join__field(graph: PRODUCTS)
701 | dimensions: ProductDimension @join__field(graph: PRODUCTS)
702 | createdBy: User @join__field(graph: PRODUCTS, provides: "totalProductsCreated")
703 | delivery(zip: String): DeliveryEstimates @join__field(graph: INVENTORY, requires: "dimensions{size weight}")
704 | }
705 |
706 | type ProductDimension {
707 | size: String
708 | weight: Float
709 | }
710 |
711 | type ProductVariation {
712 | id: ID!
713 | }
714 |
715 | type Query {
716 | allProducts: [Product] @join__field(graph: PRODUCTS)
717 | product(id: ID!): Product @join__field(graph: PRODUCTS)
718 | }
719 |
720 | type User
721 | @join__owner(graph: USERS)
722 | @join__type(graph: USERS, key: "email")
723 | @join__type(graph: PRODUCTS, key: "email")
724 | {
725 | email: ID! @join__field(graph: USERS)
726 | name: String @join__field(graph: USERS)
727 | totalProductsCreated: Int @join__field(graph: USERS)
728 | }
729 | ---
730 | apiVersion: v1
731 | kind: Service
732 | metadata:
733 | name: router-service
734 | spec:
735 | ports:
736 | - port: 4000
737 | protocol: TCP
738 | targetPort: 4000
739 | selector:
740 | app: router
741 | ---
742 | apiVersion: networking.k8s.io/v1
743 | kind: Ingress
744 | metadata:
745 | annotations:
746 | kubernetes.io/ingress.class: nginx
747 | name: router-ingress
748 | spec:
749 | rules:
750 | - http:
751 | paths:
752 | - backend:
753 | service:
754 | name: router-service
755 | port:
756 | number: 4000
757 | path: /
758 | pathType: Prefix
759 | ```
760 |
761 | and 3 subgraph services [k8s/subgraphs/base/subgraphs.yaml](k8s/subgraphs/base/subgraphs.yaml) via:
762 |
763 | ```sh
764 | kubectl kustomize k8s/subgraphs/base
765 | ```
766 |
767 | ### Make a GraphQL Query
768 |
769 | `make demo-k8s` then runs the following in a loop until the query succeeds or 2 min timeout:
770 |
771 | ```sh
772 | kubectl get all
773 | make k8s-query
774 | ```
775 |
776 | which shows the following:
777 |
778 | ```
779 | NAME READY STATUS RESTARTS AGE
780 | pod/inventory-65494cbf8f-bhtft 1/1 Running 0 59s
781 | pod/products-6d75ff449c-9sdnd 1/1 Running 0 59s
782 | pod/router-deployment-84cbc9f689-8fcnf 1/1 Running 0 20s
783 | pod/users-d85ccf5d9-cgn4k 1/1 Running 0 59s
784 |
785 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
786 | service/inventory ClusterIP 10.96.108.120 4000/TCP 59s
787 | service/kubernetes ClusterIP 10.96.0.1 443/TCP 96s
788 | service/products ClusterIP 10.96.65.206 4000/TCP 59s
789 | service/router-service ClusterIP 10.96.178.206 4000/TCP 20s
790 | service/users ClusterIP 10.96.98.53 4000/TCP 59s
791 |
792 | NAME READY UP-TO-DATE AVAILABLE AGE
793 | deployment.apps/inventory 1/1 1 1 59s
794 | deployment.apps/products 1/1 1 1 59s
795 | deployment.apps/router-deployment 1/1 1 1 20s
796 | deployment.apps/users 1/1 1 1 59s
797 |
798 | NAME DESIRED CURRENT READY AGE
799 | replicaset.apps/inventory-65494cbf8f 1 1 1 59s
800 | replicaset.apps/products-6d75ff449c 1 1 1 59s
801 | replicaset.apps/router-deployment-84cbc9f689 1 1 1 20s
802 | replicaset.apps/users-d85ccf5d9 1 1 1 59s
803 | Smoke test
804 | -------------------------------------------------------------------------------------------
805 | ++ curl -X POST -H 'Content-Type: application/json' --data '{ "query": "{ allProducts { id, sku, createdBy { email, totalProductsCreated } } }" }' http://localhost:80/
806 | % Total % Received % Xferd Average Speed Time Time Time Current
807 | Dload Upload Total Spent Left Speed
808 | 100 352 100 267 100 85 3000 955 --:--:-- --:--:-- --:--:-- 3911
809 | {"data":{"allProducts":[{"id":"apollo-federation","sku":"federation","createdBy":{"email":"support@apollographql.com","totalProductsCreated":1337}},{"id":"apollo-studio","sku":"studio","createdBy":{"email":"support@apollographql.com","totalProductsCreated":1337}}]}}
810 | Success!
811 | -------------------------------------------------------------------------------------------
812 | ```
813 |
814 | ### Cleanup
815 |
816 | `make demo-k8s` then cleans up:
817 |
818 | ```
819 | deployment.apps "router-deployment" deleted
820 | service "router-service" deleted
821 | ingress.networking.k8s.io "router-ingress" deleted
822 | Deleting cluster "kind" ...
823 | ```
824 |
825 | ## Serverless
826 |
827 | See [serverless.yml](serverless/serverless.yml)
828 |
829 | ```
830 | make demo-serverless
831 | ```
832 |
833 | which does the following:
834 |
835 | ```
836 | rover supergraph compose --config serverless/supergraph.yaml > serverless/supergraph.graphql
837 | docker-compose -f docker-compose.serverless.yml up -d
838 | Creating network "supergraph-demo_default" with the default driver
839 | Creating serverless ... done
840 | docker-compose -f docker-compose.serverless.yml logs
841 | Attaching to serverless
842 | serverless | Serverless: Running "serverless" installed locally (in service node_modules)
843 | serverless | offline: Starting Offline: dev/us-east-1.
844 | serverless | offline: Offline [http for lambda] listening on http://0.0.0.0:3002
845 | serverless | offline: Function names exposed for local invocation by aws-sdk:
846 | serverless | * router: supergraph-serverless-dev-router
847 | serverless | * inventory: supergraph-serverless-dev-inventory
848 | serverless | * products: supergraph-serverless-dev-products
849 | serverless | * users: supergraph-serverless-dev-users
850 | serverless |
851 | serverless | ┌───────────────────────────────────────────────────────────────────────────┐
852 | serverless | │ │
853 | serverless | │ ANY | http://0.0.0.0:4000/ │
854 | serverless | │ POST | http://0.0.0.0:4000/2015-03-31/functions/router/invocations │
855 | serverless | │ ANY | http://0.0.0.0:4000/inventory │
856 | serverless | │ POST | http://0.0.0.0:4000/2015-03-31/functions/inventory/invocations │
857 | serverless | │ ANY | http://0.0.0.0:4000/products │
858 | serverless | │ POST | http://0.0.0.0:4000/2015-03-31/functions/products/invocations │
859 | serverless | │ ANY | http://0.0.0.0:4000/users │
860 | serverless | │ POST | http://0.0.0.0:4000/2015-03-31/functions/users/invocations │
861 | serverless | │ │
862 | serverless | └───────────────────────────────────────────────────────────────────────────┘
863 | serverless |
864 | serverless | offline: [HTTP] server ready: http://0.0.0.0:4000 🚀
865 | serverless | offline:
866 | serverless | offline: Enter "rp" to replay the last request
867 | -------------------------------------------------------------------------------------------
868 | ++ curl -X POST -H 'Content-Type: application/json' --data '{ "query": "{allProducts{id,sku,createdBy{email,totalProductsCreated}}}" }' http://localhost:4000/
869 | % Total % Received % Xferd Average Speed Time Time Time Current
870 | Dload Upload Total Spent Left Speed
871 | 100 341 100 267 100 74 331 91 --:--:-- --:--:-- --:--:-- 423
872 |
873 | Result:
874 | {"data":{"allProducts":[{"id":"apollo-federation","sku":"federation","createdBy":{"email":"support@apollographql.com","totalProductsCreated":1337}},{"id":"apollo-studio","sku":"studio","createdBy":{"email":"support@apollographql.com","totalProductsCreated":1337}}]}}
875 | -------------------------------------------------------------------------------------------
876 | docker-compose -f docker-compose.serverless.yml down
877 | Stopping serverless ... done
878 | Removing serverless ... done
879 | Removing network supergraph-demo_default
880 | ```
881 |
882 | ## Apollo Router
883 |
884 | [The Apollo Router](https://www.apollographql.com/blog/announcement/backend/apollo-router-our-graphql-federation-runtime-in-rust) is our next-generation GraphQL Federation runtime written in Rust, and it is fast.
885 |
886 | As a Graph Router, the Apollo Router plays the same role as the Apollo Gateway. The same subgraph schemas and composed supergraph schema can be used in both the Router and the Gateway.
887 |
888 | This demo shows using the Apollo Router with a Federation 1 supergraph schema, composed using the Fed 1 `rover supergraph compose` command. To see the Router working with Federation 2 composition, checkout the Apollo Router section of [apollographql/supergraph-demo-fed2](https://github.com/apollographql/supergraph-demo-fed2/blob/main/README.md#apollo-router).
889 |
890 | [Early benchmarks](https://www.apollographql.com/blog/announcement/backend/apollo-router-our-graphql-federation-runtime-in-rust) show that the Router adds less than 10ms of latency to each operation, and it can process 8x the load of the JavaScript Apollo Gateway.
891 |
892 | To get started with the Router:
893 |
894 | ```
895 | make demo-local-router
896 | ```
897 |
898 | this uses a simple [docker-compose.router.yml](docker-compose.router.yml) file:
899 | ```yaml
900 | version: '3'
901 | services:
902 | apollo-router:
903 | container_name: apollo-router
904 | build: ./router
905 | volumes:
906 | - ./supergraph.graphql:/etc/config/supergraph.graphql
907 | - ./router/configuration.yaml:/etc/config/configuration.yaml
908 | ports:
909 | - "4000:4000"
910 | products:
911 | container_name: products
912 | build: ./subgraphs/products
913 | inventory:
914 | container_name: inventory
915 | build: ./subgraphs/inventory
916 | users:
917 | container_name: users
918 | build: ./subgraphs/users
919 | ```
920 |
921 | which uses the following [Dockerfile](router/Dockerfile)
922 | ```
923 | FROM ubuntu
924 |
925 | WORKDIR /usr/src/app
926 | RUN apt-get update && apt-get install -y \
927 | libssl-dev \
928 | curl \
929 | jq
930 |
931 | COPY install.sh .
932 | COPY run.sh .
933 | RUN ./install.sh
934 |
935 | CMD [ "/usr/src/app/run.sh" ]
936 | ```
937 |
938 | see [./router](router) for more details.
939 |
940 | ## Learn More
941 |
942 | Apollo tools and services help you develop, maintain, operate, and scale your data graph.
943 |
944 | * [Shipping faster with managed federation and schema checks](https://www.apollographql.com/docs/studio/)
945 |
946 | * [Kubernetes-native GraphOps](https://github.com/apollographql/supergraph-demo-k8s-graph-ops)
947 |
--------------------------------------------------------------------------------
/docker-compose.managed.yml:
--------------------------------------------------------------------------------
1 | version: '3'  # legacy Compose field; ignored by Compose v2 but harmless
2 | services:
3 |   apollo-gateway:
4 |     container_name: apollo-gateway
5 |     build: ./gateway
6 |     env_file: # create with make graph-api-env
7 |       - graph-api.env  # presumably Apollo graph credentials (see .scripts/graph-api-env.sh) — no APOLLO_SCHEMA_CONFIG_EMBEDDED here, so gateway.js starts in managed mode
8 |     ports:
9 |       - "4000:4000"  # quoted so YAML 1.1 cannot misparse host:container as a sexagesimal number
10 |   products:
11 |     container_name: products
12 |     build: ./subgraphs/products
13 |   inventory:
14 |     container_name: inventory
15 |     build: ./subgraphs/inventory
16 |   users:
17 |     container_name: users
18 |     build: ./subgraphs/users
19 |   pandas:
20 |     container_name: pandas
21 |     build: ./subgraphs/pandas
22 |
--------------------------------------------------------------------------------
/docker-compose.otel-collector.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 |   apollo-gateway:
4 |     container_name: apollo-gateway
5 |     build: ./gateway
6 |     environment:
7 |       - APOLLO_SCHEMA_CONFIG_EMBEDDED=true  # gateway.js loads the mounted supergraph file instead of managed config
8 |       - APOLLO_OTEL_EXPORTER_TYPE=collector  # console | zipkin | collector (see gateway/gateway.js)
9 |       - APOLLO_OTEL_EXPORTER_HOST=collector  # resolves to the collector service below via Compose DNS
10 |       - APOLLO_OTEL_EXPORTER_PORT=4318
11 |     volumes:
12 |       - ./supergraph.graphql:/etc/config/supergraph.graphql  # path expected by gateway.js in embedded mode
13 |     ports:
14 |       - "4000:4000"  # quoted to avoid the YAML 1.1 sexagesimal trap
15 |   products:
16 |     container_name: products
17 |     build: ./subgraphs/products
18 |     environment:
19 |       - APOLLO_OTEL_EXPORTER_TYPE=collector
20 |       - APOLLO_OTEL_EXPORTER_HOST=collector
21 |       - APOLLO_OTEL_EXPORTER_PORT=4318
22 |   inventory:
23 |     container_name: inventory
24 |     build: ./subgraphs/inventory
25 |     environment:
26 |       - APOLLO_OTEL_EXPORTER_TYPE=collector
27 |       - APOLLO_OTEL_EXPORTER_HOST=collector
28 |       - APOLLO_OTEL_EXPORTER_PORT=4318
29 |   users:
30 |     container_name: users
31 |     build: ./subgraphs/users
32 |     environment:
33 |       - APOLLO_OTEL_EXPORTER_TYPE=collector
34 |       - APOLLO_OTEL_EXPORTER_HOST=collector
35 |       - APOLLO_OTEL_EXPORTER_PORT=4318
36 |   pandas:
37 |     container_name: pandas
38 |     build: ./subgraphs/pandas
39 |   collector:
40 |     container_name: collector
41 |     image: otel/opentelemetry-collector:0.119.0
42 |     command: ["--config=/conf/collector-config.yml"]
43 |     volumes:
44 |       - ./opentelemetry/collector-config.yml:/conf/collector-config.yml
45 |     ports:
46 |       - "9464:9464"  # presumably the Prometheus exporter endpoint scraped via opentelemetry/prometheus.yml — confirm
47 |       - "4317:4317"  # OTLP gRPC
48 |       - "4318:4318"  # OTLP HTTP — matches APOLLO_OTEL_EXPORTER_PORT above
49 |       - "55679:55679"  # NOTE(review): looks like the zpages debug extension port — confirm in collector-config.yml
50 |     depends_on:
51 |       - zipkin
52 |   zipkin:
53 |     container_name: zipkin
54 |     image: openzipkin/zipkin:3.4.4
55 |     ports:
56 |       - "9411:9411"  # Zipkin UI/API
57 |   prometheus:
58 |     container_name: prometheus
59 |     image: prom/prometheus:v2.55.1
60 |     volumes:
61 |       - ./opentelemetry/prometheus.yml:/etc/prometheus/prometheus.yml
62 |     ports:
63 |       - "9090:9090"  # Prometheus UI
64 |
--------------------------------------------------------------------------------
/docker-compose.otel-zipkin.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 |   apollo-gateway:
4 |     container_name: apollo-gateway
5 |     build: ./gateway
6 |     environment:
7 |       - APOLLO_SCHEMA_CONFIG_EMBEDDED=true  # gateway.js loads the mounted supergraph file instead of managed config
8 |       - APOLLO_OTEL_EXPORTER_TYPE=zipkin  # no PORT set — exporter presumably falls back to Zipkin's default; confirm in supergraph-demo-opentelemetry
9 |       - APOLLO_OTEL_EXPORTER_HOST=zipkin  # resolves to the zipkin service below via Compose DNS
10 |     volumes:
11 |       - ./supergraph.graphql:/etc/config/supergraph.graphql  # path expected by gateway.js in embedded mode
12 |     ports:
13 |       - "4000:4000"  # quoted to avoid the YAML 1.1 sexagesimal trap
14 |     depends_on:
15 |       - zipkin
16 |   products:
17 |     container_name: products
18 |     build: ./subgraphs/products
19 |     environment:
20 |       - APOLLO_OTEL_EXPORTER_TYPE=zipkin
21 |       - APOLLO_OTEL_EXPORTER_HOST=zipkin
22 |   inventory:
23 |     container_name: inventory
24 |     build: ./subgraphs/inventory
25 |     environment:
26 |       - APOLLO_OTEL_EXPORTER_TYPE=zipkin
27 |       - APOLLO_OTEL_EXPORTER_HOST=zipkin
28 |   users:
29 |     container_name: users
30 |     build: ./subgraphs/users
31 |     environment:
32 |       - APOLLO_OTEL_EXPORTER_TYPE=zipkin
33 |       - APOLLO_OTEL_EXPORTER_HOST=zipkin
34 |   pandas:
35 |     container_name: pandas
36 |     build: ./subgraphs/pandas
37 |   zipkin:
38 |     container_name: zipkin
39 |     image: openzipkin/zipkin:3.4.4
40 |     ports:
41 |       - "9411:9411"  # Zipkin UI/API
42 |
--------------------------------------------------------------------------------
/docker-compose.router-otel.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 |   apollo-router:
4 |     container_name: apollo-router
5 |     image: ghcr.io/apollographql/router:v1.59.2
6 |     volumes:
7 |       - ./supergraph.graphql:/dist/schema/supergraph.graphql
8 |       - ./router.yaml:/dist/config/router.yaml
9 |     command: [ "--dev", "-c", "config/router.yaml", "-s", "schema/supergraph.graphql", "--log", "info" ]  # relative paths — presumably resolved against the image's /dist workdir (matches the mounts above); confirm
10 |     ports:
11 |       - "4000:4000"  # quoted to avoid the YAML 1.1 sexagesimal trap
12 |   products:
13 |     container_name: products
14 |     build: ./subgraphs/products
15 |     environment:
16 |       - APOLLO_OTEL_EXPORTER_TYPE=collector  # console | zipkin | collector
17 |       - APOLLO_OTEL_EXPORTER_HOST=collector  # resolves to the collector service below via Compose DNS
18 |       - APOLLO_OTEL_EXPORTER_PORT=4318
19 |     volumes:
20 |       - "/etc/localtime:/etc/localtime:ro"  # share the host's timezone (read-only) with the container
21 |   inventory:
22 |     container_name: inventory
23 |     build: ./subgraphs/inventory
24 |     environment:
25 |       - APOLLO_OTEL_EXPORTER_TYPE=collector
26 |       - APOLLO_OTEL_EXPORTER_HOST=collector
27 |       - APOLLO_OTEL_EXPORTER_PORT=4318
28 |     volumes:
29 |       - "/etc/localtime:/etc/localtime:ro"
30 |   users:
31 |     container_name: users
32 |     build: ./subgraphs/users
33 |     environment:
34 |       - APOLLO_OTEL_EXPORTER_TYPE=collector
35 |       - APOLLO_OTEL_EXPORTER_HOST=collector
36 |       - APOLLO_OTEL_EXPORTER_PORT=4318
37 |     volumes:
38 |       - "/etc/localtime:/etc/localtime:ro"
39 |   pandas:
40 |     container_name: pandas
41 |     build: ./subgraphs/pandas
42 |     volumes:
43 |       - "/etc/localtime:/etc/localtime:ro"
44 |   collector:
45 |     container_name: collector
46 |     image: otel/opentelemetry-collector:0.119.0
47 |     command: ["--config=/conf/collector-config.yml"]
48 |     volumes:
49 |       - ./opentelemetry/collector-config.yml:/conf/collector-config.yml
50 |     ports:
51 |       - "9464:9464"  # presumably the Prometheus exporter endpoint scraped via opentelemetry/prometheus.yml — confirm
52 |       - "4317:4317"  # OTLP gRPC
53 |       - "4318:4318"  # OTLP HTTP — matches APOLLO_OTEL_EXPORTER_PORT above
54 |       - "55679:55679"  # NOTE(review): looks like the zpages debug extension port — confirm in collector-config.yml
55 |     depends_on:
56 |       - zipkin
57 |   zipkin:
58 |     container_name: zipkin
59 |     image: openzipkin/zipkin:3.4.4
60 |     ports:
61 |       - "9411:9411"  # Zipkin UI/API
62 |   prometheus:
63 |     container_name: prometheus
64 |     image: prom/prometheus:v2.55.1
65 |     volumes:
66 |       - ./opentelemetry/prometheus.yml:/etc/prometheus/prometheus.yml
67 |     ports:
68 |       - "9090:9090"  # Prometheus UI
69 |
--------------------------------------------------------------------------------
/docker-compose.router.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 |   apollo-router:
4 |     container_name: apollo-router
5 |     image: ghcr.io/apollographql/router:v1.59.2
6 |     volumes:
7 |       - ./supergraph.graphql:/dist/schema/supergraph.graphql
8 |       - ./router.yaml:/dist/config/router.yaml
9 |     command: [ "--dev", "-c", "config/router.yaml", "-s", "schema/supergraph.graphql", "--log", "info" ]  # relative paths — presumably resolved against the image's /dist workdir (matches the mounts above); confirm
10 |     ports:
11 |       - "4000:4000"  # quoted to avoid the YAML 1.1 sexagesimal trap
12 |   products:
13 |     container_name: products
14 |     build: ./subgraphs/products
15 |   inventory:
16 |     container_name: inventory
17 |     build: ./subgraphs/inventory
18 |   users:
19 |     container_name: users
20 |     build: ./subgraphs/users
21 |   pandas:
22 |     container_name: pandas
23 |     build: ./subgraphs/pandas
24 |
--------------------------------------------------------------------------------
/docker-compose.serverless.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 |   serverless:
4 |     container_name: serverless
5 |     build: ./serverless
6 |     environment:
7 |       - APOLLO_SCHEMA_CONFIG_EMBEDDED=true  # router reads the mounted supergraph file instead of managed config
8 |     volumes:
9 |       - ./serverless/supergraph.graphql:/etc/config/supergraph.graphql  # serverless variant composes its own supergraph (see README: rover supergraph compose --config serverless/supergraph.yaml)
10 |     ports:
11 |       - "4000:4000"  # quoted to avoid the YAML 1.1 sexagesimal trap
12 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3'  # legacy Compose field; ignored by Compose v2 but harmless
2 | services:
3 |   apollo-gateway:
4 |     container_name: apollo-gateway
5 |     build: ./gateway
6 |     environment:
7 |       - APOLLO_SCHEMA_CONFIG_EMBEDDED=true  # gateway.js loads the mounted supergraph file ("local mode") instead of managed config
8 |     volumes:
9 |       - ./supergraph.graphql:/etc/config/supergraph.graphql  # path expected by gateway.js in embedded mode
10 |     ports:
11 |       - "4000:4000"  # quoted so YAML 1.1 cannot misparse host:container as a sexagesimal number
12 |   products:
13 |     container_name: products
14 |     build: ./subgraphs/products
15 |   inventory:
16 |     container_name: inventory
17 |     build: ./subgraphs/inventory
18 |   users:
19 |     container_name: users
20 |     build: ./subgraphs/users
21 |   pandas:
22 |     container_name: pandas
23 |     build: ./subgraphs/pandas
24 |
--------------------------------------------------------------------------------
/docs/media/apollo-sandbox.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apollographql/supergraph-demo/c385e0b79d988921b88ca3280141a06076c83a56/docs/media/apollo-sandbox.png
--------------------------------------------------------------------------------
/docs/media/ci/publish-artifacts-workflow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apollographql/supergraph-demo/c385e0b79d988921b88ca3280141a06076c83a56/docs/media/ci/publish-artifacts-workflow.png
--------------------------------------------------------------------------------
/docs/media/ci/repository-dispatch-triggered.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apollographql/supergraph-demo/c385e0b79d988921b88ca3280141a06076c83a56/docs/media/ci/repository-dispatch-triggered.png
--------------------------------------------------------------------------------
/docs/media/ci/schema-check-breaking-change.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apollographql/supergraph-demo/c385e0b79d988921b88ca3280141a06076c83a56/docs/media/ci/schema-check-breaking-change.png
--------------------------------------------------------------------------------
/docs/media/ci/supergraph-pr-automerged.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apollographql/supergraph-demo/c385e0b79d988921b88ca3280141a06076c83a56/docs/media/ci/supergraph-pr-automerged.png
--------------------------------------------------------------------------------
/docs/media/ci/webhook-proxy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apollographql/supergraph-demo/c385e0b79d988921b88ca3280141a06076c83a56/docs/media/ci/webhook-proxy.png
--------------------------------------------------------------------------------
/docs/media/ci/webhook-register.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apollographql/supergraph-demo/c385e0b79d988921b88ca3280141a06076c83a56/docs/media/ci/webhook-register.png
--------------------------------------------------------------------------------
/docs/media/honeycomb.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apollographql/supergraph-demo/c385e0b79d988921b88ca3280141a06076c83a56/docs/media/honeycomb.png
--------------------------------------------------------------------------------
/docs/media/opentelemetry.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apollographql/supergraph-demo/c385e0b79d988921b88ca3280141a06076c83a56/docs/media/opentelemetry.png
--------------------------------------------------------------------------------
/docs/media/schema-check-mark-safe.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apollographql/supergraph-demo/c385e0b79d988921b88ca3280141a06076c83a56/docs/media/schema-check-mark-safe.png
--------------------------------------------------------------------------------
/docs/media/studio.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apollographql/supergraph-demo/c385e0b79d988921b88ca3280141a06076c83a56/docs/media/studio.png
--------------------------------------------------------------------------------
/docs/media/supergraph.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apollographql/supergraph-demo/c385e0b79d988921b88ca3280141a06076c83a56/docs/media/supergraph.png
--------------------------------------------------------------------------------
/gateway/Dockerfile:
--------------------------------------------------------------------------------
1 | # Image for the Apollo Gateway graph router (runs gateway.js; see docker-compose*.yml).
2 | # FROM is uppercased to match the other instructions (Docker's FromAsCasing check).
3 | FROM node:16-alpine
4 |
5 | WORKDIR /usr/src/app
6 |
7 | # Copy the manifest alone first so the npm install layer is cached
8 | # when only gateway.js changes.
9 | COPY package.json .
10 |
11 | RUN npm install
12 |
13 | COPY gateway.js .
14 |
15 | CMD [ "node", "gateway.js" ]
16 |
--------------------------------------------------------------------------------
/gateway/gateway.js:
--------------------------------------------------------------------------------
1 | // Open Telemetry (optional)
2 | const { ApolloOpenTelemetry } = require('supergraph-demo-opentelemetry');
3 |
4 | if (process.env.APOLLO_OTEL_EXPORTER_TYPE) {
5 |   new ApolloOpenTelemetry({
6 |     type: 'router',
7 |     name: 'router',
8 |     exporter: {
9 |       type: process.env.APOLLO_OTEL_EXPORTER_TYPE, // console, zipkin, collector
10 |       host: process.env.APOLLO_OTEL_EXPORTER_HOST,
11 |       port: process.env.APOLLO_OTEL_EXPORTER_PORT,
12 |     }
13 |   }).setupInstrumentation();
14 | }
15 |
16 | // Main
17 | const { ApolloServer } = require('apollo-server');
18 | const { ApolloGateway } = require('@apollo/gateway');
19 | const { readFileSync } = require('fs');
20 |
21 | const port = process.env.APOLLO_PORT || 4000;
22 | const embeddedSchema = process.env.APOLLO_SCHEMA_CONFIG_EMBEDDED == "true" ? true : false; // only the exact string "true" enables embedded mode
23 |
24 | const config = {};
25 |
26 | if (embeddedSchema){
27 |   const supergraph = "/etc/config/supergraph.graphql" // container path; mounted by the docker-compose files / k8s ConfigMap volume
28 |   config['supergraphSdl'] = readFileSync(supergraph).toString(); // read once at startup — schema changes require a restart
29 |   console.log('Starting Apollo Gateway in local mode ...');
30 |   console.log(`Using local: ${supergraph}`)
31 | } else {
32 |   console.log('Starting Apollo Gateway in managed mode ...'); // presumably requires Apollo credentials in env (graph-api.env) — confirm
33 | }
34 |
35 | const gateway = new ApolloGateway(config);
36 |
37 | const server = new ApolloServer({
38 |   gateway,
39 |   debug: true,
40 |   // Subscriptions are unsupported but planned for a future Gateway version.
41 |   subscriptions: false
42 | });
43 |
44 | server.listen( {port: port} ).then(({ url }) => {
45 |   console.log(`🚀 Graph Router ready at ${url}`);
46 | }).catch(err => {console.error(err)}); // log startup failure; process stays alive only until the event loop drains
47 |
--------------------------------------------------------------------------------
/gateway/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "supergraph-router",
3 | "version": "1.1.44",
4 | "description": "",
5 |   "main": "gateway.js",
6 |   "scripts": {
7 |     "start": "node gateway.js"
8 |   },
9 | "dependencies": {
10 | "@apollo/gateway": "0.54.1",
11 | "apollo-server": "3.13.0",
12 | "supergraph-demo-opentelemetry": "0.2.4",
13 | "graphql": "16.10.0"
14 | },
15 | "keywords": [],
16 | "author": "",
17 | "license": "MIT"
18 | }
19 |
--------------------------------------------------------------------------------
/k8s/clusters/kind-cluster.yaml:
--------------------------------------------------------------------------------
1 | kind: Cluster
2 | apiVersion: kind.x-k8s.io/v1alpha4
3 | nodes:
4 | - role: control-plane
5 |   kubeadmConfigPatches:  # labels the node ingress-ready=true — presumably the NGINX controller's node selector; confirm in k8s/infra
6 |   - |
7 |     kind: InitConfiguration
8 |     nodeRegistration:
9 |       kubeletExtraArgs:
10 |         node-labels: "ingress-ready=true"
11 |   extraPortMappings:  # forward host 80/443 into the node so the ingress answers on localhost (README smoke test curls localhost:80)
12 |   - containerPort: 80
13 |     hostPort: 80
14 |     protocol: TCP
15 |   - containerPort: 443
16 |     hostPort: 443
17 |     protocol: TCP
18 |
--------------------------------------------------------------------------------
/k8s/infra/base/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 | resources:
4 |   - nginx-ingress.yaml  # ingress-nginx namespace, RBAC, and controller Services
5 |
--------------------------------------------------------------------------------
/k8s/infra/base/nginx-ingress.yaml:
--------------------------------------------------------------------------------
1 |
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | name: ingress-nginx
6 | labels:
7 | app.kubernetes.io/name: ingress-nginx
8 | app.kubernetes.io/instance: ingress-nginx
9 |
10 | ---
11 | # Source: ingress-nginx/templates/controller-serviceaccount.yaml
12 | apiVersion: v1
13 | kind: ServiceAccount
14 | metadata:
15 | labels:
16 | helm.sh/chart: ingress-nginx-3.33.0
17 | app.kubernetes.io/name: ingress-nginx
18 | app.kubernetes.io/instance: ingress-nginx
19 | app.kubernetes.io/version: 0.47.0
20 | app.kubernetes.io/managed-by: Helm
21 | app.kubernetes.io/component: controller
22 | name: ingress-nginx
23 | namespace: ingress-nginx
24 | automountServiceAccountToken: true
25 | ---
26 | # Source: ingress-nginx/templates/controller-configmap.yaml
27 | apiVersion: v1
28 | kind: ConfigMap
29 | metadata:
30 | labels:
31 | helm.sh/chart: ingress-nginx-3.33.0
32 | app.kubernetes.io/name: ingress-nginx
33 | app.kubernetes.io/instance: ingress-nginx
34 | app.kubernetes.io/version: 0.47.0
35 | app.kubernetes.io/managed-by: Helm
36 | app.kubernetes.io/component: controller
37 | name: ingress-nginx-controller
38 | namespace: ingress-nginx
39 | data:
40 | ---
41 | # Source: ingress-nginx/templates/clusterrole.yaml
42 | apiVersion: rbac.authorization.k8s.io/v1
43 | kind: ClusterRole
44 | metadata:
45 | labels:
46 | helm.sh/chart: ingress-nginx-3.33.0
47 | app.kubernetes.io/name: ingress-nginx
48 | app.kubernetes.io/instance: ingress-nginx
49 | app.kubernetes.io/version: 0.47.0
50 | app.kubernetes.io/managed-by: Helm
51 | name: ingress-nginx
52 | rules:
53 | - apiGroups:
54 | - ''
55 | resources:
56 | - configmaps
57 | - endpoints
58 | - nodes
59 | - pods
60 | - secrets
61 | verbs:
62 | - list
63 | - watch
64 | - apiGroups:
65 | - ''
66 | resources:
67 | - nodes
68 | verbs:
69 | - get
70 | - apiGroups:
71 | - ''
72 | resources:
73 | - services
74 | verbs:
75 | - get
76 | - list
77 | - watch
78 | - apiGroups:
79 | - extensions
80 | - networking.k8s.io # k8s 1.14+
81 | resources:
82 | - ingresses
83 | verbs:
84 | - get
85 | - list
86 | - watch
87 | - apiGroups:
88 | - ''
89 | resources:
90 | - events
91 | verbs:
92 | - create
93 | - patch
94 | - apiGroups:
95 | - extensions
96 | - networking.k8s.io # k8s 1.14+
97 | resources:
98 | - ingresses/status
99 | verbs:
100 | - update
101 | - apiGroups:
102 | - networking.k8s.io # k8s 1.14+
103 | resources:
104 | - ingressclasses
105 | verbs:
106 | - get
107 | - list
108 | - watch
109 | ---
110 | # Source: ingress-nginx/templates/clusterrolebinding.yaml
111 | apiVersion: rbac.authorization.k8s.io/v1
112 | kind: ClusterRoleBinding
113 | metadata:
114 | labels:
115 | helm.sh/chart: ingress-nginx-3.33.0
116 | app.kubernetes.io/name: ingress-nginx
117 | app.kubernetes.io/instance: ingress-nginx
118 | app.kubernetes.io/version: 0.47.0
119 | app.kubernetes.io/managed-by: Helm
120 | name: ingress-nginx
121 | roleRef:
122 | apiGroup: rbac.authorization.k8s.io
123 | kind: ClusterRole
124 | name: ingress-nginx
125 | subjects:
126 | - kind: ServiceAccount
127 | name: ingress-nginx
128 | namespace: ingress-nginx
129 | ---
130 | # Source: ingress-nginx/templates/controller-role.yaml
131 | apiVersion: rbac.authorization.k8s.io/v1
132 | kind: Role
133 | metadata:
134 | labels:
135 | helm.sh/chart: ingress-nginx-3.33.0
136 | app.kubernetes.io/name: ingress-nginx
137 | app.kubernetes.io/instance: ingress-nginx
138 | app.kubernetes.io/version: 0.47.0
139 | app.kubernetes.io/managed-by: Helm
140 | app.kubernetes.io/component: controller
141 | name: ingress-nginx
142 | namespace: ingress-nginx
143 | rules:
144 | - apiGroups:
145 | - ''
146 | resources:
147 | - namespaces
148 | verbs:
149 | - get
150 | - apiGroups:
151 | - ''
152 | resources:
153 | - configmaps
154 | - pods
155 | - secrets
156 | - endpoints
157 | verbs:
158 | - get
159 | - list
160 | - watch
161 | - apiGroups:
162 | - ''
163 | resources:
164 | - services
165 | verbs:
166 | - get
167 | - list
168 | - watch
169 | - apiGroups:
170 | - extensions
171 | - networking.k8s.io # k8s 1.14+
172 | resources:
173 | - ingresses
174 | verbs:
175 | - get
176 | - list
177 | - watch
178 | - apiGroups:
179 | - extensions
180 | - networking.k8s.io # k8s 1.14+
181 | resources:
182 | - ingresses/status
183 | verbs:
184 | - update
185 | - apiGroups:
186 | - networking.k8s.io # k8s 1.14+
187 | resources:
188 | - ingressclasses
189 | verbs:
190 | - get
191 | - list
192 | - watch
193 | - apiGroups:
194 | - ''
195 | resources:
196 | - configmaps
197 | resourceNames:
198 | - ingress-controller-leader-nginx
199 | verbs:
200 | - get
201 | - update
202 | - apiGroups:
203 | - ''
204 | resources:
205 | - configmaps
206 | verbs:
207 | - create
208 | - apiGroups:
209 | - ''
210 | resources:
211 | - events
212 | verbs:
213 | - create
214 | - patch
215 | ---
216 | # Source: ingress-nginx/templates/controller-rolebinding.yaml
217 | apiVersion: rbac.authorization.k8s.io/v1
218 | kind: RoleBinding
219 | metadata:
220 | labels:
221 | helm.sh/chart: ingress-nginx-3.33.0
222 | app.kubernetes.io/name: ingress-nginx
223 | app.kubernetes.io/instance: ingress-nginx
224 | app.kubernetes.io/version: 0.47.0
225 | app.kubernetes.io/managed-by: Helm
226 | app.kubernetes.io/component: controller
227 | name: ingress-nginx
228 | namespace: ingress-nginx
229 | roleRef:
230 | apiGroup: rbac.authorization.k8s.io
231 | kind: Role
232 | name: ingress-nginx
233 | subjects:
234 | - kind: ServiceAccount
235 | name: ingress-nginx
236 | namespace: ingress-nginx
237 | ---
238 | # Source: ingress-nginx/templates/controller-service-webhook.yaml
239 | apiVersion: v1
240 | kind: Service
241 | metadata:
242 | labels:
243 | helm.sh/chart: ingress-nginx-3.33.0
244 | app.kubernetes.io/name: ingress-nginx
245 | app.kubernetes.io/instance: ingress-nginx
246 | app.kubernetes.io/version: 0.47.0
247 | app.kubernetes.io/managed-by: Helm
248 | app.kubernetes.io/component: controller
249 | name: ingress-nginx-controller-admission
250 | namespace: ingress-nginx
251 | spec:
252 | type: ClusterIP
253 | ports:
254 | - name: https-webhook
255 | port: 443
256 | targetPort: webhook
257 | selector:
258 | app.kubernetes.io/name: ingress-nginx
259 | app.kubernetes.io/instance: ingress-nginx
260 | app.kubernetes.io/component: controller
261 | ---
262 | # Source: ingress-nginx/templates/controller-service.yaml
263 | apiVersion: v1
264 | kind: Service
265 | metadata:
266 | annotations:
267 | labels:
268 | helm.sh/chart: ingress-nginx-3.33.0
269 | app.kubernetes.io/name: ingress-nginx
270 | app.kubernetes.io/instance: ingress-nginx
271 | app.kubernetes.io/version: 0.47.0
272 | app.kubernetes.io/managed-by: Helm
273 | app.kubernetes.io/component: controller
274 | name: ingress-nginx-controller
275 | namespace: ingress-nginx
276 | spec:
277 | type: NodePort
278 | ports:
279 | - name: http
280 | port: 80
281 | protocol: TCP
282 | targetPort: http
283 | - name: https
284 | port: 443
285 | protocol: TCP
286 | targetPort: https
287 | selector:
288 | app.kubernetes.io/name: ingress-nginx
289 | app.kubernetes.io/instance: ingress-nginx
290 | app.kubernetes.io/component: controller
291 | ---
292 | # Source: ingress-nginx/templates/controller-deployment.yaml
293 | apiVersion: apps/v1
294 | kind: Deployment
295 | metadata:
296 | labels:
297 | helm.sh/chart: ingress-nginx-3.33.0
298 | app.kubernetes.io/name: ingress-nginx
299 | app.kubernetes.io/instance: ingress-nginx
300 | app.kubernetes.io/version: 0.47.0
301 | app.kubernetes.io/managed-by: Helm
302 | app.kubernetes.io/component: controller
303 | name: ingress-nginx-controller
304 | namespace: ingress-nginx
305 | spec:
306 | selector:
307 | matchLabels:
308 | app.kubernetes.io/name: ingress-nginx
309 | app.kubernetes.io/instance: ingress-nginx
310 | app.kubernetes.io/component: controller
311 | revisionHistoryLimit: 10
312 | strategy:
313 | rollingUpdate:
314 | maxUnavailable: 1
315 | type: RollingUpdate
316 | minReadySeconds: 0
317 | template:
318 | metadata:
319 | labels:
320 | app.kubernetes.io/name: ingress-nginx
321 | app.kubernetes.io/instance: ingress-nginx
322 | app.kubernetes.io/component: controller
323 | spec:
324 | dnsPolicy: ClusterFirst
325 | containers:
326 | - name: controller
327 | image: k8s.gcr.io/ingress-nginx/controller:v0.46.0@sha256:52f0058bed0a17ab0fb35628ba97e8d52b5d32299fbc03cc0f6c7b9ff036b61a
328 | imagePullPolicy: IfNotPresent
329 | lifecycle:
330 | preStop:
331 | exec:
332 | command:
333 | - /wait-shutdown
334 | args:
335 | - /nginx-ingress-controller
336 | - --election-id=ingress-controller-leader
337 | - --ingress-class=nginx
338 | - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
339 | - --validating-webhook=:8443
340 | - --validating-webhook-certificate=/usr/local/certificates/cert
341 | - --validating-webhook-key=/usr/local/certificates/key
342 | - --publish-status-address=localhost
343 | securityContext:
344 | capabilities:
345 | drop:
346 | - ALL
347 | add:
348 | - NET_BIND_SERVICE
349 | runAsUser: 101
350 | allowPrivilegeEscalation: true
351 | env:
352 | - name: POD_NAME
353 | valueFrom:
354 | fieldRef:
355 | fieldPath: metadata.name
356 | - name: POD_NAMESPACE
357 | valueFrom:
358 | fieldRef:
359 | fieldPath: metadata.namespace
360 | - name: LD_PRELOAD
361 | value: /usr/local/lib/libmimalloc.so
362 | livenessProbe:
363 | failureThreshold: 5
364 | httpGet:
365 | path: /healthz
366 | port: 10254
367 | scheme: HTTP
368 | initialDelaySeconds: 10
369 | periodSeconds: 10
370 | successThreshold: 1
371 | timeoutSeconds: 1
372 | readinessProbe:
373 | failureThreshold: 3
374 | httpGet:
375 | path: /healthz
376 | port: 10254
377 | scheme: HTTP
378 | initialDelaySeconds: 10
379 | periodSeconds: 10
380 | successThreshold: 1
381 | timeoutSeconds: 1
382 | ports:
383 | - name: http
384 | containerPort: 80
385 | protocol: TCP
386 | hostPort: 80
387 | - name: https
388 | containerPort: 443
389 | protocol: TCP
390 | hostPort: 443
391 | - name: webhook
392 | containerPort: 8443
393 | protocol: TCP
394 | volumeMounts:
395 | - name: webhook-cert
396 | mountPath: /usr/local/certificates/
397 | readOnly: true
398 | resources:
399 | requests:
400 | cpu: 100m
401 | memory: 90Mi
402 | nodeSelector:
403 | ingress-ready: 'true'
404 | kubernetes.io/os: linux
405 | tolerations:
406 | - effect: NoSchedule
407 | key: node-role.kubernetes.io/master
408 | operator: Equal
409 | serviceAccountName: ingress-nginx
410 | terminationGracePeriodSeconds: 0
411 | volumes:
412 | - name: webhook-cert
413 | secret:
414 | secretName: ingress-nginx-admission
415 | ---
416 | # Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml
417 | # before changing this value, check the required kubernetes version
418 | # https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites
419 | apiVersion: admissionregistration.k8s.io/v1
420 | kind: ValidatingWebhookConfiguration
421 | metadata:
422 | labels:
423 | helm.sh/chart: ingress-nginx-3.33.0
424 | app.kubernetes.io/name: ingress-nginx
425 | app.kubernetes.io/instance: ingress-nginx
426 | app.kubernetes.io/version: 0.47.0
427 | app.kubernetes.io/managed-by: Helm
428 | app.kubernetes.io/component: admission-webhook
429 | name: ingress-nginx-admission
430 | webhooks:
431 | - name: validate.nginx.ingress.kubernetes.io
432 | matchPolicy: Equivalent
433 | rules:
434 | - apiGroups:
435 | - networking.k8s.io
436 | apiVersions:
437 | - v1beta1
438 | operations:
439 | - CREATE
440 | - UPDATE
441 | resources:
442 | - ingresses
443 | failurePolicy: Fail
444 | sideEffects: None
445 | admissionReviewVersions:
446 | - v1
447 | - v1beta1
448 | clientConfig:
449 | service:
450 | namespace: ingress-nginx
451 | name: ingress-nginx-controller-admission
452 | path: /networking/v1beta1/ingresses
453 | ---
454 | # Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml
455 | apiVersion: v1
456 | kind: ServiceAccount
457 | metadata:
458 | name: ingress-nginx-admission
459 | annotations:
460 | helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
461 | helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
462 | labels:
463 | helm.sh/chart: ingress-nginx-3.33.0
464 | app.kubernetes.io/name: ingress-nginx
465 | app.kubernetes.io/instance: ingress-nginx
466 | app.kubernetes.io/version: 0.47.0
467 | app.kubernetes.io/managed-by: Helm
468 | app.kubernetes.io/component: admission-webhook
469 | namespace: ingress-nginx
470 | ---
471 | # Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml
472 | apiVersion: rbac.authorization.k8s.io/v1
473 | kind: ClusterRole
474 | metadata:
475 | name: ingress-nginx-admission
476 | annotations:
477 | helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
478 | helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
479 | labels:
480 | helm.sh/chart: ingress-nginx-3.33.0
481 | app.kubernetes.io/name: ingress-nginx
482 | app.kubernetes.io/instance: ingress-nginx
483 | app.kubernetes.io/version: 0.47.0
484 | app.kubernetes.io/managed-by: Helm
485 | app.kubernetes.io/component: admission-webhook
486 | rules:
487 | - apiGroups:
488 | - admissionregistration.k8s.io
489 | resources:
490 | - validatingwebhookconfigurations
491 | verbs:
492 | - get
493 | - update
494 | ---
495 | # Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml
496 | apiVersion: rbac.authorization.k8s.io/v1
497 | kind: ClusterRoleBinding
498 | metadata:
499 | name: ingress-nginx-admission
500 | annotations:
501 | helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
502 | helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
503 | labels:
504 | helm.sh/chart: ingress-nginx-3.33.0
505 | app.kubernetes.io/name: ingress-nginx
506 | app.kubernetes.io/instance: ingress-nginx
507 | app.kubernetes.io/version: 0.47.0
508 | app.kubernetes.io/managed-by: Helm
509 | app.kubernetes.io/component: admission-webhook
510 | roleRef:
511 | apiGroup: rbac.authorization.k8s.io
512 | kind: ClusterRole
513 | name: ingress-nginx-admission
514 | subjects:
515 | - kind: ServiceAccount
516 | name: ingress-nginx-admission
517 | namespace: ingress-nginx
518 | ---
519 | # Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml
520 | apiVersion: rbac.authorization.k8s.io/v1
521 | kind: Role
522 | metadata:
523 | name: ingress-nginx-admission
524 | annotations:
525 | helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
526 | helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
527 | labels:
528 | helm.sh/chart: ingress-nginx-3.33.0
529 | app.kubernetes.io/name: ingress-nginx
530 | app.kubernetes.io/instance: ingress-nginx
531 | app.kubernetes.io/version: 0.47.0
532 | app.kubernetes.io/managed-by: Helm
533 | app.kubernetes.io/component: admission-webhook
534 | namespace: ingress-nginx
535 | rules:
536 | - apiGroups:
537 | - ''
538 | resources:
539 | - secrets
540 | verbs:
541 | - get
542 | - create
543 | ---
544 | # Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml
545 | apiVersion: rbac.authorization.k8s.io/v1
546 | kind: RoleBinding
547 | metadata:
548 | name: ingress-nginx-admission
549 | annotations:
550 | helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
551 | helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
552 | labels:
553 | helm.sh/chart: ingress-nginx-3.33.0
554 | app.kubernetes.io/name: ingress-nginx
555 | app.kubernetes.io/instance: ingress-nginx
556 | app.kubernetes.io/version: 0.47.0
557 | app.kubernetes.io/managed-by: Helm
558 | app.kubernetes.io/component: admission-webhook
559 | namespace: ingress-nginx
560 | roleRef:
561 | apiGroup: rbac.authorization.k8s.io
562 | kind: Role
563 | name: ingress-nginx-admission
564 | subjects:
565 | - kind: ServiceAccount
566 | name: ingress-nginx-admission
567 | namespace: ingress-nginx
568 | ---
569 | # Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml
570 | apiVersion: batch/v1
571 | kind: Job
572 | metadata:
573 | name: ingress-nginx-admission-create
574 | annotations:
575 | helm.sh/hook: pre-install,pre-upgrade
576 | helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
577 | labels:
578 | helm.sh/chart: ingress-nginx-3.33.0
579 | app.kubernetes.io/name: ingress-nginx
580 | app.kubernetes.io/instance: ingress-nginx
581 | app.kubernetes.io/version: 0.47.0
582 | app.kubernetes.io/managed-by: Helm
583 | app.kubernetes.io/component: admission-webhook
584 | namespace: ingress-nginx
585 | spec:
586 | template:
587 | metadata:
588 | name: ingress-nginx-admission-create
589 | labels:
590 | helm.sh/chart: ingress-nginx-3.33.0
591 | app.kubernetes.io/name: ingress-nginx
592 | app.kubernetes.io/instance: ingress-nginx
593 | app.kubernetes.io/version: 0.47.0
594 | app.kubernetes.io/managed-by: Helm
595 | app.kubernetes.io/component: admission-webhook
596 | spec:
597 | containers:
598 | - name: create
599 | image: docker.io/jettech/kube-webhook-certgen:v1.5.1
600 | imagePullPolicy: IfNotPresent
601 | args:
602 | - create
603 | - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
604 | - --namespace=$(POD_NAMESPACE)
605 | - --secret-name=ingress-nginx-admission
606 | env:
607 | - name: POD_NAMESPACE
608 | valueFrom:
609 | fieldRef:
610 | fieldPath: metadata.namespace
611 | restartPolicy: OnFailure
612 | serviceAccountName: ingress-nginx-admission
613 | securityContext:
614 | runAsNonRoot: true
615 | runAsUser: 2000
616 | ---
617 | # Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml
618 | apiVersion: batch/v1
619 | kind: Job
620 | metadata:
621 | name: ingress-nginx-admission-patch
622 | annotations:
623 | helm.sh/hook: post-install,post-upgrade
624 | helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
625 | labels:
626 | helm.sh/chart: ingress-nginx-3.33.0
627 | app.kubernetes.io/name: ingress-nginx
628 | app.kubernetes.io/instance: ingress-nginx
629 | app.kubernetes.io/version: 0.47.0
630 | app.kubernetes.io/managed-by: Helm
631 | app.kubernetes.io/component: admission-webhook
632 | namespace: ingress-nginx
633 | spec:
634 | template:
635 | metadata:
636 | name: ingress-nginx-admission-patch
637 | labels:
638 | helm.sh/chart: ingress-nginx-3.33.0
639 | app.kubernetes.io/name: ingress-nginx
640 | app.kubernetes.io/instance: ingress-nginx
641 | app.kubernetes.io/version: 0.47.0
642 | app.kubernetes.io/managed-by: Helm
643 | app.kubernetes.io/component: admission-webhook
644 | spec:
645 | containers:
646 | - name: patch
647 | image: docker.io/jettech/kube-webhook-certgen:v1.5.1
648 | imagePullPolicy: IfNotPresent
649 | args:
650 | - patch
651 | - --webhook-name=ingress-nginx-admission
652 | - --namespace=$(POD_NAMESPACE)
653 | - --patch-mutating=false
654 | - --secret-name=ingress-nginx-admission
655 | - --patch-failure-policy=Fail
656 | env:
657 | - name: POD_NAMESPACE
658 | valueFrom:
659 | fieldRef:
660 | fieldPath: metadata.namespace
661 | restartPolicy: OnFailure
662 | serviceAccountName: ingress-nginx-admission
663 | securityContext:
664 | runAsNonRoot: true
665 | runAsUser: 2000
666 |
--------------------------------------------------------------------------------
/k8s/infra/dev/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 | # `bases` was deprecated in kustomize v2.1.0; `resources` accepts the same
4 | # paths and matches the other overlays in this repo (k8s/router/dev,
5 | # k8s/subgraphs/dev), which already use `resources`.
6 | resources:
7 | - ../base
8 |
--------------------------------------------------------------------------------
/k8s/router/base/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 | resources:
4 | - router.yaml
5 | configMapGenerator:
6 | - name: supergraph
7 | files:
8 | - ./supergraph.graphql
9 |
--------------------------------------------------------------------------------
/k8s/router/base/router.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: router-deployment
5 | labels:
6 | app: router
7 | spec:
8 | replicas: 1
9 | selector:
10 | matchLabels:
11 | app: router
12 | template:
13 | metadata:
14 | labels:
15 | app: router
16 | spec:
17 | containers:
18 | - name: router
19 | image: prasek/supergraph-router:latest
20 | volumeMounts:
21 | - name: supergraph-volume
22 | mountPath: /etc/config
23 | env:
24 | - name: APOLLO_SCHEMA_CONFIG_EMBEDDED
25 | value: "true"
26 | ports:
27 | - containerPort: 4000
28 | volumes:
29 | - name: supergraph-volume
30 | configMap:
31 | name: supergraph
32 | ---
33 | apiVersion: v1
34 | kind: Service
35 | metadata:
36 | name: router-service
37 | spec:
38 | selector:
39 | app: router
40 | ports:
41 | - protocol: TCP
42 | port: 4000
43 | targetPort: 4000
44 | ---
45 | apiVersion: networking.k8s.io/v1
46 | kind: Ingress
47 | metadata:
48 | name: router-ingress
49 | annotations:
50 | kubernetes.io/ingress.class: nginx
51 | spec:
52 | rules:
53 | - http:
54 | paths:
55 | - path: /
56 | pathType: Prefix
57 | backend:
58 | service:
59 | name: router-service
60 | port:
61 | number: 4000
62 |
--------------------------------------------------------------------------------
/k8s/router/base/supergraph.graphql:
--------------------------------------------------------------------------------
1 | schema
2 | @core(feature: "https://specs.apollo.dev/core/v0.2"),
3 | @core(feature: "https://specs.apollo.dev/join/v0.1", for: EXECUTION),
4 | @core(feature: "https://specs.apollo.dev/tag/v0.1")
5 | {
6 | query: Query
7 | }
8 |
9 | directive @core(as: String, feature: String!, for: core__Purpose) repeatable on SCHEMA
10 |
11 | directive @join__field(graph: join__Graph, provides: join__FieldSet, requires: join__FieldSet) on FIELD_DEFINITION
12 |
13 | directive @join__graph(name: String!, url: String!) on ENUM_VALUE
14 |
15 | directive @join__owner(graph: join__Graph!) on INTERFACE | OBJECT
16 |
17 | directive @join__type(graph: join__Graph!, key: join__FieldSet) repeatable on INTERFACE | OBJECT
18 |
19 | directive @tag(name: String!) repeatable on FIELD_DEFINITION | INTERFACE | OBJECT | UNION
20 |
21 | type DeliveryEstimates {
22 | estimatedDelivery: String
23 | fastestDelivery: String
24 | }
25 |
26 | type Panda {
27 | favoriteFood: String
28 | name: ID!
29 | }
30 |
31 | type Product
32 | @join__owner(graph: PRODUCTS)
33 | @join__type(graph: PRODUCTS, key: "id")
34 | @join__type(graph: PRODUCTS, key: "sku package")
35 | @join__type(graph: PRODUCTS, key: "sku variation{id}")
36 | @join__type(graph: INVENTORY, key: "id")
37 | {
38 | createdBy: User @join__field(graph: PRODUCTS, provides: "totalProductsCreated")
39 | delivery(zip: String): DeliveryEstimates @join__field(graph: INVENTORY, requires: "dimensions{size weight}")
40 | dimensions: ProductDimension @join__field(graph: PRODUCTS)
41 | id: ID! @join__field(graph: PRODUCTS) @tag(name: "hi-from-inventory") @tag(name: "hi-from-products")
42 | package: String @join__field(graph: PRODUCTS)
43 | sku: String @join__field(graph: PRODUCTS) @tag(name: "hi-from-products")
44 | variation: ProductVariation @join__field(graph: PRODUCTS)
45 | }
46 |
47 | type ProductDimension {
48 | size: String
49 | weight: Float @tag(name: "hi-from-inventory-value-type-field")
50 | }
51 |
52 | type ProductVariation {
53 | id: ID!
54 | }
55 |
56 | type Query {
57 | allPandas: [Panda] @join__field(graph: PANDAS)
58 | allProducts: [Product] @join__field(graph: PRODUCTS)
59 | panda(name: ID!): Panda @join__field(graph: PANDAS)
60 | product(id: ID!): Product @join__field(graph: PRODUCTS)
61 | }
62 |
63 | type User
64 | @join__owner(graph: USERS)
65 | @join__type(graph: USERS, key: "email")
66 | @join__type(graph: PRODUCTS, key: "email")
67 | {
68 | email: ID! @join__field(graph: USERS)
69 | name: String @join__field(graph: USERS)
70 | totalProductsCreated: Int @join__field(graph: USERS)
71 | }
72 |
73 | enum core__Purpose {
74 | """
75 | `EXECUTION` features provide metadata necessary for operation execution.
76 | """
77 | EXECUTION
78 |
79 | """
80 | `SECURITY` features provide metadata necessary to securely resolve fields.
81 | """
82 | SECURITY
83 | }
84 |
85 | scalar join__FieldSet
86 |
87 | enum join__Graph {
88 | INVENTORY @join__graph(name: "inventory" url: "http://inventory:4000/graphql")
89 | PANDAS @join__graph(name: "pandas" url: "http://pandas:4000/graphql")
90 | PRODUCTS @join__graph(name: "products" url: "http://products:4000/graphql")
91 | USERS @join__graph(name: "users" url: "http://users:4000/graphql")
92 | }
93 |
--------------------------------------------------------------------------------
/k8s/router/dev/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 | configMapGenerator:
4 | - behavior: replace
5 | files:
6 | - ./supergraph.graphql
7 | name: supergraph
8 | resources:
9 | - ../base
10 | images:
11 | - name: prasek/supergraph-router:latest # NOTE(review): kustomize `images[].name` normally matches the image name WITHOUT the tag — confirm this override actually applies; consider `name: prasek/supergraph-router`
12 | newName: prasek/supergraph-router
13 | newTag: 1.1.45
14 |
--------------------------------------------------------------------------------
/k8s/router/dev/supergraph.graphql:
--------------------------------------------------------------------------------
1 | schema
2 | @core(feature: "https://specs.apollo.dev/core/v0.2"),
3 | @core(feature: "https://specs.apollo.dev/join/v0.1", for: EXECUTION),
4 | @core(feature: "https://specs.apollo.dev/tag/v0.1")
5 | {
6 | query: Query
7 | }
8 |
9 | directive @core(as: String, feature: String!, for: core__Purpose) repeatable on SCHEMA
10 |
11 | directive @join__field(graph: join__Graph, provides: join__FieldSet, requires: join__FieldSet) on FIELD_DEFINITION
12 |
13 | directive @join__graph(name: String!, url: String!) on ENUM_VALUE
14 |
15 | directive @join__owner(graph: join__Graph!) on INTERFACE | OBJECT
16 |
17 | directive @join__type(graph: join__Graph!, key: join__FieldSet) repeatable on INTERFACE | OBJECT
18 |
19 | directive @tag(name: String!) repeatable on FIELD_DEFINITION | INTERFACE | OBJECT | UNION
20 |
21 | type DeliveryEstimates {
22 | estimatedDelivery: String
23 | fastestDelivery: String
24 | }
25 |
26 | type Panda {
27 | favoriteFood: String
28 | name: ID!
29 | }
30 |
31 | type Product
32 | @join__owner(graph: PRODUCTS)
33 | @join__type(graph: PRODUCTS, key: "id")
34 | @join__type(graph: PRODUCTS, key: "sku package")
35 | @join__type(graph: PRODUCTS, key: "sku variation{id}")
36 | @join__type(graph: INVENTORY, key: "id")
37 | {
38 | createdBy: User @join__field(graph: PRODUCTS, provides: "totalProductsCreated")
39 | delivery(zip: String): DeliveryEstimates @join__field(graph: INVENTORY, requires: "dimensions{size weight}")
40 | dimensions: ProductDimension @join__field(graph: PRODUCTS)
41 | id: ID! @join__field(graph: PRODUCTS) @tag(name: "hi-from-inventory") @tag(name: "hi-from-products")
42 | package: String @join__field(graph: PRODUCTS)
43 | sku: String @join__field(graph: PRODUCTS) @tag(name: "hi-from-products")
44 | variation: ProductVariation @join__field(graph: PRODUCTS)
45 | }
46 |
47 | type ProductDimension {
48 | size: String
49 | weight: Float @tag(name: "hi-from-inventory-value-type-field")
50 | }
51 |
52 | type ProductVariation {
53 | id: ID!
54 | }
55 |
56 | type Query {
57 | allPandas: [Panda] @join__field(graph: PANDAS)
58 | allProducts: [Product] @join__field(graph: PRODUCTS)
59 | panda(name: ID!): Panda @join__field(graph: PANDAS)
60 | product(id: ID!): Product @join__field(graph: PRODUCTS)
61 | }
62 |
63 | type User
64 | @join__owner(graph: USERS)
65 | @join__type(graph: USERS, key: "email")
66 | @join__type(graph: PRODUCTS, key: "email")
67 | {
68 | email: ID! @join__field(graph: USERS)
69 | name: String @join__field(graph: USERS)
70 | totalProductsCreated: Int @join__field(graph: USERS)
71 | }
72 |
73 | enum core__Purpose {
74 | """
75 | `EXECUTION` features provide metadata necessary for operation execution.
76 | """
77 | EXECUTION
78 |
79 | """
80 | `SECURITY` features provide metadata necessary to securely resolve fields.
81 | """
82 | SECURITY
83 | }
84 |
85 | scalar join__FieldSet
86 |
87 | enum join__Graph {
88 | INVENTORY @join__graph(name: "inventory" url: "http://inventory:4000/graphql")
89 | PANDAS @join__graph(name: "pandas" url: "http://pandas:4000/graphql")
90 | PRODUCTS @join__graph(name: "products" url: "http://products:4000/graphql")
91 | USERS @join__graph(name: "users" url: "http://users:4000/graphql")
92 | }
93 |
--------------------------------------------------------------------------------
/k8s/subgraphs/base/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 | resources:
4 | - subgraphs.yaml
5 |
--------------------------------------------------------------------------------
/k8s/subgraphs/base/subgraphs.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: products
5 | labels:
6 | app: subgraph-products
7 | spec:
8 | replicas: 1
9 | selector:
10 | matchLabels:
11 | app: subgraph-products
12 | template:
13 | metadata:
14 | labels:
15 | app: subgraph-products
16 | spec:
17 | containers:
18 | - name: products
19 | image: prasek/subgraph-products:latest
20 | ports:
21 | - containerPort: 4000
22 | ---
23 | apiVersion: v1
24 | kind: Service
25 | metadata:
26 | name: products
27 | spec:
28 | selector:
29 | app: subgraph-products
30 | ports:
31 | - protocol: TCP
32 | port: 4000
33 | targetPort: 4000
34 | ---
35 | apiVersion: apps/v1
36 | kind: Deployment
37 | metadata:
38 | name: users
39 | labels:
40 | app: subgraph-users
41 | spec:
42 | replicas: 1
43 | selector:
44 | matchLabels:
45 | app: subgraph-users
46 | template:
47 | metadata:
48 | labels:
49 | app: subgraph-users
50 | spec:
51 | containers:
52 | - name: users
53 | image: prasek/subgraph-users:latest
54 | ports:
55 | - containerPort: 4000
56 | ---
57 | apiVersion: v1
58 | kind: Service
59 | metadata:
60 | name: users
61 | spec:
62 | selector:
63 | app: subgraph-users
64 | ports:
65 | - protocol: TCP
66 | port: 4000
67 | targetPort: 4000
68 | ---
69 | apiVersion: apps/v1
70 | kind: Deployment
71 | metadata:
72 | name: inventory
73 | labels:
74 | app: subgraph-inventory
75 | spec:
76 | replicas: 1
77 | selector:
78 | matchLabels:
79 | app: subgraph-inventory
80 | template:
81 | metadata:
82 | labels:
83 | app: subgraph-inventory
84 | spec:
85 | containers:
86 | - name: inventory
87 | image: prasek/subgraph-inventory:latest
88 | ports:
89 | - containerPort: 4000
90 | ---
91 | apiVersion: v1
92 | kind: Service
93 | metadata:
94 | name: inventory
95 | spec:
96 | selector:
97 | app: subgraph-inventory
98 | ports:
99 | - protocol: TCP
100 | port: 4000
101 | targetPort: 4000
102 | ---
103 | apiVersion: apps/v1
104 | kind: Deployment
105 | metadata:
106 | name: pandas
107 | labels:
108 | app: subgraph-pandas
109 | spec:
110 | replicas: 1
111 | selector:
112 | matchLabels:
113 | app: subgraph-pandas
114 | template:
115 | metadata:
116 | labels:
117 | app: subgraph-pandas
118 | spec:
119 | containers:
120 | - name: pandas
121 | image: prasek/subgraph-pandas:latest
122 | ports:
123 | - containerPort: 4000
124 | ---
125 | apiVersion: v1
126 | kind: Service
127 | metadata:
128 | name: pandas
129 | spec:
130 | selector:
131 | app: subgraph-pandas
132 | ports:
133 | - protocol: TCP
134 | port: 4000
135 | targetPort: 4000
136 |
--------------------------------------------------------------------------------
/k8s/subgraphs/dev/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 | resources:
4 | - ../base
5 | images:
6 | - name: prasek/subgraph-inventory:latest
7 | newName: prasek/subgraph-inventory
8 | newTag: 1.1.49
9 | - name: prasek/subgraph-pandas:latest
10 | newName: prasek/subgraph-pandas
11 | newTag: 1.1.38
12 | - name: prasek/subgraph-products:latest
13 | newName: prasek/subgraph-products
14 | newTag: 1.1.45
15 | - name: prasek/subgraph-users:latest
16 | newName: prasek/subgraph-users
17 | newTag: 1.1.47
18 | - name: prasek/supergraph-serverless:latest
19 | newName: prasek/supergraph-serverless
20 | newTag: 1.0.29
21 |
--------------------------------------------------------------------------------
/opentelemetry/collector-config.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | otlp:
3 | protocols:
4 | grpc:
5 | http:
6 | cors:
7 | allowed_origins:
8 | - http://*
9 | - https://*
10 |
11 | exporters:
12 | zipkin:
13 | endpoint: "http://zipkin:9411/api/v2/spans"
14 | prometheus:
15 | endpoint: "0.0.0.0:9464"
16 |
17 | processors:
18 | batch:
19 |
20 | extensions:
21 | zpages:
22 | endpoint: 0.0.0.0:55679
23 |
24 | service:
25 | extensions: [zpages]
26 | pipelines:
27 | traces:
28 | receivers: [otlp]
29 | exporters: [zipkin]
30 | processors: [batch]
31 | metrics:
32 | receivers: [otlp]
33 | exporters: [prometheus]
34 | processors: [batch]
35 |
--------------------------------------------------------------------------------
/opentelemetry/prometheus.yml:
--------------------------------------------------------------------------------
1 | global:
2 | scrape_interval: 1s
3 |
4 | scrape_configs:
5 | - job_name: 'collector'
6 | static_configs:
7 | - targets: ['localhost:9464', 'collector:9464']
--------------------------------------------------------------------------------
/renovate.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": [
3 | ":dependencyDashboard",
4 | ":semanticPrefixFixDepsChoreOthers",
5 | ":ignoreModulesAndTests",
6 | ":autodetectPinVersions",
7 | ":prHourlyLimitNone",
8 | ":prConcurrentLimit20",
9 | ":separateMajorReleases",
10 | "schedule:nonOfficeHours",
11 | "group:monorepos",
12 | "group:recommended",
13 | "workarounds:all"
14 | ],
15 | "packageRules": [
16 | { "automerge": true, "matchUpdateTypes": ["minor", "patch", "pin", "digest"]}
17 | ]
18 | }
19 |
--------------------------------------------------------------------------------
/router.yaml:
--------------------------------------------------------------------------------
1 | # see https://www.apollographql.com/docs/router/configuration/overview#yaml-config-file
2 |
3 | supergraph:
4 | listen: 0.0.0.0:4000
5 | cors:
6 | allow_any_origin: true
7 | telemetry:
8 | apollo:
9 | # The percentage of requests will include HTTP request and response headers in traces sent to Apollo Studio.
10 | # This is expensive and should be left at a low value.
11 | # This cannot be higher than tracing->trace_config->sampler
12 | field_level_instrumentation_sampler: 1 # (default)
13 | tracing:
14 | trace_config:
15 | sampler: 1 # The percentage of requests that will generate traces (a rate or `always_on` or `always_off`)
16 | service_name: "router"
17 | service_namespace: "apollo"
18 | otlp:
19 | endpoint: http://${env.APOLLO_OTEL_EXPORTER_HOST:-localhost}:4317
20 | protocol: grpc
21 | timeout: 42s
22 |
23 | # --------------------
24 | # note: `router --dev` has these default settings and enables the --hot-reload flag
25 | # --------------------
26 | # supergraph:
27 | # introspection: true
28 | # sandbox:
29 | # enabled: true
30 | # homepage:
31 | # enabled: false
32 | # include_subgraph_errors:
33 | # all: true # Propagate errors from all subgraphs
34 | # plugins:
35 | # experimental.expose_query_plan: true
36 |
--------------------------------------------------------------------------------
/serverless/Dockerfile:
--------------------------------------------------------------------------------
1 | # Dev image that runs the whole supergraph (router + subgraphs) via `serverless offline`.
2 | # Fix: Dockerfile instruction keywords are conventionally uppercase; every other
3 | # instruction here is uppercase, so `from` -> `FROM` for consistency (linters flag it).
4 | FROM node:16-alpine
5 | 
6 | WORKDIR /usr/src/app
7 | 
8 | # Serverless Framework CLI, needed by the CMD below.
9 | RUN npm install -g serverless
10 | 
11 | # Copy the manifest alone first so `npm install` is cached across source-only edits.
12 | COPY package.json .
13 | 
14 | RUN npm install
15 | 
16 | COPY . ./
17 | 
18 | CMD [ "serverless", "offline" ]
19 | 
--------------------------------------------------------------------------------
/serverless/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "supergraph-serverless",
3 | "version": "1.0.28",
4 | "license": "MIT",
5 | "scripts": {
6 | "dev": "serverless offline"
7 | },
8 | "dependencies": {
9 | "@apollo/gateway": "0.54.1",
10 | "apollo-server-lambda": "3.13.0",
11 | "@apollo/subgraph": "0.6.1",
12 | "graphql": "16.10.0"
13 | },
14 | "devDependencies": {
15 | "serverless": "3.40.0",
16 | "serverless-offline": "8.8.1"
17 | }
18 | }
19 |
--------------------------------------------------------------------------------
/serverless/router.js:
--------------------------------------------------------------------------------
1 | // Lambda entry point: an Apollo Gateway that serves the pre-composed supergraph.
2 | const { ApolloServer } = require("apollo-server-lambda");
3 | const { ApolloGateway } = require("@apollo/gateway");
4 | const { readFileSync } = require("fs");
5 | 
6 | // Read the supergraph SDL once at cold start. Assumes the SDL file is
7 | // mounted/copied to this path in the runtime container — TODO confirm
8 | // against docker-compose.serverless.yml / the deployment that runs this.
9 | const supergraphPath = "/etc/config/supergraph.graphql"
10 | const supergraphSdl = readFileSync(supergraphPath).toString();
11 | 
12 | // Static (non-managed) federation: query planning uses the local SDL only.
13 | const gateway = new ApolloGateway({
14 | supergraphSdl,
15 | });
16 | 
17 | const server = new ApolloServer({
18 | gateway,
19 | });
20 | 
21 | // AWS Lambda handler; wired up as `router.handler` in serverless.yml.
22 | exports.handler = server.createHandler();
23 | 
--------------------------------------------------------------------------------
/serverless/serverless.yml:
--------------------------------------------------------------------------------
# Serverless Framework config: runs the gateway (router) and four subgraphs
# as Lambda-style handlers, served locally by serverless-offline on :4000.
service: supergraph-serverless

frameworkVersion: '3'

plugins:
  - serverless-offline

provider:
  name: aws
  # NOTE(review): the serverless Dockerfile is based on node:16-alpine but the
  # declared Lambda runtime is nodejs14.x — presumably harmless under
  # serverless-offline, but confirm before a real AWS deploy.
  runtime: nodejs14.x
  lambdaHashingVersion: 20201221

functions:
  # Apollo Gateway serving the composed supergraph at the root path.
  router:
    handler: router.handler
    events:
      - http:
          path: /
          method: ANY
          cors: true
  # One handler per subgraph; the supergraph routes to these paths
  # (see serverless/supergraph.yaml routing_urls).
  inventory:
    handler: subgraphs/inventory.handler
    events:
      - http:
          path: inventory
          method: ANY
          cors: true
  products:
    handler: subgraphs/products.handler
    events:
      - http:
          path: products
          method: ANY
          cors: true
  users:
    handler: subgraphs/users.handler
    events:
      - http:
          path: users
          method: ANY
          cors: true
  pandas:
    handler: subgraphs/pandas.handler
    events:
      - http:
          path: pandas
          method: ANY
          cors: true

custom:
  serverless-offline:
    noPrependStageInUrl: true
    useChildProcesses: true
    host: 0.0.0.0
    httpPort: 4000
56 |
--------------------------------------------------------------------------------
/serverless/subgraphs/inventory.graphql:
--------------------------------------------------------------------------------
# Inventory subgraph: extends the Product entity with delivery estimates.
directive @tag(name: String!) repeatable on FIELD_DEFINITION

extend type Product @key(fields: "id") {
  id: ID! @external @tag(name: "hi from inventory")
  dimensions: ProductDimension @external
  # Needs dimensions (owned by the products subgraph) to compute an estimate.
  delivery(zip: String): DeliveryEstimates @requires(fields: "dimensions { size weight }")
}

type ProductDimension {
  size: String
  weight: Float @tag(name: "hi from inventory value type field")
}

type DeliveryEstimates {
  estimatedDelivery: String
  fastestDelivery: String
}
18 |
--------------------------------------------------------------------------------
/serverless/subgraphs/inventory.js:
--------------------------------------------------------------------------------
const { ApolloServer, gql } = require("apollo-server-lambda");
const { buildSubgraphSchema } = require('@apollo/subgraph');
const { readFileSync } = require('fs');
const { resolve } = require('path');

// Static delivery estimates, keyed by product id.
const delivery = [
  { id: 'apollo-federation', estimatedDelivery: '6/25/2021', fastestDelivery: '6/24/2021' },
  { id: 'apollo-studio', estimatedDelivery: '6/25/2021', fastestDelivery: '6/24/2021' },
]

// Load the subgraph SDL from the file next to this module.
const schemaText = readFileSync(resolve(__dirname, './inventory.graphql'), { encoding: 'utf-8' });
const typeDefs = gql(schemaText);

const resolvers = {
  Product: {
    // Look up the delivery estimate for the referenced product.
    delivery(product) {
      return delivery.find((entry) => entry.id == product.id);
    },
  },
};

const server = new ApolloServer({ schema: buildSubgraphSchema({ typeDefs, resolvers }) });
exports.handler = server.createHandler();
21 |
--------------------------------------------------------------------------------
/serverless/subgraphs/pandas.graphql:
--------------------------------------------------------------------------------
# Pandas subgraph schema: plain (non-federated) types — no @key directives;
# pandas are identified by `name` only.
type Query {
  allPandas: [Panda]
  # Look up a single panda by its name (Panda has no separate id field).
  panda(name: ID!): Panda
}

type Panda {
  name:ID!
  favoriteFood: String
}
10 |
--------------------------------------------------------------------------------
/serverless/subgraphs/pandas.js:
--------------------------------------------------------------------------------
const { ApolloServer, gql } = require("apollo-server-lambda");
const { readFileSync } = require('fs');
const { resolve } = require('path');

// In-memory demo data; pandas are identified by `name` (see pandas.graphql).
const pandas = [
  { name: 'Basi', favoriteFood: "bamboo leaves" },
  { name: 'Yun', favoriteFood: "apple" }
]

const typeDefs = gql(readFileSync(resolve(__dirname, './pandas.graphql'), { encoding: 'utf-8' }));
const resolvers = {
  Query: {
    allPandas: (_, args, context) => {
      return pandas;
    },
    // The schema declares `panda(name: ID!)` and Panda has no `id` field, so
    // match on `args.name` — the old `p.id == args.id` always returned undefined.
    panda: (_, args, context) => {
      return pandas.find(p => p.name == args.name);
    }
  },
}
const server = new ApolloServer({ typeDefs, resolvers });
exports.handler = server.createHandler();
23 |
--------------------------------------------------------------------------------
/serverless/subgraphs/products.graphql:
--------------------------------------------------------------------------------
# Products subgraph: owns the Product entity (three @key variants) and
# extends User, providing `totalProductsCreated` via `createdBy`.
directive @tag(name: String!) repeatable on FIELD_DEFINITION

type Product @key(fields: "id") @key(fields: "sku package") @key(fields: "sku variation { id }"){
  id: ID! @tag(name: "hi from products")
  sku: String @tag(name: "hi from products")
  package: String
  variation: ProductVariation
  dimensions: ProductDimension

  # This subgraph can resolve the external field below without a fetch
  # to the users subgraph.
  createdBy: User @provides(fields: "totalProductsCreated")
}

type ProductVariation {
  id: ID!
}

type ProductDimension {
  size: String
  weight: Float
}

extend type Query {
  allProducts: [Product]
  product(id: ID!): Product
}

extend type User @key(fields: "email") {
  email: ID! @external
  totalProductsCreated: Int @external
}
31 |
--------------------------------------------------------------------------------
/serverless/subgraphs/products.js:
--------------------------------------------------------------------------------
const { ApolloServer, gql } = require("apollo-server-lambda");
const { buildSubgraphSchema } = require('@apollo/subgraph');
const { readFileSync } = require('fs');
const { resolve } = require('path');

// Demo catalog; `variation` is stored as a plain string and wrapped into a
// ProductVariation object by the field resolver below.
const products = [
  { id: 'apollo-federation', sku: 'federation', package: '@apollo/federation', variation: "OSS" },
  { id: 'apollo-studio', sku: 'studio', package: '', variation: "platform" },
]

const schemaText = readFileSync(resolve(__dirname, './products.graphql'), { encoding: 'utf-8' });
const typeDefs = gql(schemaText);

const resolvers = {
  Query: {
    allProducts() {
      return products;
    },
    product(_, args) {
      return products.find((p) => p.id == args.id);
    },
  },
  Product: {
    variation(reference) {
      // A reference may already carry the raw variation string; otherwise
      // look it up in the catalog by id.
      const raw = reference.variation
        ? reference.variation
        : products.find((p) => p.id == reference.id).variation;
      return { id: raw };
    },
    dimensions() {
      // Fixed dimensions for every product (demo data).
      return { size: "1", weight: 1 };
    },
    createdBy() {
      return { email: 'support@apollographql.com', totalProductsCreated: 1337 };
    },
    __resolveReference(reference) {
      // Resolve by whichever @key fields arrived: `id` first, then
      // `sku` + `package`; any other key shape falls back to a stub entity.
      if (reference.id) {
        return products.find((p) => p.id == reference.id);
      }
      if (reference.sku && reference.package) {
        return products.find((p) => p.sku == reference.sku && p.package == reference.package);
      }
      return { id: 'rover', package: '@apollo/rover', ...reference };
    },
  },
};

const server = new ApolloServer({ schema: buildSubgraphSchema({ typeDefs, resolvers }) });
exports.handler = server.createHandler();
42 |
--------------------------------------------------------------------------------
/serverless/subgraphs/users.graphql:
--------------------------------------------------------------------------------
# Users subgraph: owns the User entity, keyed by `email`; the products
# subgraph extends it and externally references `totalProductsCreated`.
type User @key(fields:"email") {
  email:ID!
  name: String
  totalProductsCreated: Int
}
--------------------------------------------------------------------------------
/serverless/subgraphs/users.js:
--------------------------------------------------------------------------------
const { ApolloServer, gql } = require("apollo-server-lambda");
const { buildSubgraphSchema } = require('@apollo/subgraph');
const { readFileSync } = require('fs');
const { resolve } = require('path');

// Single demo user, referenced by the products subgraph via `email`.
const users = [
  { email: 'support@apollographql.com', name: "Apollo Studio Support", totalProductsCreated: 4 }
]

const schemaText = readFileSync(resolve(__dirname, './users.graphql'), { encoding: 'utf-8' });
const typeDefs = gql(schemaText);

const resolvers = {
  User: {
    // Entity resolution: look the user up by its `email` key field.
    __resolveReference(reference) {
      return users.find((u) => u.email == reference.email);
    },
  },
};

const server = new ApolloServer({ schema: buildSubgraphSchema({ typeDefs, resolvers }) });
exports.handler = server.createHandler();
21 |
--------------------------------------------------------------------------------
/serverless/supergraph.graphql:
--------------------------------------------------------------------------------
1 | schema
2 | @core(feature: "https://specs.apollo.dev/core/v0.2"),
3 | @core(feature: "https://specs.apollo.dev/join/v0.1", for: EXECUTION),
4 | @core(feature: "https://specs.apollo.dev/tag/v0.1")
5 | {
6 | query: Query
7 | }
8 |
9 | directive @core(as: String, feature: String!, for: core__Purpose) repeatable on SCHEMA
10 |
11 | directive @join__field(graph: join__Graph, provides: join__FieldSet, requires: join__FieldSet) on FIELD_DEFINITION
12 |
13 | directive @join__graph(name: String!, url: String!) on ENUM_VALUE
14 |
15 | directive @join__owner(graph: join__Graph!) on INTERFACE | OBJECT
16 |
17 | directive @join__type(graph: join__Graph!, key: join__FieldSet) repeatable on INTERFACE | OBJECT
18 |
19 | directive @tag(name: String!) repeatable on FIELD_DEFINITION | INTERFACE | OBJECT | UNION
20 |
21 | type DeliveryEstimates {
22 | estimatedDelivery: String
23 | fastestDelivery: String
24 | }
25 |
26 | type Panda {
27 | favoriteFood: String
28 | name: ID!
29 | }
30 |
31 | type Product
32 | @join__owner(graph: PRODUCTS)
33 | @join__type(graph: PRODUCTS, key: "id")
34 | @join__type(graph: PRODUCTS, key: "sku package")
35 | @join__type(graph: PRODUCTS, key: "sku variation{id}")
36 | @join__type(graph: INVENTORY, key: "id")
37 | {
38 | createdBy: User @join__field(graph: PRODUCTS, provides: "totalProductsCreated")
39 | delivery(zip: String): DeliveryEstimates @join__field(graph: INVENTORY, requires: "dimensions{size weight}")
40 | dimensions: ProductDimension @join__field(graph: PRODUCTS)
41 | id: ID! @join__field(graph: PRODUCTS) @tag(name: "hi from inventory") @tag(name: "hi from products")
42 | package: String @join__field(graph: PRODUCTS)
43 | sku: String @join__field(graph: PRODUCTS) @tag(name: "hi from products")
44 | variation: ProductVariation @join__field(graph: PRODUCTS)
45 | }
46 |
47 | type ProductDimension {
48 | size: String
49 | weight: Float @tag(name: "hi from inventory value type field")
50 | }
51 |
52 | type ProductVariation {
53 | id: ID!
54 | }
55 |
56 | type Query {
57 | allPandas: [Panda] @join__field(graph: PANDAS)
58 | allProducts: [Product] @join__field(graph: PRODUCTS)
59 | panda(name: ID!): Panda @join__field(graph: PANDAS)
60 | product(id: ID!): Product @join__field(graph: PRODUCTS)
61 | }
62 |
63 | type User
64 | @join__owner(graph: USERS)
65 | @join__type(graph: USERS, key: "email")
66 | @join__type(graph: PRODUCTS, key: "email")
67 | {
68 | email: ID! @join__field(graph: USERS)
69 | name: String @join__field(graph: USERS)
70 | totalProductsCreated: Int @join__field(graph: USERS)
71 | }
72 |
73 | enum core__Purpose {
74 | """
75 | `EXECUTION` features provide metadata necessary to for operation execution.
76 | """
77 | EXECUTION
78 |
79 | """
80 | `SECURITY` features provide metadata necessary to securely resolve fields.
81 | """
82 | SECURITY
83 | }
84 |
85 | scalar join__FieldSet
86 |
87 | enum join__Graph {
88 | INVENTORY @join__graph(name: "inventory" url: "http://localhost:4000/inventory")
89 | PANDAS @join__graph(name: "pandas" url: "http://localhost:4000/pandas")
90 | PRODUCTS @join__graph(name: "products" url: "http://localhost:4000/products")
91 | USERS @join__graph(name: "users" url: "http://localhost:4000/users")
92 | }
93 |
--------------------------------------------------------------------------------
/serverless/supergraph.yaml:
--------------------------------------------------------------------------------
# Rover composition config for the serverless demo:
#   rover supergraph compose --config ./supergraph.yaml
# routing_url is where the router sends traffic at runtime (serverless-offline
# paths on :4000); schema.file is the SDL used at composition time.
subgraphs:
  inventory:
    routing_url: http://localhost:4000/inventory
    schema:
      file: subgraphs/inventory.graphql
  products:
    routing_url: http://localhost:4000/products
    schema:
      file: subgraphs/products.graphql
  users:
    routing_url: http://localhost:4000/users
    schema:
      file: subgraphs/users.graphql
  pandas:
    routing_url: http://localhost:4000/pandas
    schema:
      file: subgraphs/pandas.graphql
18 |
--------------------------------------------------------------------------------
/subgraphs/inventory/Dockerfile:
--------------------------------------------------------------------------------
# Container for the inventory subgraph service.
FROM node:16-alpine

WORKDIR /usr/src/app

# Manifest first so the npm install layer stays cached on source changes.
COPY package.json .

RUN npm install

COPY inventory.js .
COPY inventory.graphql .

CMD [ "node", "inventory.js" ]
13 |
--------------------------------------------------------------------------------
/subgraphs/inventory/inventory.graphql:
--------------------------------------------------------------------------------
# Inventory subgraph: extends the Product entity with delivery estimates.
directive @tag(name: String!) repeatable on FIELD_DEFINITION

extend type Product @key(fields: "id") {
  id: ID! @external @tag(name: "hi-from-inventory")
  dimensions: ProductDimension @external
  # Needs dimensions (owned by the products subgraph) to compute an estimate.
  delivery(zip: String): DeliveryEstimates @requires(fields: "dimensions { size weight }")
}

type ProductDimension {
  size: String
  weight: Float @tag(name: "hi-from-inventory-value-type-field")
}

type DeliveryEstimates {
  estimatedDelivery: String
  fastestDelivery: String
}
18 |
--------------------------------------------------------------------------------
/subgraphs/inventory/inventory.js:
--------------------------------------------------------------------------------
// Open Telemetry (optional)
// NOTE(review): this block runs before apollo-server is require()d below —
// presumably so instrumentation can hook those modules; keep it first (confirm).
const { ApolloOpenTelemetry } = require('supergraph-demo-opentelemetry');

if (process.env.APOLLO_OTEL_EXPORTER_TYPE) {
  new ApolloOpenTelemetry({
    type: 'subgraph',
    name: 'inventory',
    exporter: {
      type: process.env.APOLLO_OTEL_EXPORTER_TYPE, // console, zipkin, collector
      host: process.env.APOLLO_OTEL_EXPORTER_HOST,
      port: process.env.APOLLO_OTEL_EXPORTER_PORT,
    }
  }).setupInstrumentation();
}

const { ApolloServer, gql } = require('apollo-server');
const { buildSubgraphSchema } = require('@apollo/subgraph');
const { readFileSync } = require('fs');

// Port comes from the container environment; 4000 for local runs.
const port = process.env.APOLLO_PORT || 4000;

// Static delivery estimates, keyed by product id.
const delivery = [
  { id: 'apollo-federation', estimatedDelivery: '6/25/2021', fastestDelivery: '6/24/2021' },
  { id: 'apollo-studio', estimatedDelivery: '6/25/2021', fastestDelivery: '6/24/2021' },
]

const typeDefs = gql(readFileSync('./inventory.graphql', { encoding: 'utf-8' }));
const resolvers = {
  Product: {
    // Look up the delivery estimate for the referenced product.
    delivery: (product, args, context) => {
      return delivery.find(p => p.id == product.id);
    }
  }
}
const server = new ApolloServer({ schema: buildSubgraphSchema({ typeDefs, resolvers }) });
server.listen( {port: port} ).then(({ url }) => {
  console.log(`🚀 Inventory subgraph ready at ${url}`);
}).catch(err => {console.error(err)});
39 |
--------------------------------------------------------------------------------
/subgraphs/inventory/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "subgraph-inventory",
3 | "version": "1.1.48",
4 | "description": "",
5 | "main": "inventory.js",
6 | "scripts": {
7 | "start": "node inventory.js"
8 | },
9 | "dependencies": {
10 | "@apollo/subgraph": "0.6.1",
11 | "apollo-server": "3.13.0",
12 | "supergraph-demo-opentelemetry": "0.2.4",
13 | "graphql": "16.10.0"
14 | },
15 | "keywords": [],
16 | "author": "",
17 | "license": "MIT"
18 | }
19 |
--------------------------------------------------------------------------------
/subgraphs/pandas/Dockerfile:
--------------------------------------------------------------------------------
# Container for the pandas subgraph service.
FROM node:16-alpine

WORKDIR /usr/src/app

# Manifest first so the npm install layer stays cached on source changes.
COPY package.json .

RUN npm install

COPY pandas.js .
COPY pandas.graphql .

CMD [ "node", "pandas.js" ]
13 |
--------------------------------------------------------------------------------
/subgraphs/pandas/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "subgraph-pandas",
3 | "version": "1.1.37",
4 | "description": "",
5 | "main": "pandas.js",
6 | "scripts": {
7 | "start": "node pandas.js"
8 | },
9 | "dependencies": {
10 | "apollo-server": "3.13.0",
11 | "graphql": "16.10.0"
12 | },
13 | "keywords": [],
14 | "author": "",
15 | "license": "MIT"
16 | }
17 |
--------------------------------------------------------------------------------
/subgraphs/pandas/pandas.graphql:
--------------------------------------------------------------------------------
# Pandas subgraph schema: plain (non-federated) types — no @key directives;
# pandas are identified by `name` only.
type Query {
  allPandas: [Panda]
  # Look up a single panda by its name (Panda has no separate id field).
  panda(name: ID!): Panda
}

type Panda {
  name:ID!
  favoriteFood: String
}
10 |
--------------------------------------------------------------------------------
/subgraphs/pandas/pandas.js:
--------------------------------------------------------------------------------
const { ApolloServer, gql } = require('apollo-server');
const { readFileSync } = require('fs');

// Port comes from the container environment; 4000 for local runs.
const port = process.env.APOLLO_PORT || 4000;

// In-memory demo data; pandas are identified by `name` (see pandas.graphql).
const pandas = [
  { name: 'Basi', favoriteFood: "bamboo leaves" },
  { name: 'Yun', favoriteFood: "apple" }
]

const typeDefs = gql(readFileSync('./pandas.graphql', { encoding: 'utf-8' }));
const resolvers = {
  Query: {
    allPandas: (_, args, context) => {
      return pandas;
    },
    // The schema declares `panda(name: ID!)` and Panda has no `id` field, so
    // match on `args.name` — the old `p.id == args.id` always returned undefined.
    panda: (_, args, context) => {
      return pandas.find(p => p.name == args.name);
    }
  },
}
const server = new ApolloServer({ typeDefs, resolvers });
server.listen( {port: port} ).then(({ url }) => {
  console.log(`🚀 Pandas subgraph ready at ${url}`);
}).catch(err => {console.error(err)});
26 |
--------------------------------------------------------------------------------
/subgraphs/products/Dockerfile:
--------------------------------------------------------------------------------
# Container for the products subgraph service.
FROM node:16-alpine

WORKDIR /usr/src/app

# Manifest first so the npm install layer stays cached on source changes.
COPY package.json .

RUN npm install

COPY products.js .
COPY products.graphql .

CMD [ "node", "products.js" ]
13 |
--------------------------------------------------------------------------------
/subgraphs/products/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "subgraph-products",
3 | "version": "1.1.44",
4 | "description": "",
5 | "main": "products.js",
6 | "scripts": {
7 | "start": "node products.js"
8 | },
9 | "dependencies": {
10 | "@apollo/subgraph": "0.6.1",
11 | "apollo-server": "3.13.0",
12 | "supergraph-demo-opentelemetry": "0.2.4",
13 | "graphql": "16.10.0"
14 | },
15 | "keywords": [],
16 | "author": "",
17 | "license": "MIT"
18 | }
19 |
--------------------------------------------------------------------------------
/subgraphs/products/products.graphql:
--------------------------------------------------------------------------------
# Products subgraph: owns the Product entity (three @key variants) and
# extends User, providing `totalProductsCreated` via `createdBy`.
directive @tag(name: String!) repeatable on FIELD_DEFINITION

type Product @key(fields: "id") @key(fields: "sku package") @key(fields: "sku variation { id }"){
  id: ID! @tag(name: "hi-from-products")
  sku: String @tag(name: "hi-from-products")
  package: String
  variation: ProductVariation
  dimensions: ProductDimension

  # This subgraph can resolve the external field below without a fetch
  # to the users subgraph.
  createdBy: User @provides(fields: "totalProductsCreated")
}

type ProductVariation {
  id: ID!
}

type ProductDimension {
  size: String
  weight: Float
}

extend type Query {
  allProducts: [Product]
  product(id: ID!): Product
}

extend type User @key(fields: "email") {
  email: ID! @external
  totalProductsCreated: Int @external
}
31 |
--------------------------------------------------------------------------------
/subgraphs/products/products.js:
--------------------------------------------------------------------------------
// Open Telemetry (optional)
// NOTE(review): this block runs before apollo-server is require()d below —
// presumably so instrumentation can hook those modules; keep it first (confirm).
const { ApolloOpenTelemetry } = require('supergraph-demo-opentelemetry');

if (process.env.APOLLO_OTEL_EXPORTER_TYPE) {
  new ApolloOpenTelemetry({
    type: 'subgraph',
    name: 'products',
    exporter: {
      type: process.env.APOLLO_OTEL_EXPORTER_TYPE, // console, zipkin, collector
      host: process.env.APOLLO_OTEL_EXPORTER_HOST,
      port: process.env.APOLLO_OTEL_EXPORTER_PORT,
    }
  }).setupInstrumentation();
}

const { ApolloServer, gql } = require('apollo-server');
const { buildSubgraphSchema } = require('@apollo/subgraph');
const { readFileSync } = require('fs');

// Port comes from the container environment; 4000 for local runs.
const port = process.env.APOLLO_PORT || 4000;

// Demo catalog; `variation` is stored as a plain string and wrapped into a
// ProductVariation object by the `variation` field resolver below.
const products = [
  { id: 'apollo-federation', sku: 'federation', package: '@apollo/federation', variation: "OSS" },
  { id: 'apollo-studio', sku: 'studio', package: '', variation: "platform" },
]
const typeDefs = gql(readFileSync('./products.graphql', { encoding: 'utf-8' }));
const resolvers = {
  Query: {
    allProducts: (_, args, context) => {
      return products;
    },
    product: (_, args, context) => {
      return products.find(p => p.id == args.id);
    }
  },
  Product: {
    variation: (reference) => {
      // A reference may already carry the raw variation string (from the
      // `sku variation { id }` key); otherwise look it up by id.
      if (reference.variation) return { id: reference.variation };
      return { id: products.find(p => p.id == reference.id).variation }
    },
    dimensions: () => {
      // Fixed dimensions for every product (demo data).
      return { size: "1", weight: 1 }
    },
    createdBy: (reference) => {
      return { email: 'support@apollographql.com', totalProductsCreated: 1337 }
    },
    __resolveReference: (reference) => {
      // Resolve by whichever @key fields arrived: `id` first, then
      // `sku` + `package`; any other key shape falls back to a stub entity.
      if (reference.id) return products.find(p => p.id == reference.id);
      else if (reference.sku && reference.package) return products.find(p => p.sku == reference.sku && p.package == reference.package);
      else return { id: 'rover', package: '@apollo/rover', ...reference };
    }
  }
}
const server = new ApolloServer({ schema: buildSubgraphSchema({ typeDefs, resolvers }) });
server.listen( {port: port} ).then(({ url }) => {
  console.log(`🚀 Products subgraph ready at ${url}`);
}).catch(err => {console.error(err)});
58 |
--------------------------------------------------------------------------------
/subgraphs/users/Dockerfile:
--------------------------------------------------------------------------------
# Container for the users subgraph service.
FROM node:16-alpine

WORKDIR /usr/src/app

# Manifest first so the npm install layer stays cached on source changes.
COPY package.json .

RUN npm install

COPY users.js .
COPY users.graphql .

CMD [ "node", "users.js" ]
13 |
--------------------------------------------------------------------------------
/subgraphs/users/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "subgraph-users",
3 | "version": "1.1.46",
4 | "description": "",
5 | "main": "users.js",
6 | "scripts": {
7 | "start": "node users.js"
8 | },
9 | "dependencies": {
10 | "@apollo/subgraph": "0.6.1",
11 | "apollo-server": "3.13.0",
12 | "supergraph-demo-opentelemetry": "0.2.4",
13 | "graphql": "16.10.0"
14 | },
15 | "keywords": [],
16 | "author": "",
17 | "license": "MIT"
18 | }
19 |
--------------------------------------------------------------------------------
/subgraphs/users/users.graphql:
--------------------------------------------------------------------------------
# Users subgraph: owns the User entity, keyed by `email`; the products
# subgraph extends it and externally references `totalProductsCreated`.
type User @key(fields:"email") {
  email:ID!
  name: String
  totalProductsCreated: Int
}
--------------------------------------------------------------------------------
/subgraphs/users/users.js:
--------------------------------------------------------------------------------
// Open Telemetry (optional)
// NOTE(review): this block runs before apollo-server is require()d below —
// presumably so instrumentation can hook those modules; keep it first (confirm).
const { ApolloOpenTelemetry } = require('supergraph-demo-opentelemetry');

if (process.env.APOLLO_OTEL_EXPORTER_TYPE) {
  new ApolloOpenTelemetry({
    type: 'subgraph',
    name: 'users',
    exporter: {
      type: process.env.APOLLO_OTEL_EXPORTER_TYPE, // console, zipkin, collector
      host: process.env.APOLLO_OTEL_EXPORTER_HOST,
      port: process.env.APOLLO_OTEL_EXPORTER_PORT,
    }
  }).setupInstrumentation();
}

const { ApolloServer, gql } = require('apollo-server');
const { buildSubgraphSchema } = require('@apollo/subgraph');
const { readFileSync } = require('fs');

// Port comes from the container environment; 4000 for local runs.
const port = process.env.APOLLO_PORT || 4000;

// Single demo user, referenced by the products subgraph via `email`.
const users = [
  { email: 'support@apollographql.com', name: "Apollo Studio Support", totalProductsCreated: 4 }
]

const typeDefs = gql(readFileSync('./users.graphql', { encoding: 'utf-8' }));
const resolvers = {
  User: {
    // Entity resolution: look the user up by its `email` key field.
    __resolveReference: (reference) => {
      return users.find(u => u.email == reference.email);
    }
  }
}
const server = new ApolloServer({ schema: buildSubgraphSchema({ typeDefs, resolvers }) });
server.listen( {port: port} ).then(({ url }) => {
  console.log(`🚀 Users subgraph ready at ${url}`);
}).catch(err => {console.error(err)});
38 |
--------------------------------------------------------------------------------
/supergraph.graphql:
--------------------------------------------------------------------------------
1 | schema
2 | @core(feature: "https://specs.apollo.dev/core/v0.2"),
3 | @core(feature: "https://specs.apollo.dev/join/v0.1", for: EXECUTION),
4 | @core(feature: "https://specs.apollo.dev/tag/v0.1")
5 | {
6 | query: Query
7 | }
8 |
9 | directive @core(as: String, feature: String!, for: core__Purpose) repeatable on SCHEMA
10 |
11 | directive @join__field(graph: join__Graph, provides: join__FieldSet, requires: join__FieldSet) on FIELD_DEFINITION
12 |
13 | directive @join__graph(name: String!, url: String!) on ENUM_VALUE
14 |
15 | directive @join__owner(graph: join__Graph!) on INTERFACE | OBJECT
16 |
17 | directive @join__type(graph: join__Graph!, key: join__FieldSet) repeatable on INTERFACE | OBJECT
18 |
19 | directive @tag(name: String!) repeatable on FIELD_DEFINITION | INTERFACE | OBJECT | UNION
20 |
21 | type DeliveryEstimates {
22 | estimatedDelivery: String
23 | fastestDelivery: String
24 | }
25 |
26 | type Panda {
27 | favoriteFood: String
28 | name: ID!
29 | }
30 |
31 | type Product
32 | @join__owner(graph: PRODUCTS)
33 | @join__type(graph: PRODUCTS, key: "id")
34 | @join__type(graph: PRODUCTS, key: "sku package")
35 | @join__type(graph: PRODUCTS, key: "sku variation{id}")
36 | @join__type(graph: INVENTORY, key: "id")
37 | {
38 | createdBy: User @join__field(graph: PRODUCTS, provides: "totalProductsCreated")
39 | delivery(zip: String): DeliveryEstimates @join__field(graph: INVENTORY, requires: "dimensions{size weight}")
40 | dimensions: ProductDimension @join__field(graph: PRODUCTS)
41 | id: ID! @join__field(graph: PRODUCTS) @tag(name: "hi-from-inventory") @tag(name: "hi-from-products")
42 | package: String @join__field(graph: PRODUCTS)
43 | sku: String @join__field(graph: PRODUCTS) @tag(name: "hi-from-products")
44 | variation: ProductVariation @join__field(graph: PRODUCTS)
45 | }
46 |
47 | type ProductDimension {
48 | size: String
49 | weight: Float @tag(name: "hi-from-inventory-value-type-field")
50 | }
51 |
52 | type ProductVariation {
53 | id: ID!
54 | }
55 |
56 | type Query {
57 | allPandas: [Panda] @join__field(graph: PANDAS)
58 | allProducts: [Product] @join__field(graph: PRODUCTS)
59 | panda(name: ID!): Panda @join__field(graph: PANDAS)
60 | product(id: ID!): Product @join__field(graph: PRODUCTS)
61 | }
62 |
63 | type User
64 | @join__owner(graph: USERS)
65 | @join__type(graph: USERS, key: "email")
66 | @join__type(graph: PRODUCTS, key: "email")
67 | {
68 | email: ID! @join__field(graph: USERS)
69 | name: String @join__field(graph: USERS)
70 | totalProductsCreated: Int @join__field(graph: USERS)
71 | }
72 |
73 | enum core__Purpose {
74 | """
75 | `EXECUTION` features provide metadata necessary to for operation execution.
76 | """
77 | EXECUTION
78 |
79 | """
80 | `SECURITY` features provide metadata necessary to securely resolve fields.
81 | """
82 | SECURITY
83 | }
84 |
85 | scalar join__FieldSet
86 |
87 | enum join__Graph {
88 | INVENTORY @join__graph(name: "inventory" url: "http://inventory:4000/graphql")
89 | PANDAS @join__graph(name: "pandas" url: "http://pandas:4000/graphql")
90 | PRODUCTS @join__graph(name: "products" url: "http://products:4000/graphql")
91 | USERS @join__graph(name: "users" url: "http://users:4000/graphql")
92 | }
93 |
--------------------------------------------------------------------------------
/supergraph.yaml:
--------------------------------------------------------------------------------
# Rover composition config:
#   rover supergraph compose --config ./supergraph.yaml
# routing_url is where the router sends traffic at runtime (docker-compose
# service names); schema.file is the SDL used at composition time.
subgraphs:
  inventory:
    routing_url: http://inventory:4000/graphql
    schema:
      file: ./subgraphs/inventory/inventory.graphql
  products:
    routing_url: http://products:4000/graphql
    schema:
      file: ./subgraphs/products/products.graphql
  users:
    routing_url: http://users:4000/graphql
    schema:
      file: ./subgraphs/users/users.graphql
  pandas:
    routing_url: http://pandas:4000/graphql
    schema:
      file: ./subgraphs/pandas/pandas.graphql
18 |
--------------------------------------------------------------------------------